From 6de09e07a6f7d043589205c25594852900a6626b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Jul 2019 15:33:37 +0100 Subject: [PATCH 001/136] Add membership column to current_state_events table. It turns out that doing a join is surprisingly expensive for the DB to do when room_membership table is larger than the disk cache. --- synapse/storage/events.py | 26 ++++++++++--------- synapse/storage/prepare_database.py | 2 +- synapse/storage/roommember.py | 6 ++--- .../56/current_state_events_membership.sql | 19 ++++++++++++++ synapse/storage/user_directory.py | 8 +++--- 5 files changed, 41 insertions(+), 20 deletions(-) create mode 100644 synapse/storage/schema/delta/56/current_state_events_membership.sql diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b486ca50eb33..b70457bfc6aa 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -918,8 +918,6 @@ def _persist_events_txn( min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering max_stream_order = events_and_contexts[-1][0].internal_metadata.stream_ordering - self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) - self._update_forward_extremities_txn( txn, new_forward_extremities=new_forward_extremeties, @@ -993,6 +991,10 @@ def _persist_events_txn( backfilled=backfilled, ) + # We call this last as it assumes we've inserted the events into + # room_memberships, where applicable. + self._update_current_state_txn(txn, state_delta_for_room, min_stream_order) + def _update_current_state_txn(self, txn, state_delta_by_room, stream_id): for room_id, current_state_tuple in iteritems(state_delta_by_room): to_delete, to_insert = current_state_tuple @@ -1062,16 +1064,16 @@ def _update_current_state_txn(self, txn, state_delta_by_room, stream_id): ), ) - self._simple_insert_many_txn( - txn, - table="current_state_events", - values=[ - { - "event_id": ev_id, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - } + # We include the membership in the current state table, hence we do + # a lookup when we insert. This assumes that all events have already + # been inserted into room_memberships. + txn.executemany( + """INSERT INTO current_state_events + (room_id, type, state_key, event_id, membership) + VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) + """, + [ + (room_id, key[0], key[1], ev_id, ev_id) for key, ev_id in iteritems(to_insert) ], ) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 7c4e1dc7ec97..d20eacda5901 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -27,7 +27,7 @@ # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 55 +SCHEMA_VERSION = 56 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 32cfd010a58a..4946afe63549 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -224,7 +224,7 @@ def _get_rooms_for_user_where_membership_is_txn( results = [] if membership_list: where_clause = "user_id = ? AND (%s) AND forgotten = 0" % ( - " OR ".join(["membership = ?" for _ in membership_list]), + " OR ".join(["m.membership = ?" 
for _ in membership_list]), ) args = [user_id] @@ -453,8 +453,8 @@ def is_host_joined(self, room_id, host): sql = """ SELECT state_key FROM current_state_events AS c - INNER JOIN room_memberships USING (event_id) - WHERE membership = 'join' + INNER JOIN room_memberships AS m USING (event_id) + WHERE m.membership = 'join' AND type = 'm.room.member' AND c.room_id = ? AND state_key LIKE ? diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership.sql new file mode 100644 index 000000000000..5c754651cb04 --- /dev/null +++ b/synapse/storage/schema/delta/56/current_state_events_membership.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- We add membership to current state so that we don't need to join against +-- room_memberships, which can be surprisingly costly (we do such queries +-- very frequently). +ALTER TABLE current_state_events ADD membership TEXT; diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index 83466e25d99f..7fd16fe65e31 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -618,15 +618,15 @@ def get_rooms_in_common_for_users(self, user_id, other_user_id): sql = """ SELECT room_id FROM ( SELECT c.room_id FROM current_state_events AS c - INNER JOIN room_memberships USING (event_id) + INNER JOIN room_memberships AS m USING (event_id) WHERE type = 'm.room.member' - AND membership = 'join' + AND m.membership = 'join' AND state_key = ? ) AS f1 INNER JOIN ( SELECT c.room_id FROM current_state_events AS c - INNER JOIN room_memberships USING (event_id) + INNER JOIN room_memberships AS m USING (event_id) WHERE type = 'm.room.member' - AND membership = 'join' + AND m.membership = 'join' AND state_key = ? 
) f2 USING (room_id) """ From c618a5d348295b69885953cd1970fe1f339a4e9f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Jul 2019 15:50:08 +0100 Subject: [PATCH 002/136] Add background update for current_state_events.membership column --- synapse/storage/roommember.py | 51 +++++++++++++++++++ .../56/current_state_events_membership.sql | 3 ++ 2 files changed, 54 insertions(+) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 4946afe63549..275fef1f661e 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -53,6 +53,7 @@ MemberSummary = namedtuple("MemberSummary", ("members", "count")) _MEMBERSHIP_PROFILE_UPDATE_NAME = "room_membership_profile_update" +_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME = "current_state_events_membership" class RoomMemberWorkerStore(EventsWorkerStore): @@ -602,6 +603,10 @@ def __init__(self, db_conn, hs): self.register_background_update_handler( _MEMBERSHIP_PROFILE_UPDATE_NAME, self._background_add_membership_profile ) + self.register_background_update_handler( + _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, + self._background_current_state_membership, + ) def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database. @@ -781,6 +786,52 @@ def add_membership_profile_txn(txn): defer.returnValue(result) + @defer.inlineCallbacks + def _background_current_state_membership(self, progress, batch_size): + """Update the new membership column on current_state_events. + """ + + if "rooms" not in progress: + rooms = yield self._simple_select_onecol( + table="current_state_events", + keyvalues={}, + retcol="DISTINCT room_id", + desc="_background_current_state_membership_get_rooms", + ) + progress["rooms"] = rooms + + rooms = progress["rooms"] + + def _background_current_state_membership_txn(txn): + processed = 0 + while rooms and processed < batch_size: + sql = """ + UPDATE current_state_events AS c + SET membership = ( + SELECT membership FROM room_memberships + WHERE event_id = c.event_id + ) + WHERE room_id = ? + """ + txn.execute(sql, (rooms.pop(),)) + processed += txn.rowcount + + self._background_update_progress_txn( + txn, _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, progress + ) + + return processed + + result = yield self.runInteraction( + "_background_current_state_membership_update", + _background_current_state_membership_txn, + ) + + if not rooms: + yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME) + + defer.returnValue(result) + class _JoinedHostsCache(object): """Cache for joined hosts in a room that is optimised to handle updates diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership.sql index 5c754651cb04..ec7ad5bae21e 100644 --- a/synapse/storage/schema/delta/56/current_state_events_membership.sql +++ b/synapse/storage/schema/delta/56/current_state_events_membership.sql @@ -17,3 +17,6 @@ -- room_memberships, which can be surprisingly costly (we do such queries -- very frequently). 
ALTER TABLE current_state_events ADD membership TEXT; + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('current_state_events_membership', '{}'); From 059d8c1a4e720f9a0a179f7109f38302885bc9a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Jul 2019 16:09:14 +0100 Subject: [PATCH 003/136] Track if current_state_events.membership is up to date --- synapse/storage/roommember.py | 45 +++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 275fef1f661e..f913abf8d672 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -24,6 +24,8 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.storage._base import LoggingTransaction from synapse.storage.events_worker import EventsWorkerStore from synapse.types import get_domain_from_id from synapse.util.async_helpers import Linearizer @@ -57,6 +59,49 @@ class RoomMemberWorkerStore(EventsWorkerStore): + def __init__(self, db_conn, hs): + super(RoomMemberWorkerStore, self).__init__(db_conn, hs) + + # Is the current_state_events.membership up to date? Or is the + # background update still running? + self._current_state_events_membership_up_to_date = False + + txn = LoggingTransaction( + db_conn.cursor(), + name="_check_safe_current_state_events_membership_updated", + database_engine=self.database_engine, + after_callbacks=[], + exception_callbacks=[], + ) + self._check_safe_current_state_events_membership_updated_txn(txn) + txn.close() + + def _check_safe_current_state_events_membership_updated_txn(self, txn): + """Checks if it is safe to assume the new current_state_events + membership column is up to date + """ + + pending_update = self._simple_select_one_txn( + txn, + table="background_updates", + keyvalues={"update_name": _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME}, + retcols=["update_name"], + allow_none=True, + ) + + self._current_state_events_membership_up_to_date = not pending_update + + # If the update is still running, reschedule to run. 
+ if pending_update: + self._clock.call_later( + 15.0, + run_as_background_process, + "_check_safe_current_state_events_membership_updated", + self.runInteraction, + "_check_safe_current_state_events_membership_updated", + self._check_safe_current_state_events_membership_updated_txn, + ) + @cachedInlineCallbacks(max_entries=100000, iterable=True, cache_context=True) def get_hosts_in_room(self, room_id, cache_context): """Returns the set of all hosts currently in the room From 8e1ada9e6fdebe0cedaf39794a326196a9bd90d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Jul 2019 16:17:17 +0100 Subject: [PATCH 004/136] Use the current_state_events.membership column --- synapse/storage/roommember.py | 54 ++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index f913abf8d672..6541da3b8a75 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -115,14 +115,23 @@ def get_hosts_in_room(self, room_id, cache_context): @cached(max_entries=100000, iterable=True) def get_users_in_room(self, room_id): def f(txn): - sql = ( - "SELECT m.user_id FROM room_memberships as m" - " INNER JOIN current_state_events as c" - " ON m.event_id = c.event_id " - " AND m.room_id = c.room_id " - " AND m.user_id = c.state_key" - " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?" - ) + # If we can assume current_state_events.membership is up to date + # then we can avoid a join, which is a Very Good Thing given how + # frequently this function gets called. + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT state_key FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? AND membership = ? + """ + else: + sql = """ + SELECT state_key FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? + """ txn.execute(sql, (room_id, Membership.JOIN)) return [to_ascii(r[0]) for r in txn] @@ -144,15 +153,26 @@ def _get_room_summary_txn(txn): # first get counts. # We do this all in one transaction to keep the cache small. # FIXME: get rid of this when we have room_stats - sql = """ - SELECT count(*), m.membership FROM room_memberships as m - INNER JOIN current_state_events as c - ON m.event_id = c.event_id - AND m.room_id = c.room_id - AND m.user_id = c.state_key - WHERE c.type = 'm.room.member' AND c.room_id = ? - GROUP BY m.membership - """ + + # If we can assume current_state_events.membership is up to date + # then we can avoid a join, which is a Very Good Thing given how + # frequently this function gets called. + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT count(*), membership FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? + GROUP BY membership + """ + else: + sql = """ + SELECT count(*), m.membership FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? 
+ GROUP BY m.membership + """ txn.execute(sql, (room_id,)) res = {} From 89c885909aeb4591756c011f5eb339d7301591d5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 17 Jul 2019 16:22:26 +0100 Subject: [PATCH 005/136] Newsfile --- changelog.d/5706.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5706.misc diff --git a/changelog.d/5706.misc b/changelog.d/5706.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5706.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From 10523241d86eeaa1fa43607a03352f9e7b04efda Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jul 2019 15:17:37 +0100 Subject: [PATCH 006/136] Delegate to cached version when using get_filtered_current_state_ids In the case where it gets called with `StateFilter.all()` --- synapse/storage/state.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 0bfe1b4550c6..a35289876d10 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -510,6 +510,12 @@ def get_filtered_current_state_ids(self, room_id, state_filter=StateFilter.all() event ID. """ + where_clause, where_args = state_filter.make_sql_filter_clause() + + if not where_clause: + # We delegate to the cached version + return self.get_current_state_ids(room_id) + def _get_filtered_current_state_ids_txn(txn): results = {} sql = """ @@ -517,8 +523,6 @@ def _get_filtered_current_state_ids_txn(txn): WHERE room_id = ? """ - where_clause, where_args = state_filter.make_sql_filter_clause() - if where_clause: sql += " AND (%s)" % (where_clause,) From dd2851d576649194205725bb5105f3cbb4a87e55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 18 Jul 2019 15:27:18 +0100 Subject: [PATCH 007/136] Newsfile --- changelog.d/5713.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5713.misc diff --git a/changelog.d/5713.misc b/changelog.d/5713.misc new file mode 100644 index 000000000000..01ea1cf8d741 --- /dev/null +++ b/changelog.d/5713.misc @@ -0,0 +1 @@ +Improve caching when fetching `get_filtered_current_state_ids`. From 6a85cb5ef7f9dfe4cd58abc313d66ee270db3549 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Jul 2019 01:40:08 +1000 Subject: [PATCH 008/136] Remove non-dedicated logging options and command line arguments (#5678) --- changelog.d/5678.removal | 1 + synapse/config/logger.py | 81 ++++++--------------------------------- synapse/config/workers.py | 6 --- 3 files changed, 13 insertions(+), 75 deletions(-) create mode 100644 changelog.d/5678.removal diff --git a/changelog.d/5678.removal b/changelog.d/5678.removal new file mode 100644 index 000000000000..085b84fda69d --- /dev/null +++ b/changelog.d/5678.removal @@ -0,0 +1 @@ +Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration. diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 40502a579878..d321d00b80e8 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import logging import logging.config import os @@ -75,10 +76,8 @@ class LoggingConfig(Config): def read_config(self, config, **kwargs): - self.verbosity = config.get("verbose", 0) - self.no_redirect_stdio = config.get("no_redirect_stdio", False) self.log_config = self.abspath(config.get("log_config")) - self.log_file = self.abspath(config.get("log_file")) + self.no_redirect_stdio = config.get("no_redirect_stdio", False) def generate_config_section(self, config_dir_path, server_name, **kwargs): log_config = os.path.join(config_dir_path, server_name + ".log.config") @@ -94,38 +93,12 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): ) def read_arguments(self, args): - if args.verbose is not None: - self.verbosity = args.verbose if args.no_redirect_stdio is not None: self.no_redirect_stdio = args.no_redirect_stdio - if args.log_config is not None: - self.log_config = args.log_config - if args.log_file is not None: - self.log_file = args.log_file @staticmethod def add_arguments(parser): logging_group = parser.add_argument_group("logging") - logging_group.add_argument( - "-v", - "--verbose", - dest="verbose", - action="count", - help="The verbosity level. Specify multiple times to increase " - "verbosity. (Ignored if --log-config is specified.)", - ) - logging_group.add_argument( - "-f", - "--log-file", - dest="log_file", - help="File to log to. (Ignored if --log-config is specified.)", - ) - logging_group.add_argument( - "--log-config", - dest="log_config", - default=None, - help="Python logging config file", - ) logging_group.add_argument( "-n", "--no-redirect-stdio", @@ -153,58 +126,29 @@ def setup_logging(config, use_worker_options=False): config (LoggingConfig | synapse.config.workers.WorkerConfig): configuration data - use_worker_options (bool): True to use 'worker_log_config' and - 'worker_log_file' options instead of 'log_config' and 'log_file'. + use_worker_options (bool): True to use the 'worker_log_config' option + instead of 'log_config'. register_sighup (func | None): Function to call to register a sighup handler. """ log_config = config.worker_log_config if use_worker_options else config.log_config - log_file = config.worker_log_file if use_worker_options else config.log_file - - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" - " - %(message)s" - ) if log_config is None: - # We don't have a logfile, so fall back to the 'verbosity' param from - # the config or cmdline. 
(Note that we generate a log config for new - # installs, so this will be an unusual case) - level = logging.INFO - level_for_storage = logging.INFO - if config.verbosity: - level = logging.DEBUG - if config.verbosity > 1: - level_for_storage = logging.DEBUG + log_format = ( + "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" + " - %(message)s" + ) logger = logging.getLogger("") - logger.setLevel(level) - - logging.getLogger("synapse.storage.SQL").setLevel(level_for_storage) + logger.setLevel(logging.INFO) + logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) formatter = logging.Formatter(log_format) - if log_file: - # TODO: Customisable file size / backup count - handler = logging.handlers.RotatingFileHandler( - log_file, maxBytes=(1000 * 1000 * 100), backupCount=3, encoding="utf8" - ) - - def sighup(signum, stack): - logger.info("Closing log file due to SIGHUP") - handler.doRollover() - logger.info("Opened new log file due to SIGHUP") - - else: - handler = logging.StreamHandler() - - def sighup(*args): - pass + handler = logging.StreamHandler() handler.setFormatter(formatter) - handler.addFilter(LoggingContextFilter(request="")) - logger.addHandler(handler) else: @@ -218,8 +162,7 @@ def sighup(*args): logging.info("Reloaded log config from %s due to SIGHUP", log_config) load_log_config() - - appbase.register_sighup(sighup) + appbase.register_sighup(sighup) # make sure that the first thing we log is a thing we can grep backwards # for diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 3b75471d8585..246d72cd611b 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -31,8 +31,6 @@ def read_config(self, config, **kwargs): self.worker_listeners = config.get("worker_listeners", []) self.worker_daemonize = config.get("worker_daemonize") self.worker_pid_file = config.get("worker_pid_file") - self.worker_log_file = config.get("worker_log_file") - self.worker_log_config = config.get("worker_log_config") # The host used to connect to the main synapse self.worker_replication_host = config.get("worker_replication_host", None) @@ -78,9 +76,5 @@ def read_arguments(self, args): if args.daemonize is not None: self.worker_daemonize = args.daemonize - if args.log_config is not None: - self.worker_log_config = args.log_config - if args.log_file is not None: - self.worker_log_file = args.log_file if args.manhole is not None: self.worker_manhole = args.worker_manhole From 356ed0438e3081b48a29c71042620c0c68af3c25 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 19 Jul 2019 19:01:23 +1000 Subject: [PATCH 009/136] Speed up the PostgreSQL unit tests (#5717) --- .buildkite/pipeline.yml | 12 +++++++++--- changelog.d/5717.misc | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5717.misc diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 7f42fad909d1..d5e5aeec6b84 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -117,8 +117,10 @@ steps: limit: 2 - label: ":python: 3.5 / :postgres: 9.5" + agents: + queue: "medium" env: - TRIAL_FLAGS: "-j 4" + TRIAL_FLAGS: "-j 8" command: - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'" plugins: @@ -134,8 +136,10 @@ steps: limit: 2 - label: ":python: 3.7 / :postgres: 9.5" + agents: + queue: "medium" env: - TRIAL_FLAGS: "-j 4" + TRIAL_FLAGS: "-j 8" command: - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'" plugins: @@ -151,8 +155,10 @@ steps: limit: 2 - label: ":python: 3.7 
/ :postgres: 11" + agents: + queue: "medium" env: - TRIAL_FLAGS: "-j 4" + TRIAL_FLAGS: "-j 8" command: - "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'" plugins: diff --git a/changelog.d/5717.misc b/changelog.d/5717.misc new file mode 100644 index 000000000000..07dc3bca946b --- /dev/null +++ b/changelog.d/5717.misc @@ -0,0 +1 @@ +Speed up PostgreSQL unit tests in CI. From b73ce4ba81fa059ef1a10db55e55decefe814649 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 19 Jul 2019 11:55:14 +0100 Subject: [PATCH 010/136] Update the coding style doc (#5719) A few fixes and removal of duplicated stuff, but mostly a bunch of the words on the config file. --- changelog.d/5719.misc | 1 + docs/code_style.rst | 124 ++++++++++++++++++++++++++++++++---------- 2 files changed, 95 insertions(+), 30 deletions(-) create mode 100644 changelog.d/5719.misc diff --git a/changelog.d/5719.misc b/changelog.d/5719.misc new file mode 100644 index 000000000000..6d5294724c7f --- /dev/null +++ b/changelog.d/5719.misc @@ -0,0 +1 @@ +Update the coding style document. diff --git a/docs/code_style.rst b/docs/code_style.rst index e3ca626bfd95..39ac4ebedc5c 100644 --- a/docs/code_style.rst +++ b/docs/code_style.rst @@ -1,4 +1,8 @@ -# Code Style +Code Style +========== + +Formatting tools +---------------- The Synapse codebase uses a number of code formatting tools in order to quickly and automatically check for formatting (and sometimes logical) errors @@ -6,20 +10,20 @@ in code. The necessary tools are detailed below. -## Formatting tools +- **black** -The Synapse codebase uses [black](https://pypi.org/project/black/) as an -opinionated code formatter, ensuring all comitted code is properly -formatted. + The Synapse codebase uses `black `_ as an + opinionated code formatter, ensuring all comitted code is properly + formatted. -First install ``black`` with:: + First install ``black`` with:: - pip install --upgrade black + pip install --upgrade black -Have ``black`` auto-format your code (it shouldn't change any -functionality) with:: + Have ``black`` auto-format your code (it shouldn't change any functionality) + with:: - black . --exclude="\.tox|build|env" + black . --exclude="\.tox|build|env" - **flake8** @@ -54,17 +58,16 @@ functionality is supported in your editor for a more convenient development workflow. It is not, however, recommended to run ``flake8`` on save as it takes a while and is very resource intensive. -## General rules +General rules +------------- - **Naming**: - Use camel case for class and type names - Use underscores for functions and variables. -- Use double quotes ``"foo"`` rather than single quotes ``'foo'``. - -- **Comments**: should follow the `google code style - `_. +- **Docstrings**: should follow the `google code style + `_. This is so that we can generate documentation with `sphinx `_. See the `examples @@ -73,6 +76,8 @@ takes a while and is very resource intensive. - **Imports**: + - Imports should be sorted by ``isort`` as described above. + - Prefer to import classes and functions rather than packages or modules. Example:: @@ -92,25 +97,84 @@ takes a while and is very resource intensive. This goes against the advice in the Google style guide, but it means that errors in the name are caught early (at import time). 
- - Multiple imports from the same package can be combined onto one line:: + - Avoid wildcard imports (``from synapse.types import *``) and relative + imports (``from .types import UserID``). - from synapse.types import GroupID, RoomID, UserID +Configuration file format +------------------------- - An effort should be made to keep the individual imports in alphabetical - order. +The `sample configuration file <./sample_config.yaml>`_ acts as a reference to +Synapse's configuration options for server administrators. Remember that many +readers will be unfamiliar with YAML and server administration in general, so +that it is important that the file be as easy to understand as possible, which +includes following a consistent format. - If the list becomes long, wrap it with parentheses and split it over - multiple lines. +Some guidelines follow: - - As per `PEP-8 `_, - imports should be grouped in the following order, with a blank line between - each group: +* Sections should be separated with a heading consisting of a single line + prefixed and suffixed with ``##``. There should be **two** blank lines + before the section header, and **one** after. - 1. standard library imports - 2. related third party imports - 3. local application/library specific imports +* Each option should be listed in the file with the following format: - - Imports within each group should be sorted alphabetically by module name. + * A comment describing the setting. Each line of this comment should be + prefixed with a hash (``#``) and a space. - - Avoid wildcard imports (``from synapse.types import *``) and relative - imports (``from .types import UserID``). + The comment should describe the default behaviour (ie, what happens if + the setting is omitted), as well as what the effect will be if the + setting is changed. + + Often, the comment end with something like "uncomment the + following to \". + + * A line consisting of only ``#``. + + * A commented-out example setting, prefixed with only ``#``. + + For boolean (on/off) options, convention is that this example should be + the *opposite* to the default (so the comment will end with "Uncomment + the following to enable [or disable] \." For other options, + the example should give some non-default value which is likely to be + useful to the reader. + +* There should be a blank line between each option. + +* Where several settings are grouped into a single dict, *avoid* the + convention where the whole block is commented out, resulting in comment + lines starting ``# #``, as this is hard to read and confusing to + edit. Instead, leave the top-level config option uncommented, and follow + the conventions above for sub-options. Ensure that your code correctly + handles the top-level option being set to ``None`` (as it will be if no + sub-options are enabled). + +* Lines should be wrapped at 80 characters. + +Example:: + + ## Frobnication ## + + # The frobnicator will ensure that all requests are fully frobnicated. + # To enable it, uncomment the following. + # + #frobnicator_enabled: true + + # By default, the frobnicator will frobnicate with the default frobber. + # The following will make it use an alternative frobber. + # + #frobincator_frobber: special_frobber + + # Settings for the frobber + # + frobber: + # frobbing speed. Defaults to 1. + # + #speed: 10 + + # frobbing distance. Defaults to 1000. + # + #distance: 100 + +Note that the sample configuration is generated from the synapse code and is +maintained by a script, ``scripts-dev/generate_sample_config``. 
Making sure +that the output from this script matches the desired format is left as an +exercise for the reader! From 5c05ae7ba0c7ec97b84d55efdbc91446361bf9e1 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 19 Jul 2019 12:03:36 +0100 Subject: [PATCH 011/136] Add 'rel' attribute to default welcome page. (#5695) add rel attribute as a precaution against reverse tabnabbing in future --- changelog.d/5695.misc | 1 + synapse/static/index.html | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5695.misc diff --git a/changelog.d/5695.misc b/changelog.d/5695.misc new file mode 100644 index 000000000000..4741d32e255b --- /dev/null +++ b/changelog.d/5695.misc @@ -0,0 +1 @@ +Add precautionary measures to prevent future abuse of `window.opener` in default welcome page. diff --git a/synapse/static/index.html b/synapse/static/index.html index d3f1c7dce098..bf46df90978c 100644 --- a/synapse/static/index.html +++ b/synapse/static/index.html @@ -48,13 +48,13 @@

         <h1>It works! Synapse is running</h1>
         <p>Your Synapse server is listening on this port and is ready for messages.</p>
-        <p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients" target="_blank">a Matrix client</a>.</p>
+        <p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients" target="_blank" rel="noopener noreferrer">a Matrix client</a>.</p>
         <p>Welcome to the Matrix universe :)</p>
         <hr>
         <p>
             <small>
-                <a href="https://matrix.org" target="_blank">
+                <a href="https://matrix.org" target="_blank" rel="noopener noreferrer">
                     matrix.org
                 </a>
             </small>
         </p>

From ebc5ed1296c433e97d4dcf1c8a5fc1477506e84e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 19 Jul 2019 13:29:02 +0100
Subject: [PATCH 012/136] Update comment for new column

---
 .../schema/delta/56/current_state_events_membership.sql | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership.sql
index ec7ad5bae21e..b2e08cd85dcb 100644
--- a/synapse/storage/schema/delta/56/current_state_events_membership.sql
+++ b/synapse/storage/schema/delta/56/current_state_events_membership.sql
@@ -16,6 +16,9 @@
 -- We add membership to current state so that we don't need to join against
 -- room_memberships, which can be surprisingly costly (we do such queries
 -- very frequently).
+-- This will be null for non-membership events and the content.membership key
+-- for membership events. (Will also be null for membership events until the
+-- background update job has finished).
 ALTER TABLE current_state_events ADD membership TEXT;
 
 INSERT INTO background_updates (update_name, progress_json) VALUES

From bd2e1a2aa86b81d232f9d14d2a82a04de4b1643d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 19 Jul 2019 13:36:04 +0100
Subject: [PATCH 013/136] LoggingTransaction accepts None for callback lists.

It's a bit disingenuous to give LoggingTransaction lists to append callbacks
to if we're not going to run the callbacks.

---
 synapse/storage/_base.py              | 18 ++++++++++++++++--
 synapse/storage/event_push_actions.py |  2 --
 synapse/storage/roommember.py         |  2 --
 3 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py
index 2f940dbae6a2..a7c93efa4654 100644
--- a/synapse/storage/_base.py
+++ b/synapse/storage/_base.py
@@ -86,7 +86,21 @@
 class LoggingTransaction(object):
     """An object that almost-transparently proxies for the 'txn' object
     passed to the constructor. Adds logging and metrics to the .execute()
-    method."""
+    method.
+
+    Args:
+        txn: The database transcation object to wrap.
+        name (str): The name of this transactions for logging.
+        database_engine (Sqlite3Engine|PostgresEngine)
+        after_callbacks(list|None): A list that callbacks will be appended to
+            that have been added by `call_after` which should be run on
+            successful completion of the transaction. None indicates that no
+            callbacks should be allowed to be scheduled to run.
+        exception_callbacks(list|None): A list that callbacks will be appended
+            to that have been added by `call_on_exception` which should be run
+            if transaction ends with an error. None indicates that no callbacks
+            should be allowed to be scheduled to run.
+ """ __slots__ = [ "txn", @@ -97,7 +111,7 @@ class LoggingTransaction(object): ] def __init__( - self, txn, name, database_engine, after_callbacks, exception_callbacks + self, txn, name, database_engine, after_callbacks=None, exception_callbacks=None ): object.__setattr__(self, "txn", txn) object.__setattr__(self, "name", name) diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index eca77069fd16..dcfb67e0294e 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -79,8 +79,6 @@ def __init__(self, db_conn, hs): db_conn.cursor(), name="_find_stream_orderings_for_times_txn", database_engine=self.database_engine, - after_callbacks=[], - exception_callbacks=[], ) self._find_stream_orderings_for_times_txn(cur) cur.close() diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 6541da3b8a75..257bcdb2f80d 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -70,8 +70,6 @@ def __init__(self, db_conn, hs): db_conn.cursor(), name="_check_safe_current_state_events_membership_updated", database_engine=self.database_engine, - after_callbacks=[], - exception_callbacks=[], ) self._check_safe_current_state_events_membership_updated_txn(txn) txn.close() From 2410335507b9fdaffb889755d76a11b0bea66f60 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Jul 2019 11:34:15 +0100 Subject: [PATCH 014/136] Use upsert when updating destination retry interval --- synapse/storage/transactions.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index fd1861917805..c585cf6cf79d 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -196,6 +196,26 @@ def set_destination_retry_timings(self, destination, retry_last_ts, retry_interv def _set_destination_retry_timings( self, txn, destination, retry_last_ts, retry_interval ): + + if self.database_engine.can_native_upsert: + # Upsert retry time interval if retry_interval is zero (i.e. we're + # resetting it) or greater than the existing retry interval. + + sql = """ + INSERT INTO destinations (destination, retry_last_ts, retry_interval) + VALUES (?, ?, ?) + ON CONFLICT (destination) DO UPDATE SET + retry_last_ts = EXCLUDED.retry_last_ts, + retry_interval = EXCLUDED.retry_interval + WHERE + EXCLUDED.retry_interval = 0 + OR destinations.retry_interval < EXCLUDED.retry_interval + """ + + txn.execute(sql, (destination, retry_last_ts, retry_interval)) + + return + self.database_engine.lock_table(txn, "destinations") # We need to be careful here as the data may have changed from under us From ced4fdaa84a9addcafc87ba1af6202de90dd2685 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 19 Jul 2019 11:37:37 +0100 Subject: [PATCH 015/136] Newsfile --- changelog.d/5720.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5720.misc diff --git a/changelog.d/5720.misc b/changelog.d/5720.misc new file mode 100644 index 000000000000..590f64f19d0c --- /dev/null +++ b/changelog.d/5720.misc @@ -0,0 +1 @@ +Improve database query performance when recording retry intervals for remote hosts. From 7b8bc618340598623555782c34af862ace5012c3 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 19 Jul 2019 16:29:57 +0100 Subject: [PATCH 016/136] Don't accept opentracing data from clients. (#5715) * Don't accept opentracing data from clients. 
* newsfile --- changelog.d/5715.misc | 1 + synapse/logging/opentracing.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5715.misc diff --git a/changelog.d/5715.misc b/changelog.d/5715.misc new file mode 100644 index 000000000000..a77366e0c0ed --- /dev/null +++ b/changelog.d/5715.misc @@ -0,0 +1 @@ +Don't accept opentracing data from clients. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 415040f5ee00..56d900080b19 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -340,8 +340,7 @@ def trace_servlet(servlet_name, func): @wraps(func) @defer.inlineCallbacks def _trace_servlet_inner(request, *args, **kwargs): - with start_active_span_from_context( - request.requestHeaders, + with start_active_span( "incoming-client-request", tags={ "request_id": request.get_request_id(), From c7095be9136825efc5bd85181b0395b833f96aee Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Jul 2019 17:49:19 +0100 Subject: [PATCH 017/136] Refactor Keyring._start_key_lookups There's an awful lot of deferreds and dictionaries flying around here. The whole thing can be made much simpler and achieve the same effect. --- synapse/crypto/keyring.py | 86 +++++++++++++++--------------------- tests/crypto/test_keyring.py | 29 ------------ 2 files changed, 35 insertions(+), 80 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 341c86315273..efa72dc5fc6b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -238,27 +238,9 @@ def _start_key_lookups(self, verify_requests): """ try: - # create a deferred for each server we're going to look up the keys - # for; we'll resolve them once we have completed our lookups. - # These will be passed into wait_for_previous_lookups to block - # any other lookups until we have finished. - # The deferreds are called with no logcontext. - server_to_deferred = { - rq.server_name: defer.Deferred() for rq in verify_requests - } - - # We want to wait for any previous lookups to complete before - # proceeding. - yield self.wait_for_previous_lookups(server_to_deferred) + ctx = LoggingContext.current_context() - # Actually start fetching keys. - self._get_server_verify_keys(verify_requests) - - # When we've finished fetching all the keys for a given server_name, - # resolve the deferred passed to `wait_for_previous_lookups` so that - # any lookups waiting will proceed. - # - # map from server name to a set of request ids + # map from server name to a set of outstanding request ids server_to_request_ids = {} for verify_request in verify_requests: @@ -266,40 +248,55 @@ def _start_key_lookups(self, verify_requests): request_id = id(verify_request) server_to_request_ids.setdefault(server_name, set()).add(request_id) - def remove_deferreds(res, verify_request): + # Wait for any previous lookups to complete before proceeding. + yield self.wait_for_previous_lookups(server_to_request_ids.keys()) + + # take out a lock on each of the servers by sticking a Deferred in + # key_downloads + for server_name in server_to_request_ids.keys(): + self.key_downloads[server_name] = defer.Deferred() + logger.debug("Got key lookup lock on %s", server_name) + + # When we've finished fetching all the keys for a given server_name, + # drop the lock by resolving the deferred in key_downloads. 
+ def lookup_done(res, verify_request): server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids[server_name].discard(request_id) - if not server_to_request_ids[server_name]: - d = server_to_deferred.pop(server_name, None) - if d: - d.callback(None) + server_requests = server_to_request_ids[server_name] + server_requests.remove(id(verify_request)) + + # if there are no more requests for this server, we can drop the lock. + if not server_requests: + with PreserveLoggingContext(ctx): + logger.debug("Releasing key lookup lock on %s", server_name) + + d = self.key_downloads.pop(server_name) + d.callback(None) return res for verify_request in verify_requests: - verify_request.key_ready.addBoth(remove_deferreds, verify_request) + verify_request.key_ready.addBoth(lookup_done, verify_request) + + # Actually start fetching keys. + self._get_server_verify_keys(verify_requests) except Exception: logger.exception("Error starting key lookups") @defer.inlineCallbacks - def wait_for_previous_lookups(self, server_to_deferred): + def wait_for_previous_lookups(self, server_names): """Waits for any previous key lookups for the given servers to finish. Args: - server_to_deferred (dict[str, Deferred]): server_name to deferred which gets - resolved once we've finished looking up keys for that server. - The Deferreds should be regular twisted ones which call their - callbacks with no logcontext. - - Returns: a Deferred which resolves once all key lookups for the given - servers have completed. Follows the synapse rules of logcontext - preservation. + server_names (Iterable[str]): list of servers which we want to look up + + Returns: + Deferred[None]: resolves once all key lookups for the given servers have + completed. Follows the synapse rules of logcontext preservation. """ loop_count = 1 while True: wait_on = [ (server_name, self.key_downloads[server_name]) - for server_name in server_to_deferred.keys() + for server_name in server_names if server_name in self.key_downloads ] if not wait_on: @@ -314,19 +311,6 @@ def wait_for_previous_lookups(self, server_to_deferred): loop_count += 1 - ctx = LoggingContext.current_context() - - def rm(r, server_name_): - with PreserveLoggingContext(ctx): - logger.debug("Releasing key lookup lock on %s", server_name_) - self.key_downloads.pop(server_name_, None) - return r - - for server_name, deferred in server_to_deferred.items(): - logger.debug("Got key lookup lock on %s", server_name) - self.key_downloads[server_name] = deferred - deferred.addBoth(rm, server_name) - def _get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 795703967d78..8d94a503d690 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -86,35 +86,6 @@ def check_context(self, _, expected): getattr(LoggingContext.current_context(), "request", None), expected ) - def test_wait_for_previous_lookups(self): - kr = keyring.Keyring(self.hs) - - lookup_1_deferred = defer.Deferred() - lookup_2_deferred = defer.Deferred() - - # we run the lookup in a logcontext so that the patched inlineCallbacks can check - # it is doing the right thing with logcontexts. - wait_1_deferred = run_in_context( - kr.wait_for_previous_lookups, {"server1": lookup_1_deferred} - ) - - # there were no previous lookups, so the deferred should be ready - self.successResultOf(wait_1_deferred) - - # set off another wait. 
It should block because the first lookup - # hasn't yet completed. - wait_2_deferred = run_in_context( - kr.wait_for_previous_lookups, {"server1": lookup_2_deferred} - ) - - self.assertFalse(wait_2_deferred.called) - - # let the first lookup complete (in the sentinel context) - lookup_1_deferred.callback(None) - - # now the second wait should complete. - self.successResultOf(wait_2_deferred) - def test_verify_json_objects_for_server_awaits_previous_requests(self): key1 = signedjson.key.generate_signing_key(1) From dcca56babad3a42ac9967995f7e6f9db51e37353 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Jul 2019 17:57:00 +0100 Subject: [PATCH 018/136] Add a delay to key lookup lock release to fix stack overflow A tactical call_later here should fix #5723 --- synapse/crypto/keyring.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index efa72dc5fc6b..e8bb420ad1df 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -259,6 +259,10 @@ def _start_key_lookups(self, verify_requests): # When we've finished fetching all the keys for a given server_name, # drop the lock by resolving the deferred in key_downloads. + def drop_server_lock(server_name): + d = self.key_downloads.pop(server_name) + d.callback(None) + def lookup_done(res, verify_request): server_name = verify_request.server_name server_requests = server_to_request_ids[server_name] @@ -269,8 +273,10 @@ def lookup_done(res, verify_request): with PreserveLoggingContext(ctx): logger.debug("Releasing key lookup lock on %s", server_name) - d = self.key_downloads.pop(server_name) - d.callback(None) + # ... but not immediately, as that can cause stack explosions if + # we get a long queue of lookups. + self.clock.call_later(0, drop_server_lock, server_name) + return res for verify_request in verify_requests: From f214bff0c0af157429525098fb6ebb9ca0579fcd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 19 Jul 2019 17:58:17 +0100 Subject: [PATCH 019/136] changelog --- changelog.d/5724.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5724.bugfix diff --git a/changelog.d/5724.bugfix b/changelog.d/5724.bugfix new file mode 100644 index 000000000000..1b3683daf6b2 --- /dev/null +++ b/changelog.d/5724.bugfix @@ -0,0 +1 @@ +Fix stack overflow in server key lookup code. \ No newline at end of file From dc7cf81267e464858c74e6215184de0c634e2b26 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 19 Jul 2019 18:16:42 +0100 Subject: [PATCH 020/136] Remove deprecated 'verbose' cli arg --- demo/start.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/demo/start.sh b/demo/start.sh index 1c4f12d0bb1b..eccaa2abebf1 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -29,7 +29,7 @@ for port in 8080 8081 8082; do if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config - + echo 'enable_registration: true' >> $DIR/etc/$port.config # Warning, this heredoc depends on the interaction of tabs and spaces. 
Please don't @@ -43,7 +43,7 @@ for port in 8080 8081 8082; do tls: true resources: - names: [client, federation] - + - port: $port tls: false bind_addresses: ['::1', '127.0.0.1'] @@ -68,7 +68,7 @@ for port in 8080 8081 8082; do # Generate tls keys openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix" - + # Ignore keys from the trusted keys server echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config echo 'trusted_key_servers:' >> $DIR/etc/$port.config @@ -120,7 +120,6 @@ for port in 8080 8081 8082; do python3 -m synapse.app.homeserver \ --config-path "$DIR/etc/$port.config" \ -D \ - -vv \ popd done From f99554b15d25432e924f36ed01c8297346c2822c Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 19 Jul 2019 18:19:27 +0100 Subject: [PATCH 021/136] Revert "Remove deprecated 'verbose' cli arg" This reverts commit dc7cf81267e464858c74e6215184de0c634e2b26. --- demo/start.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/demo/start.sh b/demo/start.sh index eccaa2abebf1..1c4f12d0bb1b 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -29,7 +29,7 @@ for port in 8080 8081 8082; do if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config - + echo 'enable_registration: true' >> $DIR/etc/$port.config # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't @@ -43,7 +43,7 @@ for port in 8080 8081 8082; do tls: true resources: - names: [client, federation] - + - port: $port tls: false bind_addresses: ['::1', '127.0.0.1'] @@ -68,7 +68,7 @@ for port in 8080 8081 8082; do # Generate tls keys openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix" - + # Ignore keys from the trusted keys server echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config echo 'trusted_key_servers:' >> $DIR/etc/$port.config @@ -120,6 +120,7 @@ for port in 8080 8081 8082; do python3 -m synapse.app.homeserver \ --config-path "$DIR/etc/$port.config" \ -D \ + -vv \ popd done From f337d2f0f089e5b53e13c85fef0b89e93defa5e5 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Mon, 22 Jul 2019 11:31:05 +0100 Subject: [PATCH 022/136] Demo uses deprecated cli option (#5725) * Remove deprecated 'verbose' cli arg * Create 5725.bugfix --- changelog.d/5725.bugfix | 1 + demo/start.sh | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5725.bugfix diff --git a/changelog.d/5725.bugfix b/changelog.d/5725.bugfix new file mode 100644 index 000000000000..73ef419727d9 --- /dev/null +++ b/changelog.d/5725.bugfix @@ -0,0 +1 @@ +start.sh no longer uses deprecated cli option. diff --git a/demo/start.sh b/demo/start.sh index 1c4f12d0bb1b..eccaa2abebf1 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -29,7 +29,7 @@ for port in 8080 8081 8082; do if ! grep -F "Customisation made by demo/start.sh" -q $DIR/etc/$port.config; then printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config - + echo 'enable_registration: true' >> $DIR/etc/$port.config # Warning, this heredoc depends on the interaction of tabs and spaces. 
Please don't @@ -43,7 +43,7 @@ for port in 8080 8081 8082; do tls: true resources: - names: [client, federation] - + - port: $port tls: false bind_addresses: ['::1', '127.0.0.1'] @@ -68,7 +68,7 @@ for port in 8080 8081 8082; do # Generate tls keys openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix" - + # Ignore keys from the trusted keys server echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config echo 'trusted_key_servers:' >> $DIR/etc/$port.config @@ -120,7 +120,6 @@ for port in 8080 8081 8082; do python3 -m synapse.app.homeserver \ --config-path "$DIR/etc/$port.config" \ -D \ - -vv \ popd done From 5ea773c50568088b30e15728b65480d0335fe14e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 13:15:08 +0100 Subject: [PATCH 023/136] Cache get_version_string. The version of a module isn't going to change over the lifetime of the process (assuming no funky hot reloading is going on, which it isn't), so let's just cache the result to avoid spawning lots of git subprocesses. Fixes #5672. --- synapse/util/versionstring.py | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index a4d9a462f790..fa404b9d7563 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -22,6 +22,23 @@ def get_version_string(module): + """Given a module calculate a git-aware version string for it. + + If called on a module not in a git checkout will return `__verison__`. + + Args: + module (module) + + Returns: + str + """ + + cached_version = getattr(module, "_synapse_version_string_cache", None) + if cached_version: + return cached_version + + version_string = module.__version__ + try: null = open(os.devnull, "w") cwd = os.path.dirname(os.path.abspath(module.__file__)) @@ -80,8 +97,10 @@ def get_version_string(module): s for s in (git_branch, git_tag, git_commit, git_dirty) if s ) - return "%s (%s)" % (module.__version__, git_version) + version_string = "%s (%s)" % (module.__version__, git_version) except Exception as e: logger.info("Failed to check for git repository: %s", e) - return module.__version__ + module._synapse_version_string_cache = version_string + + return version_string From 2017369f7d134fb40f52564123819d6c77f4f9b0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 13:18:25 +0100 Subject: [PATCH 024/136] Newsfile --- changelog.d/5730.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5730.misc diff --git a/changelog.d/5730.misc b/changelog.d/5730.misc new file mode 100644 index 000000000000..88767fe2f763 --- /dev/null +++ b/changelog.d/5730.misc @@ -0,0 +1 @@ +Cache result of get_version_string to reduce overhead off /versions client and federation requests. From 66f5ff72fd2179c2cbb6a7755d36273d51a2e32f Mon Sep 17 00:00:00 2001 From: Jason Robinson Date: Mon, 22 Jul 2019 15:21:19 +0300 Subject: [PATCH 025/136] Add `user_type` to returned fields in admin API user list endpoints Mostly user type will be empty (normal user) but there is also the "support" user type. 
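For illustration, a single entry in the user list these endpoints return would
now carry the extra field. This is only a sketch: the values below are
invented, and the exact response shape is whatever the surrounding endpoint
serialises from the selected columns.

    # Hypothetical example of one user entry returned by the admin API
    # after this change; all values are illustrative.
    {
        "name": "@alice:example.com",
        "password_hash": "<hashed password>",
        "is_guest": 0,
        "admin": 0,
        "user_type": None,  # None for normal users, "support" for support users
    }
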
Signed-off-by: Jason Robinson --- changelog.d/5731.misc | 1 + synapse/storage/__init__.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5731.misc diff --git a/changelog.d/5731.misc b/changelog.d/5731.misc new file mode 100644 index 000000000000..dffae5d874e7 --- /dev/null +++ b/changelog.d/5731.misc @@ -0,0 +1 @@ +Return 'user_type' in admin API user endpoints results. diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 6b0ca800876c..86a333a91916 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -469,7 +469,7 @@ def get_users(self): return self._simple_select_list( table="users", keyvalues={}, - retcols=["name", "password_hash", "is_guest", "admin"], + retcols=["name", "password_hash", "is_guest", "admin", "user_type"], desc="get_users", ) @@ -494,7 +494,7 @@ def get_users_paginate(self, order, start, limit): orderby=order, start=start, limit=limit, - retcols=["name", "password_hash", "is_guest", "admin"], + retcols=["name", "password_hash", "is_guest", "admin", "user_type"], ) count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn) retval = {"users": users, "total": count} @@ -514,7 +514,7 @@ def search_users(self, term): table="users", term=term, col="name", - retcols=["name", "password_hash", "is_guest", "admin"], + retcols=["name", "password_hash", "is_guest", "admin", "user_type"], desc="search_users", ) From 22e862304a9c3faac86d4373a50a3b7efd6758b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 14:09:56 +0100 Subject: [PATCH 026/136] Update changelog.d/5730.misc Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5730.misc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5730.misc b/changelog.d/5730.misc index 88767fe2f763..a99677f5e7a8 100644 --- a/changelog.d/5730.misc +++ b/changelog.d/5730.misc @@ -1 +1 @@ -Cache result of get_version_string to reduce overhead off /versions client and federation requests. +Cache result of get_version_string to reduce overhead of `/version` federation requests. From c560b791e1eb50cded53886b926abdc102cf2e51 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 14:19:38 +0100 Subject: [PATCH 027/136] Add process hooks to tell systemd our state. Fixes #5676. --- synapse/app/_base.py | 29 +++++++++++++++++++++++++++++ synapse/python_dependencies.py | 1 + 2 files changed, 30 insertions(+) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 540dbd92369c..c010e7095580 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -15,10 +15,12 @@ import gc import logging +import os import signal import sys import traceback +import sdnotify from daemonize import Daemonize from twisted.internet import defer, error, reactor @@ -242,9 +244,16 @@ def start(hs, listeners=None): if hasattr(signal, "SIGHUP"): def handle_sighup(*args, **kwargs): + # Tell systemd our state, if we're using it. This will silently fail if + # we're not using systemd. 
+            sd_channel = sdnotify.SystemdNotifier()
+            sd_channel.notify("RELOADING=1")
+
             for i in _sighup_callbacks:
                 i(hs)

+            sd_channel.notify("READY=1")
+
         signal.signal(signal.SIGHUP, handle_sighup)

         register_sighup(refresh_certificate)
@@ -260,6 +269,7 @@ def handle_sighup(*args, **kwargs):
             hs.get_datastore().start_profiling()

         setup_sentry(hs)
+        setup_sdnotify(hs)
     except Exception:
         traceback.print_exc(file=sys.stderr)
         reactor = hs.get_reactor()
@@ -292,6 +302,25 @@ def setup_sentry(hs):
             scope.set_tag("worker_name", name)


+def setup_sdnotify(hs):
+    """Adds process state hooks to tell systemd what we are up to.
+    """
+
+    # Tell systemd our state, if we're using it. This will silently fail if
+    # we're not using systemd.
+    sd_channel = sdnotify.SystemdNotifier()
+
+    hs.get_reactor().addSystemEventTrigger(
+        "after",
+        "startup",
+        lambda: sd_channel.notify("READY=1\nMAINPID=%s" % (os.getpid())),
+    )
+
+    hs.get_reactor().addSystemEventTrigger(
+        "before", "shutdown", lambda: sd_channel.notify("STOPPING=1")
+    )
+
+
 def install_dns_limiter(reactor, max_dns_requests_in_flight=100):
     """Replaces the resolver with one that limits the number of in flight DNS
     requests.

diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index c6465c0386df..195a7a70c8c2 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -72,6 +72,7 @@
     "netaddr>=0.7.18",
     "Jinja2>=2.9",
     "bleach>=1.4.3",
+    "sdnotify>=0.3",
 ]

 CONDITIONAL_REQUIREMENTS = {

From 79f689e6c2ead8b24bf75fcb99acd0fb0faca324 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 22 Jul 2019 14:51:53 +0100
Subject: [PATCH 028/136] Newsfile
---
 changelog.d/5732.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5732.feature

diff --git a/changelog.d/5732.feature b/changelog.d/5732.feature
new file mode 100644
index 000000000000..9021864350cf
--- /dev/null
+++ b/changelog.d/5732.feature
@@ -0,0 +1 @@
+Add sd_notify hooks to ease systemd integration and allow usage of Type=notify.

From 80cfad233efc6b03c75ab5496db7079466eeb894 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 22 Jul 2019 15:22:14 +0100
Subject: [PATCH 029/136] Call startup commands as system triggers.

This helps ensure that we only consider ourselves "up" once all the
startup functions have completed.
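
A minimal sketch (not part of the patches) of the sd_notify handshake the
hooks above perform, using the same `sdnotify` package the series adds as a
dependency. Every notify() call is a silent no-op when the process was not
started by systemd:

    import os

    import sdnotify

    sd_channel = sdnotify.SystemdNotifier()

    # ... complete all startup work first ...

    # Under Type=notify, systemd only treats the unit as started once it
    # sees READY=1; MAINPID tells it which process to track.
    sd_channel.notify("READY=1\nMAINPID=%s" % (os.getpid(),))

    # Later: around a SIGHUP-style reload, and when shutting down.
    sd_channel.notify("RELOADING=1")
    sd_channel.notify("READY=1")
    sd_channel.notify("STOPPING=1")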
--- synapse/app/appservice.py | 4 +++- synapse/app/client_reader.py | 4 +++- synapse/app/event_creator.py | 4 +++- synapse/app/federation_reader.py | 4 +++- synapse/app/federation_sender.py | 4 +++- synapse/app/frontend_proxy.py | 4 +++- synapse/app/homeserver.py | 2 +- synapse/app/media_repository.py | 4 +++- synapse/app/pusher.py | 2 +- synapse/app/synchrotron.py | 4 +++- synapse/app/user_dir.py | 4 +++- 11 files changed, 29 insertions(+), 11 deletions(-) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index e01f3e5f3b04..54bb114dec74 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -168,7 +168,9 @@ def start(config_options): ) ps.setup() - reactor.callWhenRunning(_base.start, ps, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ps, config.worker_listeners + ) _base.start_worker_reactor("synapse-appservice", config) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 29bddc4823f1..721bb5b119f3 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -194,7 +194,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-client-reader", config) diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index 042cfd04afcf..473c8895d0fe 100644 --- a/synapse/app/event_creator.py +++ b/synapse/app/event_creator.py @@ -193,7 +193,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-event-creator", config) diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 76a97f8f3271..5255d9e8ccef 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -175,7 +175,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-federation-reader", config) diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index fec49d509299..c5a2880e6995 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -198,7 +198,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-federation-sender", config) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 1f1f1df78e5e..5b563c277883 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -247,7 +247,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-frontend-proxy", config) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 0c075cb3f1eb..34c3f5ee99a0 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -447,7 +447,7 @@ def start(): reactor.stop() sys.exit(1) 
- reactor.callWhenRunning(start) + reactor.addSystemEventTrigger("before", "startup", start) return hs diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index d70780e9d54e..ea26f29acb88 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -161,7 +161,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-media-repository", config) diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 070de7d0b015..692ffa2f0482 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -216,7 +216,7 @@ def start(): _base.start(ps, config.worker_listeners) ps.get_pusherpool().start() - reactor.callWhenRunning(start) + reactor.addSystemEventTrigger("before", "startup", start) _base.start_worker_reactor("synapse-pusher", config) diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index 315c03069477..a1c3b162f7ed 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -451,7 +451,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-synchrotron", config) diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 03ef21bd01d8..cb29a1afabec 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -224,7 +224,9 @@ def start(config_options): ) ss.setup() - reactor.callWhenRunning(_base.start, ss, config.worker_listeners) + reactor.addSystemEventTrigger( + "before", "startup", _base.start, ss, config.worker_listeners + ) _base.start_worker_reactor("synapse-user-dir", config) From 17c27df6ea81f18da186ada5e3b79200c84f6d55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 22 Jul 2019 15:24:25 +0100 Subject: [PATCH 030/136] Update example systemd service file --- contrib/systemd/matrix-synapse.service | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service index 595b69916c59..38d369ea3d49 100644 --- a/contrib/systemd/matrix-synapse.service +++ b/contrib/systemd/matrix-synapse.service @@ -14,7 +14,9 @@ Description=Synapse Matrix homeserver [Service] -Type=simple +Type=notify +NotifyAccess=main +ExecReload=/bin/kill -HUP $MAINPID Restart=on-abort User=synapse From 0d0f6d12bc84b106ac83ecf824bd722a08070b78 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 23 Jul 2019 01:05:00 +1000 Subject: [PATCH 031/136] Fix logging in workers (#5729) This also adds a worker blacklist. 
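
An aside on the reactor hooks used by the two patches above (a sketch, not
part of the series): a "before"-startup trigger runs, and any Deferred it
returns is waited on, before the reactor fires its "after"-startup triggers.
Since the sd_notify READY=1 hook is registered "after" startup, switching the
start functions from callWhenRunning (which is effectively an "after"-startup
trigger) means systemd is only told we are up once initialisation has
actually finished:

    from twisted.internet import reactor

    def init():
        print("startup work; may return a Deferred to delay later phases")

    def ready():
        print("runs only once every 'before' startup trigger completes")

    reactor.addSystemEventTrigger("before", "startup", init)
    reactor.addSystemEventTrigger("after", "startup", ready)

    reactor.callLater(0.1, reactor.stop)
    reactor.run()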
---
 .buildkite/pipeline.yml | 3 ++-
 .buildkite/worker-blacklist | 28 ++++++++++++++++++++++++++++
 changelog.d/5729.removal | 1 +
 synapse/config/workers.py | 1 +
 4 files changed, 32 insertions(+), 1 deletion(-)
 create mode 100644 .buildkite/worker-blacklist
 create mode 100644 changelog.d/5729.removal

diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index d5e5aeec6b84..c8ae1a44bed5 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -220,8 +220,10 @@ steps:
     env:
       POSTGRES: "1"
       WORKERS: "1"
+      BLACKLIST: "synapse-blacklist-with-workers"
     command:
       - "bash .buildkite/merge_base_branch.sh"
+      - "bash -c 'cat /src/sytest-blacklist /src/.buildkite/worker-blacklist > /src/synapse-blacklist-with-workers'"
       - "bash /synapse_sytest.sh"
     plugins:
       - docker#v3.0.1:
@@ -229,7 +231,6 @@ steps:
           propagate-environment: true
           always-pull: true
           workdir: "/src"
-    soft_fail: true
     retry:
       automatic:
         - exit_status: -1
diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
new file mode 100644
index 000000000000..a211ed7b1874
--- /dev/null
+++ b/.buildkite/worker-blacklist
@@ -0,0 +1,28 @@
+# This file serves as a blacklist for SyTest tests that we expect will fail in
+# Synapse when run under worker mode. For more details, see sytest-blacklist.
+
+Message history can be paginated
+
+m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users
+
+m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users
+
+Can re-join room if re-invited
+
+/upgrade creates a new room
+
+The only membership state included in an initial sync is for all the senders in the timeline
+
+Local device key changes get to remote servers
+
+If remote user leaves room we no longer receive device updates
+
+Forgotten room messages cannot be paginated
+
+Inbound federation can get public room list
+
+Members from the gap are included in gappy incr LL sync
+
+Leaves are present in non-gapped incremental syncs
+
+Old leaves are present in gapped incremental syncs
\ No newline at end of file
diff --git a/changelog.d/5729.removal b/changelog.d/5729.removal
new file mode 100644
index 000000000000..3af5198e6bb4
--- /dev/null
+++ b/changelog.d/5729.removal
@@ -0,0 +1 @@
+ Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index 246d72cd611b..bc0fc165e3a1 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -31,6 +31,7 @@ def read_config(self, config, **kwargs):
         self.worker_listeners = config.get("worker_listeners", [])
         self.worker_daemonize = config.get("worker_daemonize")
         self.worker_pid_file = config.get("worker_pid_file")
+        self.worker_log_config = config.get("worker_log_config")

         # The host used to connect to the main synapse
         self.worker_replication_host = config.get("worker_replication_host", None)

From 22e862304a9c3faac86d4373a50a3b7efd6758b1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 22 Jul 2019 16:07:12 +0100
Subject: [PATCH 032/136] Don't package sytest-blacklist file.

I don't think it's useful, and I don't even know where it would end up.
---
 MANIFEST.in | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 834ddfad39c6..919cd8a1cdb8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,7 +7,6 @@ include demo/README
 include demo/demo.tls.dh
 include demo/*.py
 include demo/*.sh
-include sytest-blacklist

 recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.sql.postgres
@@ -34,6 +33,7 @@ exclude Dockerfile
 exclude .dockerignore
 exclude test_postgresql.sh
 exclude .editorconfig
+exclude sytest-blacklist

 include pyproject.toml
 recursive-include changelog.d *

From d9ea9881d252790ac7f1e3525217e37ef9bbceb9 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 22 Jul 2019 16:09:15 +0100
Subject: [PATCH 033/136] Newsfile
---
 changelog.d/5733.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5733.misc

diff --git a/changelog.d/5733.misc b/changelog.d/5733.misc
new file mode 100644
index 000000000000..a2a8c26383f6
--- /dev/null
+++ b/changelog.d/5733.misc
@@ -0,0 +1 @@
+Don't package the sytest test blacklist file.

From b2a629ef498df2b0585c9474613dda778bec7be0 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Jul 2019 09:49:26 +0100
Subject: [PATCH 034/136] Speed up current state background update.

Turns out that storing huge JSON arrays in the progress JSON isn't
something that postgres particularly likes.
---
 synapse/storage/roommember.py | 48 ++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 18 deletions(-)

diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py
index 257bcdb2f80d..b3c002b9eb92 100644
--- a/synapse/storage/roommember.py
+++ b/synapse/storage/roommember.py
@@ -852,22 +852,25 @@ def add_membership_profile_txn(txn):

     @defer.inlineCallbacks
     def _background_current_state_membership(self, progress, batch_size):
         """Update the new membership column on current_state_events.
+
+        This works by iterating over all rooms in alphabetical order.
         """
-        if "rooms" not in progress:
-            rooms = yield self._simple_select_onecol(
-                table="current_state_events",
-                keyvalues={},
-                retcol="DISTINCT room_id",
-                desc="_background_current_state_membership_get_rooms",
-            )
-            progress["rooms"] = rooms
+        def _background_current_state_membership_txn(txn, last_processed_room):
+            processed = 0
+            while processed < batch_size:
+                txn.execute(
+                    """
+                        SELECT MIN(room_id) FROM rooms WHERE room_id > ?
+                    """,
+                    (last_processed_room,),
+                )
+                row = txn.fetchone()
+                if not row or not row[0]:
+                    return processed, True

-        rooms = progress["rooms"]
+                next_room, = row

-        def _background_current_state_membership_txn(txn):
-            processed = 0
-            while rooms and processed < batch_size:
                 sql = """
                     UPDATE current_state_events AS c
                     SET membership = (
@@ -876,24 +879,33 @@ def _background_current_state_membership_txn(txn):
                     )
                     WHERE room_id = ?
                 """
-                txn.execute(sql, (rooms.pop(),))
+                txn.execute(sql, (next_room,))
                 processed += txn.rowcount

+                last_processed_room = next_room
+
             self._background_update_progress_txn(
-                txn, _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, progress
+                txn,
+                _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME,
+                {"last_processed_room": last_processed_room},
             )

-            return processed
+            return processed, False

-        result = yield self.runInteraction(
+        # If we haven't got a last processed room then just use the empty
+        # string, which will compare before all room IDs correctly.
+ last_processed_room = progress.get("last_processed_room", "") + + row_count, finished = yield self.runInteraction( "_background_current_state_membership_update", _background_current_state_membership_txn, + last_processed_room, ) - if not rooms: + if finished: yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME) - defer.returnValue(result) + defer.returnValue(row_count) class _JoinedHostsCache(object): From cf0006719d9086000ddcd2d3129364197a6fa875 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 09:53:23 +0100 Subject: [PATCH 035/136] Newsfile --- changelog.d/5738.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5738.misc diff --git a/changelog.d/5738.misc b/changelog.d/5738.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5738.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From cda4460d99d0956359767ef7a2b8a9740d5aec7c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 10:13:12 +0100 Subject: [PATCH 036/136] Also update systemd-with-workers contrib examples --- .../systemd-with-workers/system/matrix-synapse-worker@.service | 3 ++- contrib/systemd-with-workers/system/matrix-synapse.service | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service index 9d980d516881..3507e2e9896b 100644 --- a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service +++ b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service @@ -4,7 +4,8 @@ After=matrix-synapse.service BindsTo=matrix-synapse.service [Service] -Type=simple +Type=notify +NotifyAccess=main User=matrix-synapse WorkingDirectory=/var/lib/matrix-synapse EnvironmentFile=/etc/default/matrix-synapse diff --git a/contrib/systemd-with-workers/system/matrix-synapse.service b/contrib/systemd-with-workers/system/matrix-synapse.service index 3aae19034cb8..68e8991f187d 100644 --- a/contrib/systemd-with-workers/system/matrix-synapse.service +++ b/contrib/systemd-with-workers/system/matrix-synapse.service @@ -2,7 +2,8 @@ Description=Synapse Matrix Homeserver [Service] -Type=simple +Type=notify +NotifyAccess=main User=matrix-synapse WorkingDirectory=/var/lib/matrix-synapse EnvironmentFile=/etc/default/matrix-synapse From 1883223a01ee17b8813a2aca9493532bb07915d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 10:26:52 +0100 Subject: [PATCH 037/136] Mark flakey tests as blacklisted for worker mode --- .buildkite/worker-blacklist | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index a211ed7b1874..8ed8eef1a356 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -25,4 +25,10 @@ Members from the gap are included in gappy incr LL sync Leaves are present in non-gapped incremental syncs -Old leaves are present in gapped incremental syncs \ No newline at end of file +Old leaves are present in gapped incremental syncs + +User sees updates to presence from other users in the incremental sync. 
+ +Gapped incremental syncs include all state changes + +Old members are included in gappy incr LL sync if they start speaking From 22d2338aceed83c6b32081e0118c7653bb9474e6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 10:27:53 +0100 Subject: [PATCH 038/136] Newsfile --- changelog.d/5740.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5740.misc diff --git a/changelog.d/5740.misc b/changelog.d/5740.misc new file mode 100644 index 000000000000..97a476bef557 --- /dev/null +++ b/changelog.d/5740.misc @@ -0,0 +1 @@ +Blacklist some flakey tests in worker mode. From 3db1377b261eaf3fcff486547d6302ccb24553e5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 13:31:03 +0100 Subject: [PATCH 039/136] Log when we receive receipt from a different origin --- synapse/handlers/receipts.py | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a85dd8cdee69..e58bf7e360bf 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -17,7 +17,7 @@ from twisted.internet import defer from synapse.handlers._base import BaseHandler -from synapse.types import ReadReceipt +from synapse.types import ReadReceipt, get_domain_from_id logger = logging.getLogger(__name__) @@ -40,18 +40,27 @@ def __init__(self, hs): def _received_remote_receipt(self, origin, content): """Called when we receive an EDU of type m.receipt from a remote HS. """ - receipts = [ - ReadReceipt( - room_id=room_id, - receipt_type=receipt_type, - user_id=user_id, - event_ids=user_values["event_ids"], - data=user_values.get("data", {}), - ) - for room_id, room_values in content.items() - for receipt_type, users in room_values.items() - for user_id, user_values in users.items() - ] + receipts = [] + for room_id, room_values in content.items(): + for receipt_type, users in room_values.items(): + for user_id, user_values in users.items(): + if get_domain_from_id(user_id) != origin: + logger.info( + "Received receipt for user %r from server %s, ignoring", + user_id, + origin, + ) + continue + + receipts.append( + ReadReceipt( + room_id=room_id, + receipt_type=receipt_type, + user_id=user_id, + event_ids=user_values["event_ids"], + data=user_values.get("data", {}), + ) + ) yield self._handle_new_receipts(receipts) From 18a466b84e52b6e8c51a878e612d86410d6af680 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Tue, 23 Jul 2019 13:31:16 +0100 Subject: [PATCH 040/136] Opentracing Utils (#5722) * Add decerators for tracing functions * Use the new clean contexts * Context and edu utils * Move opentracing setters * Move whitelisting * Sectioning comments * Better args wrapper * Docstrings Co-Authored-By: Erik Johnston * Remove unused methods. * Don't use global * One tracing decorator to rule them all. --- changelog.d/5722.misc | 1 + synapse/logging/opentracing.py | 455 +++++++++++++++++++------ synapse/logging/scopecontextmanager.py | 2 +- 3 files changed, 357 insertions(+), 101 deletions(-) create mode 100644 changelog.d/5722.misc diff --git a/changelog.d/5722.misc b/changelog.d/5722.misc new file mode 100644 index 000000000000..f2d236188dee --- /dev/null +++ b/changelog.d/5722.misc @@ -0,0 +1 @@ +Add a set of opentracing utils. 
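
Before the opentracing.py diff itself, a rough usage sketch of the three
decorators this patch introduces (illustrative only; it assumes a tracer has
been initialised with init_tracer, and the class and method names here are
invented):

    from synapse.logging.opentracing import (
        tag_args,
        trace,
        trace_using_operation_name,
    )

    class RoomHandler(object):
        @trace          # span is named after the function
        @tag_args       # arguments are attached to the span as tags
        def get_room(self, room_id):
            return {"room_id": room_id}

        @trace_using_operation_name("fetch_members")  # explicit span name
        def get_members(self, room_id):
            return []

All three degrade to no-ops when opentracing is not configured, so decorated
code keeps working on servers that run without a tracer.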
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 04393697c03f..96a4714d82de 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -11,7 +11,7 @@ # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and -# limitations under the License.import opentracing +# limitations under the License. # NOTE @@ -150,10 +150,13 @@ def deferred_function(*args, **kwargs): """ import contextlib +import inspect import logging import re from functools import wraps +from canonicaljson import json + from twisted.internet import defer from synapse.config import ConfigError @@ -173,36 +176,12 @@ def deferred_function(*args, **kwargs): logger = logging.getLogger(__name__) -class _DumTagNames(object): - """wrapper of opentracings tags. We need to have them if we - want to reference them without opentracing around. Clearly they - should never actually show up in a trace. `set_tags` overwrites - these with the correct ones.""" +# Block everything by default +# A regex which matches the server_names to expose traces for. +# None means 'block everything'. +_homeserver_whitelist = None - INVALID_TAG = "invalid-tag" - COMPONENT = INVALID_TAG - DATABASE_INSTANCE = INVALID_TAG - DATABASE_STATEMENT = INVALID_TAG - DATABASE_TYPE = INVALID_TAG - DATABASE_USER = INVALID_TAG - ERROR = INVALID_TAG - HTTP_METHOD = INVALID_TAG - HTTP_STATUS_CODE = INVALID_TAG - HTTP_URL = INVALID_TAG - MESSAGE_BUS_DESTINATION = INVALID_TAG - PEER_ADDRESS = INVALID_TAG - PEER_HOSTNAME = INVALID_TAG - PEER_HOST_IPV4 = INVALID_TAG - PEER_HOST_IPV6 = INVALID_TAG - PEER_PORT = INVALID_TAG - PEER_SERVICE = INVALID_TAG - SAMPLING_PRIORITY = INVALID_TAG - SERVICE = INVALID_TAG - SPAN_KIND = INVALID_TAG - SPAN_KIND_CONSUMER = INVALID_TAG - SPAN_KIND_PRODUCER = INVALID_TAG - SPAN_KIND_RPC_CLIENT = INVALID_TAG - SPAN_KIND_RPC_SERVER = INVALID_TAG +# Util methods def only_if_tracing(func): @@ -219,11 +198,13 @@ def _only_if_tracing_inner(*args, **kwargs): return _only_if_tracing_inner -# A regex which matches the server_names to expose traces for. -# None means 'block everything'. -_homeserver_whitelist = None +@contextlib.contextmanager +def _noop_context_manager(*args, **kwargs): + """Does exactly what it says on the tin""" + yield + -tags = _DumTagNames +# Setup def init_tracer(config): @@ -260,12 +241,38 @@ def init_tracer(config): tags = opentracing.tags -@contextlib.contextmanager -def _noop_context_manager(*args, **kwargs): - """Does absolutely nothing really well. Can be entered and exited arbitrarily. 
- Good substitute for an opentracing scope.""" - yield +# Whitelisting + + +@only_if_tracing +def set_homeserver_whitelist(homeserver_whitelist): + """Sets the homeserver whitelist + Args: + homeserver_whitelist (Iterable[str]): regex of whitelisted homeservers + """ + global _homeserver_whitelist + if homeserver_whitelist: + # Makes a single regex which accepts all passed in regexes in the list + _homeserver_whitelist = re.compile( + "({})".format(")|(".join(homeserver_whitelist)) + ) + + +@only_if_tracing +def whitelisted_homeserver(destination): + """Checks if a destination matches the whitelist + + Args: + destination (str) + """ + _homeserver_whitelist + if _homeserver_whitelist: + return _homeserver_whitelist.match(destination) + return False + + +# Start spans and scopes # Could use kwargs but I want these to be explicit def start_active_span( @@ -285,8 +292,10 @@ def start_active_span( Returns: scope (Scope) or noop_context_manager """ + if opentracing is None: return _noop_context_manager() + else: # We need to enter the scope here for the logcontext to become active return opentracing.tracer.start_active_span( @@ -300,63 +309,13 @@ def start_active_span( ) -@only_if_tracing -def close_active_span(): - """Closes the active span. This will close it's logcontext if the context - was made for the span""" - opentracing.tracer.scope_manager.active.__exit__(None, None, None) - - -@only_if_tracing -def set_tag(key, value): - """Set's a tag on the active span""" - opentracing.tracer.active_span.set_tag(key, value) - - -@only_if_tracing -def log_kv(key_values, timestamp=None): - """Log to the active span""" - opentracing.tracer.active_span.log_kv(key_values, timestamp) - - -# Note: we don't have a get baggage items because we're trying to hide all -# scope and span state from synapse. I think this method may also be useless -# as a result -@only_if_tracing -def set_baggage_item(key, value): - """Attach baggage to the active span""" - opentracing.tracer.active_span.set_baggage_item(key, value) - - -@only_if_tracing -def set_operation_name(operation_name): - """Sets the operation name of the active span""" - opentracing.tracer.active_span.set_operation_name(operation_name) - - -@only_if_tracing -def set_homeserver_whitelist(homeserver_whitelist): - """Sets the whitelist - - Args: - homeserver_whitelist (iterable of strings): regex of whitelisted homeservers - """ - global _homeserver_whitelist - if homeserver_whitelist: - # Makes a single regex which accepts all passed in regexes in the list - _homeserver_whitelist = re.compile( - "({})".format(")|(".join(homeserver_whitelist)) - ) - - -@only_if_tracing -def whitelisted_homeserver(destination): - """Checks if a destination matches the whitelist - Args: - destination (String)""" - if _homeserver_whitelist: - return _homeserver_whitelist.match(destination) - return False +def start_active_span_follows_from(operation_name, contexts): + if opentracing is None: + return _noop_context_manager() + else: + references = [opentracing.follows_from(context) for context in contexts] + scope = start_active_span(operation_name, references=references) + return scope def start_active_span_from_context( @@ -372,12 +331,16 @@ def start_active_span_from_context( Extracts a span context from Twisted Headers. args: headers (twisted.web.http_headers.Headers) + + For the other args see opentracing.tracer + returns: span_context (opentracing.span.SpanContext) """ # Twisted encodes the values as lists whereas opentracing doesn't. 
     # So, we take the first item in the list.
     # Also, twisted uses byte arrays while opentracing expects strings.
+
     if opentracing is None:
         return _noop_context_manager()
@@ -395,17 +358,90 @@ def start_active_span_from_context(
     )


+def start_active_span_from_edu(
+    edu_content,
+    operation_name,
+    references=[],
+    tags=None,
+    start_time=None,
+    ignore_active_span=False,
+    finish_on_close=True,
+):
+    """
+    Extracts a span context from an edu and uses it to start a new active span
+
+    Args:
+        edu_content (dict): an edu_content with a `context` field whose value is
+        canonical json for a dict which contains opentracing information.
+
+        For the other args see opentracing.tracer
+    """
+
+    if opentracing is None:
+        return _noop_context_manager()
+
+    carrier = json.loads(edu_content.get("context", "{}")).get("opentracing", {})
+    context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+    _references = [
+        opentracing.child_of(span_context_from_string(x))
+        for x in carrier.get("references", [])
+    ]
+
+    # For some reason jaeger decided not to support the visualization of multiple parent
+    # spans or explicitly show references. I include the span context as a tag here as
+    # an aid to people debugging but it's really not an ideal solution.
+
+    references += _references
+
+    scope = opentracing.tracer.start_active_span(
+        operation_name,
+        child_of=context,
+        references=references,
+        tags=tags,
+        start_time=start_time,
+        ignore_active_span=ignore_active_span,
+        finish_on_close=finish_on_close,
+    )
+
+    scope.span.set_tag("references", carrier.get("references", []))
+    return scope
+
+
+# Opentracing setters for tags, logs, etc
+
+
+@only_if_tracing
+def set_tag(key, value):
+    """Sets a tag on the active span"""
+    opentracing.tracer.active_span.set_tag(key, value)
+
+
+@only_if_tracing
+def log_kv(key_values, timestamp=None):
+    """Log to the active span"""
+    opentracing.tracer.active_span.log_kv(key_values, timestamp)
+
+
+@only_if_tracing
+def set_operation_name(operation_name):
+    """Sets the operation name of the active span"""
+    opentracing.tracer.active_span.set_operation_name(operation_name)
+
+
+# Injection and extraction
+
+
 @only_if_tracing
 def inject_active_span_twisted_headers(headers, destination):
     """
-    Injects a span context into twisted headers inplace
+    Injects a span context into twisted headers in-place

     Args:
         headers (twisted.web.http_headers.Headers)
         span (opentracing.Span)

     Returns:
-        Inplace modification of headers
+        In-place modification of headers

     Note:
         The headers set by the tracer are custom to the tracer implementation which
@@ -437,7 +473,7 @@ def inject_active_span_byte_dict(headers, destination):
         span (opentracing.Span)

     Returns:
-        Inplace modification of headers
+        In-place modification of headers

     Note:
         The headers set by the tracer are custom to the tracer implementation which
@@ -458,9 +494,190 @@ def inject_active_span_byte_dict(headers, destination):
         headers[key.encode()] = [value.encode()]


+@only_if_tracing
+def inject_active_span_text_map(carrier, destination=None):
+    """
+    Injects a span context into a dict
+
+    Args:
+        carrier (dict)
+        destination (str): the name of the remote server. The span context
+        will only be injected if the destination matches the homeserver_whitelist
+        or destination is None.
+
+    Returns:
+        In-place modification of carrier
+
+    Note:
+        The headers set by the tracer are custom to the tracer implementation which
+        should be unique enough that they don't interfere with any headers set by
+        synapse or twisted.
+        If we're still using jaeger these headers would be those here:
+        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
+    """
+
+    if destination and not whitelisted_homeserver(destination):
+        return
+
+    opentracing.tracer.inject(
+        opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+    )
+
+
+def active_span_context_as_string():
+    """
+    Returns:
+        The active span context encoded as a string.
+    """
+    carrier = {}
+    if opentracing:
+        opentracing.tracer.inject(
+            opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier
+        )
+    return json.dumps(carrier)
+
+
+@only_if_tracing
+def span_context_from_string(carrier):
+    """
+    Returns:
+        The active span context decoded from a string.
+    """
+    carrier = json.loads(carrier)
+    return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+
+
+@only_if_tracing
+def extract_text_map(carrier):
+    """
+    Wrapper method for opentracing's tracer.extract for TEXT_MAP.
+    Args:
+        carrier (dict): a dict possibly containing a span context.
+
+    Returns:
+        The active span context extracted from carrier.
+    """
+    return opentracing.tracer.extract(opentracing.Format.TEXT_MAP, carrier)
+
+
+# Tracing decorators
+
+
+def trace(func):
+    """
+    Decorator to trace a function.
+    Sets the operation name to that of the function.
+    """
+    if opentracing is None:
+        return func
+
+    @wraps(func)
+    def _trace_inner(self, *args, **kwargs):
+        if opentracing is None:
+            return func(self, *args, **kwargs)
+
+        scope = start_active_span(func.__name__)
+        scope.__enter__()
+
+        try:
+            result = func(self, *args, **kwargs)
+            if isinstance(result, defer.Deferred):
+
+                def call_back(result):
+                    scope.__exit__(None, None, None)
+                    return result
+
+                def err_back(result):
+                    scope.span.set_tag(tags.ERROR, True)
+                    scope.__exit__(None, None, None)
+                    return result
+
+                result.addCallbacks(call_back, err_back)
+
+            else:
+                scope.__exit__(None, None, None)
+
+            return result
+
+        except Exception as e:
+            scope.__exit__(type(e), None, e.__traceback__)
+            raise
+
+    return _trace_inner
+
+
+def trace_using_operation_name(operation_name):
+    """Decorator to trace a function. Explicitly sets the operation_name."""
+
+    def trace(func):
+        """
+        Decorator to trace a function.
+        Sets the operation name to the given operation_name.
+        """
+        if opentracing is None:
+            return func
+
+        @wraps(func)
+        def _trace_inner(self, *args, **kwargs):
+            if opentracing is None:
+                return func(self, *args, **kwargs)
+
+            scope = start_active_span(operation_name)
+            scope.__enter__()
+
+            try:
+                result = func(self, *args, **kwargs)
+                if isinstance(result, defer.Deferred):
+
+                    def call_back(result):
+                        scope.__exit__(None, None, None)
+                        return result
+
+                    def err_back(result):
+                        scope.span.set_tag(tags.ERROR, True)
+                        scope.__exit__(None, None, None)
+                        return result
+
+                    result.addCallbacks(call_back, err_back)
+                else:
+                    scope.__exit__(None, None, None)
+
+                return result
+
+            except Exception as e:
+                scope.__exit__(type(e), None, e.__traceback__)
+                raise
+
+        return _trace_inner
+
+    return trace
+
+
+def tag_args(func):
+    """
+    Tags all of the args to the active span.
+    """
+
+    if not opentracing:
+        return func
+
+    @wraps(func)
+    def _tag_args_inner(self, *args, **kwargs):
+        argspec = inspect.getargspec(func)
+        for i, arg in enumerate(argspec.args[1:]):
+            set_tag("ARG_" + arg, args[i])
+        set_tag("args", args[len(argspec.args) :])
+        set_tag("kwargs", kwargs)
+        return func(self, *args, **kwargs)
+
+    return _tag_args_inner
+
+
 def trace_servlet(servlet_name, func):
     """Decorator which traces a servlet. It starts a span with some
     servlet-specific tags such as the servlet_name and request information"""
+    if not opentracing:
+        return func

     @wraps(func)
     @defer.inlineCallbacks
@@ -477,6 +694,44 @@ def _trace_servlet_inner(request, *args, **kwargs):
             },
         ):
             result = yield defer.maybeDeferred(func, request, *args, **kwargs)
-            defer.returnValue(result)
+        defer.returnValue(result)

     return _trace_servlet_inner
+
+
+# Helper class
+
+
+class _DummyTagNames(object):
+    """wrapper of opentracing's tags. We need to have them if we
+    want to reference them without opentracing around. Clearly they
+    should never actually show up in a trace. `set_tags` overwrites
+    these with the correct ones."""
+
+    INVALID_TAG = "invalid-tag"
+    COMPONENT = INVALID_TAG
+    DATABASE_INSTANCE = INVALID_TAG
+    DATABASE_STATEMENT = INVALID_TAG
+    DATABASE_TYPE = INVALID_TAG
+    DATABASE_USER = INVALID_TAG
+    ERROR = INVALID_TAG
+    HTTP_METHOD = INVALID_TAG
+    HTTP_STATUS_CODE = INVALID_TAG
+    HTTP_URL = INVALID_TAG
+    MESSAGE_BUS_DESTINATION = INVALID_TAG
+    PEER_ADDRESS = INVALID_TAG
+    PEER_HOSTNAME = INVALID_TAG
+    PEER_HOST_IPV4 = INVALID_TAG
+    PEER_HOST_IPV6 = INVALID_TAG
+    PEER_PORT = INVALID_TAG
+    PEER_SERVICE = INVALID_TAG
+    SAMPLING_PRIORITY = INVALID_TAG
+    SERVICE = INVALID_TAG
+    SPAN_KIND = INVALID_TAG
+    SPAN_KIND_CONSUMER = INVALID_TAG
+    SPAN_KIND_PRODUCER = INVALID_TAG
+    SPAN_KIND_RPC_CLIENT = INVALID_TAG
+    SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
+tags = _DummyTagNames
diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
index 8c661302c9f0..4eed4f233822 100644
--- a/synapse/logging/scopecontextmanager.py
+++ b/synapse/logging/scopecontextmanager.py
@@ -131,7 +131,7 @@ def __exit__(self, type, value, traceback):

     def close(self):
         if self.manager.active is not self:
-            logger.error("Tried to close a none active scope!")
+            logger.error("Tried to close a non-active scope!")
             return

         if self._finish_on_close:

From fadfde9aaaf37da6b3f8f4ca27a028f43cc8a3f3 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 23 Jul 2019 13:32:37 +0100
Subject: [PATCH 041/136] Newsfile
---
 changelog.d/5743.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5743.bugfix

diff --git a/changelog.d/5743.bugfix b/changelog.d/5743.bugfix
new file mode 100644
index 000000000000..a160e9945f48
--- /dev/null
+++ b/changelog.d/5743.bugfix
@@ -0,0 +1 @@
+Log when we receive receipt from a different origin.
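
The next patch is almost entirely mechanical. On Python 3 a generator may
simply return a value, so the Twisted-era defer.returnValue() calls can be
dropped. A minimal before/after sketch of the rewrite being applied (not
taken from the diff):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style():
        result = yield defer.succeed(42)
        defer.returnValue(result)  # required on Python 2

    @defer.inlineCallbacks
    def new_style():
        result = yield defer.succeed(42)
        return result  # equivalent, Python 3 only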
From 4806651744616bf48abf408034ab9560e33f60ce Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 23 Jul 2019 23:00:55 +1000 Subject: [PATCH 042/136] Replace returnValue with return (#5736) --- changelog.d/5736.misc | 1 + docs/log_contexts.rst | 2 +- synapse/api/auth.py | 44 ++++--- synapse/api/filtering.py | 2 +- synapse/app/frontend_proxy.py | 8 +- synapse/app/homeserver.py | 2 +- synapse/appservice/__init__.py | 28 ++--- synapse/appservice/api.py | 38 +++--- synapse/appservice/scheduler.py | 4 +- synapse/crypto/keyring.py | 14 +-- synapse/events/builder.py | 16 ++- synapse/events/snapshot.py | 28 ++--- synapse/events/third_party_rules.py | 8 +- synapse/events/utils.py | 4 +- synapse/federation/federation_base.py | 6 +- synapse/federation/federation_client.py | 46 ++++--- synapse/federation/federation_server.py | 77 +++++------- .../sender/per_destination_queue.py | 4 +- .../federation/sender/transaction_manager.py | 2 +- synapse/federation/transport/client.py | 30 ++--- synapse/groups/attestations.py | 2 +- synapse/groups/groups_server.py | 92 +++++++------- synapse/handlers/account_data.py | 4 +- synapse/handlers/account_validity.py | 6 +- synapse/handlers/acme.py | 2 +- synapse/handlers/admin.py | 10 +- synapse/handlers/appservice.py | 22 ++-- synapse/handlers/auth.py | 44 +++---- synapse/handlers/deactivate_account.py | 2 +- synapse/handlers/device.py | 22 ++-- synapse/handlers/directory.py | 14 +-- synapse/handlers/e2e_keys.py | 10 +- synapse/handlers/e2e_room_keys.py | 8 +- synapse/handlers/events.py | 6 +- synapse/handlers/federation.py | 82 +++++++------ synapse/handlers/groups_local.py | 26 ++-- synapse/handlers/identity.py | 18 +-- synapse/handlers/initial_sync.py | 54 ++++----- synapse/handlers/message.py | 32 +++-- synapse/handlers/pagination.py | 14 +-- synapse/handlers/presence.py | 54 ++++----- synapse/handlers/profile.py | 18 +-- synapse/handlers/receipts.py | 14 +-- synapse/handlers/register.py | 16 +-- synapse/handlers/room.py | 16 +-- synapse/handlers/room_list.py | 10 +- synapse/handlers/room_member.py | 42 +++---- synapse/handlers/room_member_worker.py | 2 +- synapse/handlers/search.py | 14 +-- synapse/handlers/state_deltas.py | 8 +- synapse/handlers/stats.py | 6 +- synapse/handlers/sync.py | 112 ++++++++---------- synapse/handlers/typing.py | 4 +- synapse/handlers/user_directory.py | 2 +- synapse/http/client.py | 28 ++--- .../federation/matrix_federation_agent.py | 46 ++++--- synapse/http/federation/srv_resolver.py | 8 +- synapse/http/matrixfederationclient.py | 16 +-- synapse/logging/opentracing.py | 6 +- synapse/module_api/__init__.py | 2 +- synapse/notifier.py | 18 +-- synapse/push/bulk_push_rule_evaluator.py | 10 +- synapse/push/httppusher.py | 18 +-- synapse/push/mailer.py | 84 ++++++------- synapse/push/presentable_names.py | 25 ++-- synapse/push/push_tools.py | 4 +- synapse/push/pusherpool.py | 6 +- synapse/replication/http/_base.py | 2 +- synapse/replication/http/federation.py | 10 +- synapse/replication/http/login.py | 2 +- synapse/replication/http/membership.py | 4 +- synapse/replication/http/register.py | 4 +- synapse/replication/http/send_event.py | 4 +- synapse/replication/tcp/streams/_base.py | 12 +- synapse/replication/tcp/streams/events.py | 2 +- synapse/rest/admin/__init__.py | 48 ++++---- synapse/rest/admin/server_notice_servlet.py | 2 +- synapse/rest/client/v1/directory.py | 18 ++- synapse/rest/client/v1/events.py | 6 +- synapse/rest/client/v1/initial_sync.py | 2 +- synapse/rest/client/v1/login.py | 14 +-- synapse/rest/client/v1/logout.py | 4 +- 
synapse/rest/client/v1/presence.py | 4 +- synapse/rest/client/v1/profile.py | 14 +-- synapse/rest/client/v1/push_rule.py | 10 +- synapse/rest/client/v1/pusher.py | 8 +- synapse/rest/client/v1/room.py | 46 +++---- synapse/rest/client/v1/voip.py | 22 ++-- synapse/rest/client/v2_alpha/account.py | 32 ++--- synapse/rest/client/v2_alpha/account_data.py | 8 +- .../rest/client/v2_alpha/account_validity.py | 4 +- synapse/rest/client/v2_alpha/auth.py | 4 +- synapse/rest/client/v2_alpha/capabilities.py | 2 +- synapse/rest/client/v2_alpha/devices.py | 10 +- synapse/rest/client/v2_alpha/filter.py | 4 +- synapse/rest/client/v2_alpha/groups.py | 64 +++++----- synapse/rest/client/v2_alpha/keys.py | 8 +- synapse/rest/client/v2_alpha/notifications.py | 4 +- synapse/rest/client/v2_alpha/openid.py | 18 ++- synapse/rest/client/v2_alpha/read_marker.py | 2 +- synapse/rest/client/v2_alpha/receipts.py | 2 +- synapse/rest/client/v2_alpha/register.py | 38 +++--- synapse/rest/client/v2_alpha/relations.py | 8 +- synapse/rest/client/v2_alpha/report_event.py | 2 +- synapse/rest/client/v2_alpha/room_keys.py | 14 +-- .../v2_alpha/room_upgrade_rest_servlet.py | 2 +- synapse/rest/client/v2_alpha/sendtodevice.py | 2 +- synapse/rest/client/v2_alpha/sync.py | 48 ++++---- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 +- .../rest/client/v2_alpha/user_directory.py | 4 +- synapse/rest/media/v1/media_repository.py | 18 +-- synapse/rest/media/v1/media_storage.py | 12 +- synapse/rest/media/v1/preview_url_resource.py | 34 +++--- .../resource_limits_server_notices.py | 2 +- .../server_notices/server_notices_manager.py | 6 +- synapse/state/__init__.py | 38 +++--- synapse/state/v1.py | 8 +- synapse/state/v2.py | 26 ++-- synapse/storage/__init__.py | 2 +- synapse/storage/_base.py | 14 +-- synapse/storage/account_data.py | 14 +-- synapse/storage/appservice.py | 14 +-- synapse/storage/background_updates.py | 20 ++-- synapse/storage/client_ips.py | 26 ++-- synapse/storage/deviceinbox.py | 10 +- synapse/storage/devices.py | 32 ++--- synapse/storage/directory.py | 10 +- synapse/storage/e2e_room_keys.py | 6 +- synapse/storage/end_to_end_keys.py | 8 +- synapse/storage/event_federation.py | 12 +- synapse/storage/event_push_actions.py | 16 +-- synapse/storage/events.py | 34 +++--- synapse/storage/events_bg_updates.py | 6 +- synapse/storage/events_worker.py | 20 ++-- synapse/storage/filtering.py | 4 +- synapse/storage/group_server.py | 38 +++--- synapse/storage/monthly_active_users.py | 2 +- synapse/storage/presence.py | 6 +- synapse/storage/profile.py | 12 +- synapse/storage/push_rule.py | 18 ++- synapse/storage/pusher.py | 10 +- synapse/storage/receipts.py | 36 +++--- synapse/storage/registration.py | 34 +++--- synapse/storage/relations.py | 4 +- synapse/storage/room.py | 10 +- synapse/storage/roommember.py | 42 ++++--- synapse/storage/search.py | 56 ++++----- synapse/storage/signatures.py | 2 +- synapse/storage/state.py | 54 ++++----- synapse/storage/stats.py | 16 +-- synapse/storage/stream.py | 44 ++++--- synapse/storage/tags.py | 12 +- synapse/storage/transactions.py | 4 +- synapse/storage/user_directory.py | 30 ++--- synapse/storage/user_erasure_store.py | 5 +- synapse/streams/events.py | 4 +- synapse/util/__init__.py | 2 +- synapse/util/async_helpers.py | 4 +- synapse/util/caches/descriptors.py | 2 +- synapse/util/caches/response_cache.py | 2 +- synapse/util/metrics.py | 2 +- synapse/util/retryutils.py | 16 ++- synapse/visibility.py | 8 +- tests/crypto/test_keyring.py | 6 +- 
tests/handlers/test_register.py | 2 +- .../test_matrix_federation_agent.py | 4 +- tests/http/federation/test_srv_resolver.py | 2 +- tests/http/test_fedclient.py | 2 +- tests/rest/client/test_transactions.py | 2 +- tests/storage/test_background_update.py | 4 +- tests/storage/test_redaction.py | 4 +- tests/storage/test_roommember.py | 2 +- tests/storage/test_state.py | 2 +- tests/test_visibility.py | 6 +- tests/util/caches/test_descriptors.py | 8 +- tests/utils.py | 6 +- 177 files changed, 1360 insertions(+), 1514 deletions(-) create mode 100644 changelog.d/5736.misc mode change 100755 => 100644 synapse/app/homeserver.py diff --git a/changelog.d/5736.misc b/changelog.d/5736.misc new file mode 100644 index 000000000000..5713b8b32d77 --- /dev/null +++ b/changelog.d/5736.misc @@ -0,0 +1 @@ +Replace uses of returnValue with plain return, as returnValue is not needed on Python 3. diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index f5cd5de8abac..4502cd94544c 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -148,7 +148,7 @@ call any other functions. d = more_stuff() result = yield d # also fine, of course - defer.returnValue(result) + return result def nonInlineCallbacksFun(): logger.debug("just a wrapper really") diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 7ce6540bddd3..351790cca495 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -128,7 +128,7 @@ def check_joined_room(self, room_id, user_id, current_state=None): ) self._check_joined_room(member, user_id, room_id) - defer.returnValue(member) + return member @defer.inlineCallbacks def check_user_was_in_room(self, room_id, user_id): @@ -156,13 +156,13 @@ def check_user_was_in_room(self, room_id, user_id): if forgot: raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) - defer.returnValue(member) + return member @defer.inlineCallbacks def check_host_in_room(self, room_id, host): with Measure(self.clock, "check_host_in_room"): latest_event_ids = yield self.store.is_host_joined(room_id, host) - defer.returnValue(latest_event_ids) + return latest_event_ids def _check_joined_room(self, member, user_id, room_id): if not member or member.membership != Membership.JOIN: @@ -219,9 +219,7 @@ def get_user_by_req( device_id="dummy-device", # stubbed ) - defer.returnValue( - synapse.types.create_requester(user_id, app_service=app_service) - ) + return synapse.types.create_requester(user_id, app_service=app_service) user_info = yield self.get_user_by_access_token(access_token, rights) user = user_info["user"] @@ -262,10 +260,8 @@ def get_user_by_req( request.authenticated_entity = user.to_string() - defer.returnValue( - synapse.types.create_requester( - user, token_id, is_guest, device_id, app_service=app_service - ) + return synapse.types.create_requester( + user, token_id, is_guest, device_id, app_service=app_service ) except KeyError: raise MissingClientTokenError() @@ -276,25 +272,25 @@ def _get_appservice_user_id(self, request): self.get_access_token_from_request(request) ) if app_service is None: - defer.returnValue((None, None)) + return (None, None) if app_service.ip_range_whitelist: ip_address = IPAddress(self.hs.get_ip_from_request(request)) if ip_address not in app_service.ip_range_whitelist: - defer.returnValue((None, None)) + return (None, None) if b"user_id" not in request.args: - defer.returnValue((app_service.sender, app_service)) + return (app_service.sender, app_service) user_id = request.args[b"user_id"][0].decode("utf8") if app_service.sender == user_id: - 
defer.returnValue((app_service.sender, app_service)) + return (app_service.sender, app_service) if not app_service.is_interested_in_user(user_id): raise AuthError(403, "Application service cannot masquerade as this user.") if not (yield self.store.get_user_by_id(user_id)): raise AuthError(403, "Application service has not registered this user") - defer.returnValue((user_id, app_service)) + return (user_id, app_service) @defer.inlineCallbacks def get_user_by_access_token(self, token, rights="access"): @@ -330,7 +326,7 @@ def get_user_by_access_token(self, token, rights="access"): msg="Access token has expired", soft_logout=True ) - defer.returnValue(r) + return r # otherwise it needs to be a valid macaroon try: @@ -378,7 +374,7 @@ def get_user_by_access_token(self, token, rights="access"): } else: raise RuntimeError("Unknown rights setting %s", rights) - defer.returnValue(ret) + return ret except ( _InvalidMacaroonException, pymacaroons.exceptions.MacaroonException, @@ -506,7 +502,7 @@ def _verify_expiry(self, caveat): def _look_up_user_by_access_token(self, token): ret = yield self.store.get_user_by_access_token(token) if not ret: - defer.returnValue(None) + return None # we use ret.get() below because *lots* of unit tests stub out # get_user_by_access_token in a way where it only returns a couple of @@ -518,7 +514,7 @@ def _look_up_user_by_access_token(self, token): "device_id": ret.get("device_id"), "valid_until_ms": ret.get("valid_until_ms"), } - defer.returnValue(user_info) + return user_info def get_appservice_by_req(self, request): token = self.get_access_token_from_request(request) @@ -543,7 +539,7 @@ def is_server_admin(self, user): @defer.inlineCallbacks def compute_auth_events(self, event, current_state_ids, for_verification=False): if event.type == EventTypes.Create: - defer.returnValue([]) + return [] auth_ids = [] @@ -604,7 +600,7 @@ def compute_auth_events(self, event, current_state_ids, for_verification=False): if member_event.content["membership"] == Membership.JOIN: auth_ids.append(member_event.event_id) - defer.returnValue(auth_ids) + return auth_ids @defer.inlineCallbacks def check_can_change_room_list(self, room_id, user): @@ -618,7 +614,7 @@ def check_can_change_room_list(self, room_id, user): is_admin = yield self.is_server_admin(user) if is_admin: - defer.returnValue(True) + return True user_id = user.to_string() yield self.check_joined_room(room_id, user_id) @@ -712,7 +708,7 @@ def check_in_room_or_world_readable(self, room_id, user_id): # * The user is a guest user, and has joined the room # else it will throw. 
member_event = yield self.check_user_was_in_room(room_id, user_id) - defer.returnValue((member_event.membership, member_event.event_id)) + return (member_event.membership, member_event.event_id) except AuthError: visibility = yield self.state.get_current_state( room_id, EventTypes.RoomHistoryVisibility, "" @@ -721,7 +717,7 @@ def check_in_room_or_world_readable(self, room_id, user_id): visibility and visibility.content["history_visibility"] == "world_readable" ): - defer.returnValue((Membership.JOIN, None)) + return (Membership.JOIN, None) return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 9b3daca29bbf..9f06556bd2b2 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -132,7 +132,7 @@ def __init__(self, hs): @defer.inlineCallbacks def get_user_filter(self, user_localpart, filter_id): result = yield self.store.get_user_filter(user_localpart, filter_id) - defer.returnValue(FilterCollection(result)) + return FilterCollection(result) def add_user_filter(self, user_localpart, user_filter): self.check_valid_filter(user_filter) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 5b563c277883..e2822ca848b3 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -70,12 +70,12 @@ def on_GET(self, request, user_id): except HttpResponseException as e: raise e.to_synapse_error() - defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def on_PUT(self, request, user_id): yield self.auth.get_user_by_req(request) - defer.returnValue((200, {})) + return (200, {}) class KeyUploadServlet(RestServlet): @@ -126,11 +126,11 @@ def on_POST(self, request, device_id): self.main_uri + request.uri.decode("ascii"), body, headers=headers ) - defer.returnValue((200, result)) + return (200, result) else: # Just interested in counts. 
result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue((200, {"one_time_key_counts": result})) + return (200, {"one_time_key_counts": result}) class FrontendProxySlavedStore( diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py old mode 100755 new mode 100644 index 34c3f5ee99a0..7d6b51b5bc09 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -406,7 +406,7 @@ def do_acme(): if provision: yield acme.provision_certificate() - defer.returnValue(provision) + return provision @defer.inlineCallbacks def reprovision_acme(): diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index b26a31dd54d1..33b357942578 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -175,21 +175,21 @@ def _is_exclusive(self, ns_key, test_string): @defer.inlineCallbacks def _matches_user(self, event, store): if not event: - defer.returnValue(False) + return False if self.is_interested_in_user(event.sender): - defer.returnValue(True) + return True # also check m.room.member state key if event.type == EventTypes.Member and self.is_interested_in_user( event.state_key ): - defer.returnValue(True) + return True if not store: - defer.returnValue(False) + return False does_match = yield self._matches_user_in_member_list(event.room_id, store) - defer.returnValue(does_match) + return does_match @cachedInlineCallbacks(num_args=1, cache_context=True) def _matches_user_in_member_list(self, room_id, store, cache_context): @@ -200,8 +200,8 @@ def _matches_user_in_member_list(self, room_id, store, cache_context): # check joined member events for user_id in member_list: if self.is_interested_in_user(user_id): - defer.returnValue(True) - defer.returnValue(False) + return True + return False def _matches_room_id(self, event): if hasattr(event, "room_id"): @@ -211,13 +211,13 @@ def _matches_room_id(self, event): @defer.inlineCallbacks def _matches_aliases(self, event, store): if not store or not event: - defer.returnValue(False) + return False alias_list = yield store.get_aliases_for_room(event.room_id) for alias in alias_list: if self.is_interested_in_alias(alias): - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def is_interested(self, event, store=None): @@ -231,15 +231,15 @@ def is_interested(self, event, store=None): """ # Do cheap checks first if self._matches_room_id(event): - defer.returnValue(True) + return True if (yield self._matches_aliases(event, store)): - defer.returnValue(True) + return True if (yield self._matches_user(event, store)): - defer.returnValue(True) + return True - defer.returnValue(False) + return False def is_interested_in_user(self, user_id): return ( diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 571881775bf2..007ca75a9479 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -97,40 +97,40 @@ def __init__(self, hs): @defer.inlineCallbacks def query_user(self, service, user_id): if service.url is None: - defer.returnValue(False) + return False uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) response = None try: response = yield self.get_json(uri, {"access_token": service.hs_token}) if response is not None: # just an empty json object - defer.returnValue(True) + return True except CodeMessageException as e: if e.code == 404: - defer.returnValue(False) + return False return logger.warning("query_user to %s received %s", uri, e.code) except Exception as ex: 
logger.warning("query_user to %s threw exception %s", uri, ex) - defer.returnValue(False) + return False @defer.inlineCallbacks def query_alias(self, service, alias): if service.url is None: - defer.returnValue(False) + return False uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) response = None try: response = yield self.get_json(uri, {"access_token": service.hs_token}) if response is not None: # just an empty json object - defer.returnValue(True) + return True except CodeMessageException as e: logger.warning("query_alias to %s received %s", uri, e.code) if e.code == 404: - defer.returnValue(False) + return False return except Exception as ex: logger.warning("query_alias to %s threw exception %s", uri, ex) - defer.returnValue(False) + return False @defer.inlineCallbacks def query_3pe(self, service, kind, protocol, fields): @@ -141,7 +141,7 @@ def query_3pe(self, service, kind, protocol, fields): else: raise ValueError("Unrecognised 'kind' argument %r to query_3pe()", kind) if service.url is None: - defer.returnValue([]) + return [] uri = "%s%s/thirdparty/%s/%s" % ( service.url, @@ -155,7 +155,7 @@ def query_3pe(self, service, kind, protocol, fields): logger.warning( "query_3pe to %s returned an invalid response %r", uri, response ) - defer.returnValue([]) + return [] ret = [] for r in response: @@ -166,14 +166,14 @@ def query_3pe(self, service, kind, protocol, fields): "query_3pe to %s returned an invalid result %r", uri, r ) - defer.returnValue(ret) + return ret except Exception as ex: logger.warning("query_3pe to %s threw exception %s", uri, ex) - defer.returnValue([]) + return [] def get_3pe_protocol(self, service, protocol): if service.url is None: - defer.returnValue({}) + return {} @defer.inlineCallbacks def _get(): @@ -189,7 +189,7 @@ def _get(): logger.warning( "query_3pe_protocol to %s did not return a" " valid result", uri ) - defer.returnValue(None) + return None for instance in info.get("instances", []): network_id = instance.get("network_id", None) @@ -198,10 +198,10 @@ def _get(): service.id, network_id ).to_string() - defer.returnValue(info) + return info except Exception as ex: logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex) - defer.returnValue(None) + return None key = (service.id, protocol) return self.protocol_meta_cache.wrap(key, _get) @@ -209,7 +209,7 @@ def _get(): @defer.inlineCallbacks def push_bulk(self, service, events, txn_id=None): if service.url is None: - defer.returnValue(True) + return True events = self._serialize(events) @@ -229,14 +229,14 @@ def push_bulk(self, service, events, txn_id=None): ) sent_transactions_counter.labels(service.id).inc() sent_events_counter.labels(service.id).inc(len(events)) - defer.returnValue(True) + return True return except CodeMessageException as e: logger.warning("push_bulk to %s received %s", uri, e.code) except Exception as ex: logger.warning("push_bulk to %s threw exception %s", uri, ex) failed_transactions_counter.labels(service.id).inc() - defer.returnValue(False) + return False def _serialize(self, events): time_now = self.clock.time_msec() diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index e5b36494f57f..42a350bff8b5 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -193,7 +193,7 @@ def _start_recoverer(self, service): @defer.inlineCallbacks def _is_service_up(self, service): state = yield self.store.get_appservice_state(service) - defer.returnValue(state == ApplicationServiceState.UP or state is None) + return 
state == ApplicationServiceState.UP or state is None class _Recoverer(object): @@ -208,7 +208,7 @@ def start(clock, store, as_api, callback): r.service.id, ) r.recover() - defer.returnValue(recoverers) + return recoverers def __init__(self, clock, store, as_api, service, callback): self.clock = clock diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e8bb420ad1df..6c3e885e72e6 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -462,7 +462,7 @@ def get_keys(self, keys_to_fetch): keys = {} for (server_name, key_id), key in res.items(): keys.setdefault(server_name, {})[key_id] = key - defer.returnValue(keys) + return keys class BaseV2KeyFetcher(object): @@ -566,7 +566,7 @@ def process_v2_response(self, from_server, response_json, time_added_ms): ).addErrback(unwrapFirstError) ) - defer.returnValue(verify_keys) + return verify_keys class PerspectivesKeyFetcher(BaseV2KeyFetcher): @@ -588,7 +588,7 @@ def get_key(key_server): result = yield self.get_server_verify_key_v2_indirect( keys_to_fetch, key_server ) - defer.returnValue(result) + return result except KeyLookupError as e: logger.warning( "Key lookup failed from %r: %s", key_server.server_name, e @@ -601,7 +601,7 @@ def get_key(key_server): str(e), ) - defer.returnValue({}) + return {} results = yield make_deferred_yieldable( defer.gatherResults( @@ -615,7 +615,7 @@ def get_key(key_server): for server_name, keys in result.items(): union_of_keys.setdefault(server_name, {}).update(keys) - defer.returnValue(union_of_keys) + return union_of_keys @defer.inlineCallbacks def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server): @@ -701,7 +701,7 @@ def get_server_verify_key_v2_indirect(self, keys_to_fetch, key_server): perspective_name, time_now_ms, added_keys ) - defer.returnValue(keys) + return keys def _validate_perspectives_response(self, key_server, response): """Optionally check the signature on the result of a /key/query request @@ -843,7 +843,7 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): ) keys.update(response_keys) - defer.returnValue(keys) + return keys @defer.inlineCallbacks diff --git a/synapse/events/builder.py b/synapse/events/builder.py index db011e04078f..399775133726 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -144,15 +144,13 @@ def build(self, prev_event_ids): if self._origin_server_ts is not None: event_dict["origin_server_ts"] = self._origin_server_ts - defer.returnValue( - create_local_event_from_event_dict( - clock=self._clock, - hostname=self._hostname, - signing_key=self._signing_key, - format_version=self.format_version, - event_dict=event_dict, - internal_metadata_dict=self.internal_metadata.get_dict(), - ) + return create_local_event_from_event_dict( + clock=self._clock, + hostname=self._hostname, + signing_key=self._signing_key, + format_version=self.format_version, + event_dict=event_dict, + internal_metadata_dict=self.internal_metadata.get_dict(), ) diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a9545e6c1b17..acbcbeeced85 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -133,19 +133,17 @@ def serialize(self, event, store): else: prev_state_id = None - defer.returnValue( - { - "prev_state_id": prev_state_id, - "event_type": event.type, - "event_state_key": event.state_key if event.is_state() else None, - "state_group": self.state_group, - "rejected": self.rejected, - "prev_group": self.prev_group, - "delta_ids": _encode_state_dict(self.delta_ids), - 
"prev_state_events": self.prev_state_events, - "app_service_id": self.app_service.id if self.app_service else None, - } - ) + return { + "prev_state_id": prev_state_id, + "event_type": event.type, + "event_state_key": event.state_key if event.is_state() else None, + "state_group": self.state_group, + "rejected": self.rejected, + "prev_group": self.prev_group, + "delta_ids": _encode_state_dict(self.delta_ids), + "prev_state_events": self.prev_state_events, + "app_service_id": self.app_service.id if self.app_service else None, + } @staticmethod def deserialize(store, input): @@ -202,7 +200,7 @@ def get_current_state_ids(self, store): yield make_deferred_yieldable(self._fetching_state_deferred) - defer.returnValue(self._current_state_ids) + return self._current_state_ids @defer.inlineCallbacks def get_prev_state_ids(self, store): @@ -222,7 +220,7 @@ def get_prev_state_ids(self, store): yield make_deferred_yieldable(self._fetching_state_deferred) - defer.returnValue(self._prev_state_ids) + return self._prev_state_ids def get_cached_current_state_ids(self): """Gets the current state IDs if we have them already cached. diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 8f5d95696b79..714a9b1579ab 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -51,7 +51,7 @@ def check_event_allowed(self, event, context): defer.Deferred[bool]: True if the event should be allowed, False if not. """ if self.third_party_rules is None: - defer.returnValue(True) + return True prev_state_ids = yield context.get_prev_state_ids(self.store) @@ -61,7 +61,7 @@ def check_event_allowed(self, event, context): state_events[key] = yield self.store.get_event(event_id, allow_none=True) ret = yield self.third_party_rules.check_event_allowed(event, state_events) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_create_room(self, requester, config, is_requester_admin): @@ -98,7 +98,7 @@ def check_threepid_can_be_invited(self, medium, address, room_id): """ if self.third_party_rules is None: - defer.returnValue(True) + return True state_ids = yield self.store.get_filtered_current_state_ids(room_id) room_state_events = yield self.store.get_events(state_ids.values()) @@ -110,4 +110,4 @@ def check_threepid_can_be_invited(self, medium, address, room_id): ret = yield self.third_party_rules.check_threepid_can_be_invited( medium, address, state_events ) - defer.returnValue(ret) + return ret diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 9487a886f580..07d1c5bcf045 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -360,7 +360,7 @@ def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs): """ # To handle the case of presence events and the like if not isinstance(event, EventBase): - defer.returnValue(event) + return event event_id = event.event_id serialized_event = serialize_event(event, time_now, **kwargs) @@ -406,7 +406,7 @@ def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs): "sender": edit.sender, } - defer.returnValue(serialized_event) + return serialized_event def serialize_events(self, events, time_now, **kwargs): """Serializes multiple events. 
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index f7bb806ae724..5a1e23a145b4 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -106,7 +106,7 @@ def handle_check_result(pdu, deferred): "Failed to find copy of %s with valid signature", pdu.event_id ) - defer.returnValue(res) + return res handle = preserve_fn(handle_check_result) deferreds2 = [handle(pdu, deferred) for pdu, deferred in zip(pdus, deferreds)] @@ -116,9 +116,9 @@ def handle_check_result(pdu, deferred): ).addErrback(unwrapFirstError) if include_none: - defer.returnValue(valid_pdus) + return valid_pdus else: - defer.returnValue([p for p in valid_pdus if p]) + return [p for p in valid_pdus if p] def _check_sigs_and_hash(self, room_version, pdu): return make_deferred_yieldable( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 3cb4b9442085..25ed1257f11b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -213,7 +213,7 @@ def backfill(self, dest, room_id, limit, extremities): ).addErrback(unwrapFirstError) ) - defer.returnValue(pdus) + return pdus @defer.inlineCallbacks @log_function @@ -245,7 +245,7 @@ def get_pdu( ev = self._get_pdu_cache.get(event_id) if ev: - defer.returnValue(ev) + return ev pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {}) @@ -307,7 +307,7 @@ def get_pdu( if signed_pdu: self._get_pdu_cache[event_id] = signed_pdu - defer.returnValue(signed_pdu) + return signed_pdu @defer.inlineCallbacks @log_function @@ -355,7 +355,7 @@ def get_state_for_room(self, destination, room_id, event_id): auth_chain.sort(key=lambda e: e.depth) - defer.returnValue((pdus, auth_chain)) + return (pdus, auth_chain) except HttpResponseException as e: if e.code == 400 or e.code == 404: logger.info("Failed to use get_room_state_ids API, falling back") @@ -404,7 +404,7 @@ def get_state_for_room(self, destination, room_id, event_id): signed_auth.sort(key=lambda e: e.depth) - defer.returnValue((signed_pdus, signed_auth)) + return (signed_pdus, signed_auth) @defer.inlineCallbacks def get_events_from_store_or_dest(self, destination, room_id, event_ids): @@ -429,7 +429,7 @@ def get_events_from_store_or_dest(self, destination, room_id, event_ids): missing_events.discard(k) if not missing_events: - defer.returnValue((signed_events, failed_to_fetch)) + return (signed_events, failed_to_fetch) logger.debug( "Fetching unknown state/auth events %s for room %s", @@ -465,7 +465,7 @@ def get_events_from_store_or_dest(self, destination, room_id, event_ids): # We removed all events we successfully fetched from `batch` failed_to_fetch.update(batch) - defer.returnValue((signed_events, failed_to_fetch)) + return (signed_events, failed_to_fetch) @defer.inlineCallbacks @log_function @@ -485,7 +485,7 @@ def get_event_auth(self, destination, room_id, event_id): signed_auth.sort(key=lambda e: e.depth) - defer.returnValue(signed_auth) + return signed_auth @defer.inlineCallbacks def _try_destination_list(self, description, destinations, callback): @@ -521,7 +521,7 @@ def _try_destination_list(self, description, destinations, callback): try: res = yield callback(destination) - defer.returnValue(res) + return res except InvalidResponseError as e: logger.warn("Failed to %s via %s: %s", description, destination, e) except HttpResponseException as e: @@ -615,7 +615,7 @@ def send_request(destination): event_dict=pdu_dict, ) - defer.returnValue((destination, ev, 
event_format)) + return (destination, ev, event_format) return self._try_destination_list( "make_" + membership, destinations, send_request @@ -728,13 +728,11 @@ def send_request(destination): check_authchain_validity(signed_auth) - defer.returnValue( - { - "state": signed_state, - "auth_chain": signed_auth, - "origin": destination, - } - ) + return { + "state": signed_state, + "auth_chain": signed_auth, + "origin": destination, + } return self._try_destination_list("send_join", destinations, send_request) @@ -758,7 +756,7 @@ def send_invite(self, destination, room_id, event_id, pdu): # FIXME: We should handle signature failures more gracefully. - defer.returnValue(pdu) + return pdu @defer.inlineCallbacks def _do_send_invite(self, destination, pdu, room_version): @@ -786,7 +784,7 @@ def _do_send_invite(self, destination, pdu, room_version): "invite_room_state": pdu.unsigned.get("invite_room_state", []), }, ) - defer.returnValue(content) + return content except HttpResponseException as e: if e.code in [400, 404]: err = e.to_synapse_error() @@ -821,7 +819,7 @@ def _do_send_invite(self, destination, pdu, room_version): event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), ) - defer.returnValue(content) + return content def send_leave(self, destinations, pdu): """Sends a leave event to one of a list of homeservers. @@ -856,7 +854,7 @@ def send_request(destination): ) logger.debug("Got content: %s", content) - defer.returnValue(None) + return None return self._try_destination_list("send_leave", destinations, send_request) @@ -917,7 +915,7 @@ def query_auth(self, destination, room_id, event_id, local_auth): "missing": content.get("missing", []), } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_missing_events( @@ -974,7 +972,7 @@ def get_missing_events( # get_missing_events signed_events = [] - defer.returnValue(signed_events) + return signed_events @defer.inlineCallbacks def forward_third_party_invite(self, destinations, room_id, event_dict): @@ -986,7 +984,7 @@ def forward_third_party_invite(self, destinations, room_id, event_dict): yield self.transport_layer.exchange_third_party_invite( destination=destination, room_id=room_id, event_dict=event_dict ) - defer.returnValue(None) + return None except CodeMessageException: raise except Exception as e: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8c0a18b12039..b4b9a05ca629 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -99,7 +99,7 @@ def on_backfill_request(self, origin, room_id, versions, limit): res = self._transaction_from_pdus(pdus).get_dict() - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks @log_function @@ -126,7 +126,7 @@ def on_incoming_transaction(self, origin, transaction_data): origin, transaction, request_time ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _handle_incoming_transaction(self, origin, transaction, request_time): @@ -147,8 +147,7 @@ def _handle_incoming_transaction(self, origin, transaction, request_time): "[%s] We've already responded to this request", transaction.transaction_id, ) - defer.returnValue(response) - return + return response logger.debug("[%s] Transaction is new", transaction.transaction_id) @@ -163,7 +162,7 @@ def _handle_incoming_transaction(self, origin, transaction, request_time): yield self.transaction_actions.set_response( origin, transaction, 400, response ) - defer.returnValue((400, response)) + return (400, 
response) received_pdus_counter.inc(len(transaction.pdus)) @@ -265,7 +264,7 @@ def process_pdus_for_room(room_id): logger.debug("Returning: %s", str(response)) yield self.transaction_actions.set_response(origin, transaction, 200, response) - defer.returnValue((200, response)) + return (200, response) @defer.inlineCallbacks def received_edu(self, origin, edu_type, content): @@ -298,7 +297,7 @@ def on_context_state_request(self, origin, room_id, event_id): event_id, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_state_ids_request(self, origin, room_id, event_id): @@ -315,9 +314,7 @@ def on_state_ids_request(self, origin, room_id, event_id): state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id) auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids) - defer.returnValue( - (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) - ) + return (200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}) @defer.inlineCallbacks def _on_context_state_request_compute(self, room_id, event_id): @@ -336,12 +333,10 @@ def _on_context_state_request_compute(self, room_id, event_id): ) ) - defer.returnValue( - { - "pdus": [pdu.get_pdu_json() for pdu in pdus], - "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], - } - ) + return { + "pdus": [pdu.get_pdu_json() for pdu in pdus], + "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain], + } @defer.inlineCallbacks @log_function @@ -349,15 +344,15 @@ def on_pdu_request(self, origin, event_id): pdu = yield self.handler.get_persisted_pdu(origin, event_id) if pdu: - defer.returnValue((200, self._transaction_from_pdus([pdu]).get_dict())) + return (200, self._transaction_from_pdus([pdu]).get_dict()) else: - defer.returnValue((404, "")) + return (404, "") @defer.inlineCallbacks def on_query_request(self, query_type, args): received_queries_counter.labels(query_type).inc() resp = yield self.registry.on_query(query_type, args) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_make_join_request(self, origin, room_id, user_id, supported_versions): @@ -371,9 +366,7 @@ def on_make_join_request(self, origin, room_id, user_id, supported_versions): pdu = yield self.handler.on_make_join_request(room_id, user_id) time_now = self._clock.time_msec() - defer.returnValue( - {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - ) + return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @defer.inlineCallbacks def on_invite_request(self, origin, content, room_version): @@ -391,7 +384,7 @@ def on_invite_request(self, origin, content, room_version): yield self.check_server_matches_acl(origin_host, pdu.room_id) ret_pdu = yield self.handler.on_invite_request(origin, pdu) time_now = self._clock.time_msec() - defer.returnValue({"event": ret_pdu.get_pdu_json(time_now)}) + return {"event": ret_pdu.get_pdu_json(time_now)} @defer.inlineCallbacks def on_send_join_request(self, origin, content, room_id): @@ -407,16 +400,14 @@ def on_send_join_request(self, origin, content, room_id): logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures) res_pdus = yield self.handler.on_send_join_request(origin, pdu) time_now = self._clock.time_msec() - defer.returnValue( - ( - 200, - { - "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], - "auth_chain": [ - p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] - ], - }, - ) + return ( + 200, + { + "state": [p.get_pdu_json(time_now) for p in res_pdus["state"]], + "auth_chain": [ + 
p.get_pdu_json(time_now) for p in res_pdus["auth_chain"] + ], + }, ) @defer.inlineCallbacks @@ -428,9 +419,7 @@ def on_make_leave_request(self, origin, room_id, user_id): room_version = yield self.store.get_room_version(room_id) time_now = self._clock.time_msec() - defer.returnValue( - {"event": pdu.get_pdu_json(time_now), "room_version": room_version} - ) + return {"event": pdu.get_pdu_json(time_now), "room_version": room_version} @defer.inlineCallbacks def on_send_leave_request(self, origin, content, room_id): @@ -445,7 +434,7 @@ def on_send_leave_request(self, origin, content, room_id): logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures) yield self.handler.on_send_leave_request(origin, pdu) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_event_auth(self, origin, room_id, event_id): @@ -456,7 +445,7 @@ def on_event_auth(self, origin, room_id, event_id): time_now = self._clock.time_msec() auth_pdus = yield self.handler.on_event_auth(event_id) res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]} - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks def on_query_auth_request(self, origin, content, room_id, event_id): @@ -509,7 +498,7 @@ def on_query_auth_request(self, origin, content, room_id, event_id): "missing": ret.get("missing", []), } - defer.returnValue((200, send_content)) + return (200, send_content) @log_function def on_query_client_keys(self, origin, content): @@ -548,7 +537,7 @@ def on_claim_client_keys(self, origin, content): ), ) - defer.returnValue({"one_time_keys": json_result}) + return {"one_time_keys": json_result} @defer.inlineCallbacks @log_function @@ -580,9 +569,7 @@ def on_get_missing_events( time_now = self._clock.time_msec() - defer.returnValue( - {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} - ) + return {"events": [ev.get_pdu_json(time_now) for ev in missing_events]} @log_function def on_openid_userinfo(self, token): @@ -676,14 +663,14 @@ def exchange_third_party_invite( ret = yield self.handler.exchange_third_party_invite( sender_user_id, target_user_id, room_id, signed ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): ret = yield self.handler.on_exchange_third_party_invite_request( origin, room_id, event_dict ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def check_server_matches_acl(self, server_name, room_id): diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 9aab12c0d328..fad980b89307 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -374,7 +374,7 @@ def _get_device_update_edus(self, limit): assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs" - defer.returnValue((edus, now_stream_id)) + return (edus, now_stream_id) @defer.inlineCallbacks def _get_to_device_message_edus(self, limit): @@ -393,4 +393,4 @@ def _get_to_device_message_edus(self, limit): for content in contents ] - defer.returnValue((edus, stream_id)) + return (edus, stream_id) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 0460a8c4acc6..52706302f228 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -133,4 +133,4 @@ def json_data_cb(): ) success = False - defer.returnValue(success) + 
return success diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 1aae9ec9e74d..2a6709ff48e9 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -183,7 +183,7 @@ def send_transaction(self, transaction, json_data_callback=None): try_trailing_slash_on_400=True, ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -201,7 +201,7 @@ def make_query( ignore_backoff=ignore_backoff, ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -259,7 +259,7 @@ def make_membership_event(self, destination, room_id, user_id, membership, param ignore_backoff=ignore_backoff, ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -270,7 +270,7 @@ def send_join(self, destination, room_id, event_id, content): destination=destination, path=path, data=content ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -288,7 +288,7 @@ def send_leave(self, destination, room_id, event_id, content): ignore_backoff=True, ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -299,7 +299,7 @@ def send_invite_v1(self, destination, room_id, event_id, content): destination=destination, path=path, data=content, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -310,7 +310,7 @@ def send_invite_v2(self, destination, room_id, event_id, content): destination=destination, path=path, data=content, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -339,7 +339,7 @@ def get_public_rooms( destination=remote_server, path=path, args=args, ignore_backoff=True ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -350,7 +350,7 @@ def exchange_third_party_invite(self, destination, room_id, event_dict): destination=destination, path=path, data=event_dict ) - defer.returnValue(response) + return response @defer.inlineCallbacks @log_function @@ -359,7 +359,7 @@ def get_event_auth(self, destination, room_id, event_id): content = yield self.client.get_json(destination=destination, path=path) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -370,7 +370,7 @@ def send_query_auth(self, destination, room_id, event_id, content): destination=destination, path=path, data=content ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -402,7 +402,7 @@ def query_client_keys(self, destination, query_content, timeout): content = yield self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -426,7 +426,7 @@ def query_user_devices(self, destination, user_id, timeout): content = yield self.client.get_json( destination=destination, path=path, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -460,7 +460,7 @@ def claim_client_keys(self, destination, query_content, timeout): content = yield self.client.post_json( destination=destination, path=path, data=query_content, timeout=timeout ) - defer.returnValue(content) + return content @defer.inlineCallbacks @log_function @@ -488,7 +488,7 @@ def get_missing_events( timeout=timeout, ) - defer.returnValue(content) + return content @log_function def 
get_group_profile(self, destination, group_id, requester_user_id): diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index f497711133e9..dfd7ae041ba6 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -157,7 +157,7 @@ def on_renew_attestation(self, group_id, user_id, content): yield self.store.update_remote_attestion(group_id, user_id, attestation) - defer.returnValue({}) + return {} def _start_renew_attestations(self): return run_as_background_process("renew_attestations", self._renew_attestations) diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py index 168c9e3f84b5..d50e691436f6 100644 --- a/synapse/groups/groups_server.py +++ b/synapse/groups/groups_server.py @@ -85,7 +85,7 @@ def check_group_is_ours( if not is_admin: raise SynapseError(403, "User is not admin in group") - defer.returnValue(group) + return group @defer.inlineCallbacks def get_group_summary(self, group_id, requester_user_id): @@ -151,22 +151,20 @@ def get_group_summary(self, group_id, requester_user_id): group_id, requester_user_id ) - defer.returnValue( - { - "profile": profile, - "users_section": { - "users": users, - "roles": roles, - "total_user_count_estimate": 0, # TODO - }, - "rooms_section": { - "rooms": rooms, - "categories": categories, - "total_room_count_estimate": 0, # TODO - }, - "user": membership_info, - } - ) + return { + "profile": profile, + "users_section": { + "users": users, + "roles": roles, + "total_user_count_estimate": 0, # TODO + }, + "rooms_section": { + "rooms": rooms, + "categories": categories, + "total_room_count_estimate": 0, # TODO + }, + "user": membership_info, + } @defer.inlineCallbacks def update_group_summary_room( @@ -192,7 +190,7 @@ def update_group_summary_room( is_public=is_public, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_summary_room( @@ -208,7 +206,7 @@ def delete_group_summary_room( group_id=group_id, room_id=room_id, category_id=category_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def set_group_join_policy(self, group_id, requester_user_id, content): @@ -228,7 +226,7 @@ def set_group_join_policy(self, group_id, requester_user_id, content): yield self.store.set_group_join_policy(group_id, join_policy=join_policy) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_categories(self, group_id, requester_user_id): @@ -237,7 +235,7 @@ def get_group_categories(self, group_id, requester_user_id): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) categories = yield self.store.get_group_categories(group_id=group_id) - defer.returnValue({"categories": categories}) + return {"categories": categories} @defer.inlineCallbacks def get_group_category(self, group_id, requester_user_id, category_id): @@ -249,7 +247,7 @@ def get_group_category(self, group_id, requester_user_id, category_id): group_id=group_id, category_id=category_id ) - defer.returnValue(res) + return res @defer.inlineCallbacks def update_group_category(self, group_id, requester_user_id, category_id, content): @@ -269,7 +267,7 @@ def update_group_category(self, group_id, requester_user_id, category_id, conten profile=profile, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_category(self, group_id, requester_user_id, category_id): @@ -283,7 +281,7 @@ def delete_group_category(self, group_id, requester_user_id, category_id): group_id=group_id, category_id=category_id ) - defer.returnValue({}) + return {} 
@defer.inlineCallbacks def get_group_roles(self, group_id, requester_user_id): @@ -292,7 +290,7 @@ def get_group_roles(self, group_id, requester_user_id): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) roles = yield self.store.get_group_roles(group_id=group_id) - defer.returnValue({"roles": roles}) + return {"roles": roles} @defer.inlineCallbacks def get_group_role(self, group_id, requester_user_id, role_id): @@ -301,7 +299,7 @@ def get_group_role(self, group_id, requester_user_id, role_id): yield self.check_group_is_ours(group_id, requester_user_id, and_exists=True) res = yield self.store.get_group_role(group_id=group_id, role_id=role_id) - defer.returnValue(res) + return res @defer.inlineCallbacks def update_group_role(self, group_id, requester_user_id, role_id, content): @@ -319,7 +317,7 @@ def update_group_role(self, group_id, requester_user_id, role_id, content): group_id=group_id, role_id=role_id, is_public=is_public, profile=profile ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_role(self, group_id, requester_user_id, role_id): @@ -331,7 +329,7 @@ def delete_group_role(self, group_id, requester_user_id, role_id): yield self.store.remove_group_role(group_id=group_id, role_id=role_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def update_group_summary_user( @@ -355,7 +353,7 @@ def update_group_summary_user( is_public=is_public, ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_id): @@ -369,7 +367,7 @@ def delete_group_summary_user(self, group_id, requester_user_id, user_id, role_i group_id=group_id, user_id=user_id, role_id=role_id ) - defer.returnValue({}) + return {} @defer.inlineCallbacks def get_group_profile(self, group_id, requester_user_id): @@ -391,7 +389,7 @@ def get_group_profile(self, group_id, requester_user_id): group_description = {key: group[key] for key in cols} group_description["is_openly_joinable"] = group["join_policy"] == "open" - defer.returnValue(group_description) + return group_description else: raise SynapseError(404, "Unknown group") @@ -461,9 +459,7 @@ def get_users_in_group(self, group_id, requester_user_id): # TODO: If admin add lists of users whose attestations have timed out - defer.returnValue( - {"chunk": chunk, "total_user_count_estimate": len(user_results)} - ) + return {"chunk": chunk, "total_user_count_estimate": len(user_results)} @defer.inlineCallbacks def get_invited_users_in_group(self, group_id, requester_user_id): @@ -494,9 +490,7 @@ def get_invited_users_in_group(self, group_id, requester_user_id): logger.warn("Error getting profile for %s: %s", user_id, e) user_profiles.append(user_profile) - defer.returnValue( - {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} - ) + return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)} @defer.inlineCallbacks def get_rooms_in_group(self, group_id, requester_user_id): @@ -533,9 +527,7 @@ def get_rooms_in_group(self, group_id, requester_user_id): chunk.sort(key=lambda e: -e["num_joined_members"]) - defer.returnValue( - {"chunk": chunk, "total_room_count_estimate": len(room_results)} - ) + return {"chunk": chunk, "total_room_count_estimate": len(room_results)} @defer.inlineCallbacks def add_room_to_group(self, group_id, requester_user_id, room_id, content): @@ -551,7 +543,7 @@ def add_room_to_group(self, group_id, requester_user_id, room_id, content): yield 
self.store.add_room_to_group(group_id, room_id, is_public=is_public) - defer.returnValue({}) + return {} @defer.inlineCallbacks def update_room_in_group( @@ -574,7 +566,7 @@ def update_room_in_group( else: raise SynapseError(400, "Uknown config option") - defer.returnValue({}) + return {} @defer.inlineCallbacks def remove_room_from_group(self, group_id, requester_user_id, room_id): @@ -586,7 +578,7 @@ def remove_room_from_group(self, group_id, requester_user_id, room_id): yield self.store.remove_room_from_group(group_id, room_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def invite_to_group(self, group_id, user_id, requester_user_id, content): @@ -644,9 +636,9 @@ def invite_to_group(self, group_id, user_id, requester_user_id, content): ) elif res["state"] == "invite": yield self.store.add_group_invite(group_id, user_id) - defer.returnValue({"state": "invite"}) + return {"state": "invite"} elif res["state"] == "reject": - defer.returnValue({"state": "reject"}) + return {"state": "reject"} else: raise SynapseError(502, "Unknown state returned by HS") @@ -679,7 +671,7 @@ def _add_user(self, group_id, user_id, content): remote_attestation=remote_attestation, ) - defer.returnValue(local_attestation) + return local_attestation @defer.inlineCallbacks def accept_invite(self, group_id, requester_user_id, content): @@ -699,7 +691,7 @@ def accept_invite(self, group_id, requester_user_id, content): local_attestation = yield self._add_user(group_id, requester_user_id, content) - defer.returnValue({"state": "join", "attestation": local_attestation}) + return {"state": "join", "attestation": local_attestation} @defer.inlineCallbacks def join_group(self, group_id, requester_user_id, content): @@ -716,7 +708,7 @@ def join_group(self, group_id, requester_user_id, content): local_attestation = yield self._add_user(group_id, requester_user_id, content) - defer.returnValue({"state": "join", "attestation": local_attestation}) + return {"state": "join", "attestation": local_attestation} @defer.inlineCallbacks def knock(self, group_id, requester_user_id, content): @@ -769,7 +761,7 @@ def remove_user_from_group(self, group_id, user_id, requester_user_id, content): if not self.hs.is_mine_id(user_id): yield self.store.maybe_delete_remote_profile_cache(user_id) - defer.returnValue({}) + return {} @defer.inlineCallbacks def create_group(self, group_id, requester_user_id, content): @@ -845,7 +837,7 @@ def create_group(self, group_id, requester_user_id, content): avatar_url=user_profile.get("avatar_url"), ) - defer.returnValue({"group_id": group_id}) + return {"group_id": group_id} @defer.inlineCallbacks def delete_group(self, group_id, requester_user_id): diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index e62e6cab7702..8acd9f9a8312 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -51,8 +51,8 @@ def get_new_events(self, user, from_key, **kwargs): {"type": account_data_type, "content": content, "room_id": room_id} ) - defer.returnValue((results, current_stream_id)) + return (results, current_stream_id) @defer.inlineCallbacks def get_pagination_rows(self, user, config, key): - defer.returnValue(([], config.to_id)) + return ([], config.to_id) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 1f1708ba7d5d..930204e2d034 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -193,7 +193,7 @@ def _get_email_addresses_for_user(self, user_id): if 
threepid["medium"] == "email": addresses.append(threepid["address"]) - defer.returnValue(addresses) + return addresses @defer.inlineCallbacks def _get_renewal_token(self, user_id): @@ -214,7 +214,7 @@ def _get_renewal_token(self, user_id): try: renewal_token = stringutils.random_string(32) yield self.store.set_renewal_token_for_user(user_id, renewal_token) - defer.returnValue(renewal_token) + return renewal_token except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") @@ -254,4 +254,4 @@ def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False): user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent ) - defer.returnValue(expiration_ts) + return expiration_ts diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py index fbef2f3d3848..46ac73106de7 100644 --- a/synapse/handlers/acme.py +++ b/synapse/handlers/acme.py @@ -100,4 +100,4 @@ def provision_certificate(self): logger.exception("Failed saving!") raise - defer.returnValue(True) + return True diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index e8a651e231b8..2f22f56ca4d7 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -49,7 +49,7 @@ def get_whois(self, user): "devices": {"": {"sessions": [{"connections": connections}]}}, } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_users(self): @@ -61,7 +61,7 @@ def get_users(self): """ ret = yield self.store.get_users() - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_users_paginate(self, order, start, limit): @@ -78,7 +78,7 @@ def get_users_paginate(self, order, start, limit): """ ret = yield self.store.get_users_paginate(order, start, limit) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def search_users(self, term): @@ -92,7 +92,7 @@ def search_users(self, term): """ ret = yield self.store.search_users(term) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def export_user_data(self, user_id, writer): @@ -225,7 +225,7 @@ def export_user_data(self, user_id, writer): state = yield self.store.get_state_for_event(event_id) writer.write_state(room_id, event_id, state) - defer.returnValue(writer.finished()) + return writer.finished() class ExfiltrationWriter(object): diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 8f089f0e3355..d1a51df6f9c0 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -167,8 +167,8 @@ def query_user_exists(self, user_id): for user_service in user_query_services: is_known_user = yield self.appservice_api.query_user(user_service, user_id) if is_known_user: - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def query_room_alias_exists(self, room_alias): @@ -192,7 +192,7 @@ def query_room_alias_exists(self, room_alias): if is_known_alias: # the alias exists now so don't query more ASes. 
result = yield self.store.get_association_from_room_alias(room_alias) - defer.returnValue(result) + return result @defer.inlineCallbacks def query_3pe(self, kind, protocol, fields): @@ -215,7 +215,7 @@ def query_3pe(self, kind, protocol, fields): if success: ret.extend(result) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_3pe_protocols(self, only_protocol=None): @@ -254,7 +254,7 @@ def _merge_instances(infos): for p in protocols.keys(): protocols[p] = _merge_instances(protocols[p]) - defer.returnValue(protocols) + return protocols @defer.inlineCallbacks def _get_services_for_event(self, event): @@ -276,7 +276,7 @@ def _get_services_for_event(self, event): if (yield s.is_interested(event, self.store)): interested_list.append(s) - defer.returnValue(interested_list) + return interested_list def _get_services_for_user(self, user_id): services = self.store.get_app_services() @@ -293,23 +293,23 @@ def _is_unknown_user(self, user_id): if not self.is_mine_id(user_id): # we don't know if they are unknown or not since it isn't one of our # users. We can't poke ASes. - defer.returnValue(False) + return False return user_info = yield self.store.get_user_by_id(user_id) if user_info: - defer.returnValue(False) + return False return # user not found; could be the AS though, so check. services = self.store.get_app_services() service_list = [s for s in services if s.sender == user_id] - defer.returnValue(len(service_list) == 0) + return len(service_list) == 0 @defer.inlineCallbacks def _check_user_exists(self, user_id): unknown_user = yield self._is_unknown_user(user_id) if unknown_user: exists = yield self.query_user_exists(user_id) - defer.returnValue(exists) - defer.returnValue(True) + return exists + return True diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d4d65749754e..05be5b7c4820 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -155,7 +155,7 @@ def validate_user_via_ui_auth(self, requester, request_body, clientip): if user_id != requester.user.to_string(): raise AuthError(403, "Invalid auth") - defer.returnValue(params) + return params @defer.inlineCallbacks def check_auth(self, flows, clientdict, clientip, password_servlet=False): @@ -280,7 +280,7 @@ def check_auth(self, flows, clientdict, clientip, password_servlet=False): creds, list(clientdict), ) - defer.returnValue((creds, clientdict, session["id"])) + return (creds, clientdict, session["id"]) ret = self._auth_dict_for_flows(flows, session) ret["completed"] = list(creds) @@ -307,8 +307,8 @@ def add_oob_auth(self, stagetype, authdict, clientip): if result: creds[stagetype] = result self._save_session(sess) - defer.returnValue(True) - defer.returnValue(False) + return True + return False def get_session_id(self, clientdict): """ @@ -379,7 +379,7 @@ def _check_auth_dict(self, authdict, clientip, password_servlet=False): res = yield checker( authdict, clientip=clientip, password_servlet=password_servlet ) - defer.returnValue(res) + return res # build a v1-login-style dict out of the authdict and fall back to the # v1 code @@ -389,7 +389,7 @@ def _check_auth_dict(self, authdict, clientip, password_servlet=False): raise SynapseError(400, "", Codes.MISSING_PARAM) (canonical_id, callback) = yield self.validate_login(user_id, authdict) - defer.returnValue(canonical_id) + return canonical_id @defer.inlineCallbacks def _check_recaptcha(self, authdict, clientip, **kwargs): @@ -433,7 +433,7 @@ def _check_recaptcha(self, authdict, clientip, **kwargs): resp_body.get("hostname"), ) if 
resp_body["success"]: - defer.returnValue(True) + return True raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) def _check_email_identity(self, authdict, **kwargs): @@ -502,7 +502,7 @@ def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs): threepid["threepid_creds"] = authdict["threepid_creds"] - defer.returnValue(threepid) + return threepid def _get_params_recaptcha(self): return {"public_key": self.hs.config.recaptcha_public_key} @@ -606,7 +606,7 @@ def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms): yield self.store.delete_access_token(access_token) raise StoreError(400, "Login raced against device deletion") - defer.returnValue(access_token) + return access_token @defer.inlineCallbacks def check_user_exists(self, user_id): @@ -629,8 +629,8 @@ def check_user_exists(self, user_id): self.ratelimit_login_per_account(user_id) res = yield self._find_user_id_and_pwd_hash(user_id) if res is not None: - defer.returnValue(res[0]) - defer.returnValue(None) + return res[0] + return None @defer.inlineCallbacks def _find_user_id_and_pwd_hash(self, user_id): @@ -661,7 +661,7 @@ def _find_user_id_and_pwd_hash(self, user_id): user_id, user_infos.keys(), ) - defer.returnValue(result) + return result def get_supported_login_types(self): """Get a the login types supported for the /login API @@ -722,7 +722,7 @@ def validate_login(self, username, login_submission): known_login_type = True is_valid = yield provider.check_password(qualified_user_id, password) if is_valid: - defer.returnValue((qualified_user_id, None)) + return (qualified_user_id, None) if not hasattr(provider, "get_supported_login_types") or not hasattr( provider, "check_auth" @@ -756,7 +756,7 @@ def validate_login(self, username, login_submission): if result: if isinstance(result, str): result = (result, None) - defer.returnValue(result) + return result if login_type == LoginType.PASSWORD and self.hs.config.password_localdb_enabled: known_login_type = True @@ -766,7 +766,7 @@ def validate_login(self, username, login_submission): ) if canonical_user_id: - defer.returnValue((canonical_user_id, None)) + return (canonical_user_id, None) if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) @@ -814,9 +814,9 @@ def check_password_provider_3pid(self, medium, address, password): if isinstance(result, str): # If it's a str, set callback function to None result = (result, None) - defer.returnValue(result) + return result - defer.returnValue((None, None)) + return (None, None) @defer.inlineCallbacks def _check_local_password(self, user_id, password): @@ -838,7 +838,7 @@ def _check_local_password(self, user_id, password): """ lookupres = yield self._find_user_id_and_pwd_hash(user_id) if not lookupres: - defer.returnValue(None) + return None (user_id, password_hash) = lookupres # If the password hash is None, the account has likely been deactivated @@ -850,8 +850,8 @@ def _check_local_password(self, user_id, password): result = yield self.validate_hash(password, password_hash) if not result: logger.warn("Failed password login for user %s", user_id) - defer.returnValue(None) - defer.returnValue(user_id) + return None + return user_id @defer.inlineCallbacks def validate_short_term_login_token_and_get_user_id(self, login_token): @@ -865,7 +865,7 @@ def validate_short_term_login_token_and_get_user_id(self, login_token): raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) self.ratelimit_login_per_account(user_id) yield self.auth.check_auth_blocking(user_id) - 
defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def delete_access_token(self, access_token): @@ -976,7 +976,7 @@ def delete_threepid(self, user_id, medium, address, id_server=None): ) yield self.store.user_delete_threepid(user_id, medium, address) - defer.returnValue(result) + return result def _save_session(self, session): # TODO: Persistent storage diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index e8f9da609893..5f804d1f1330 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -125,7 +125,7 @@ def deactivate_account(self, user_id, erase_data, id_server=None): # Mark the user as deactivated. yield self.store.set_user_deactivated_status(user_id, True) - defer.returnValue(identity_server_supports_unbinding) + return identity_server_supports_unbinding def _start_user_parting(self): """ diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 99e841309296..d6ab33778393 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -64,7 +64,7 @@ def get_devices_by_user(self, user_id): for device in devices: _update_device_from_client_ips(device, ips) - defer.returnValue(devices) + return devices @defer.inlineCallbacks def get_device(self, user_id, device_id): @@ -85,7 +85,7 @@ def get_device(self, user_id, device_id): raise errors.NotFoundError ips = yield self.store.get_last_client_ip_by_device(user_id, device_id) _update_device_from_client_ips(device, ips) - defer.returnValue(device) + return device @measure_func("device.get_user_ids_changed") @defer.inlineCallbacks @@ -200,9 +200,7 @@ def get_user_ids_changed(self, user_id, from_token): possibly_joined = [] possibly_left = [] - defer.returnValue( - {"changed": list(possibly_joined), "left": list(possibly_left)} - ) + return {"changed": list(possibly_joined), "left": list(possibly_left)} class DeviceHandler(DeviceWorkerHandler): @@ -250,7 +248,7 @@ def check_device_registered( ) if new_device: yield self.notify_device_update(user_id, [device_id]) - defer.returnValue(device_id) + return device_id # if the device id is not specified, we'll autogen one, but loop a few # times in case of a clash. 
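
The next two hunks fall inside the device-ID autogeneration loop described by
the comment above. Its overall shape is roughly the following sketch
(ensure_device, store.register_device and the retry bound are illustrative
stand-ins, not the real Synapse names):

    import secrets

    from twisted.internet import defer

    MAX_ATTEMPTS = 5  # illustrative bound

    @defer.inlineCallbacks
    def ensure_device(store, user_id, device_id=None):
        # If the caller supplied a device_id, just register it.
        if device_id is not None:
            yield store.register_device(user_id, device_id)
            return device_id

        # Otherwise generate random IDs, retrying in the (unlikely)
        # event of a clash with an existing device.
        for _ in range(MAX_ATTEMPTS):
            candidate = secrets.token_hex(5).upper()
            is_new = yield store.register_device(user_id, candidate)
            if is_new:
                return candidate

        # The real code raises a store-level error here.
        raise RuntimeError("Couldn't generate a device ID.")

Both return statements in the sketch use the plain-return style that this patch
switches the real loop over to.
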
@@ -264,7 +262,7 @@ def check_device_registered( ) if new_device: yield self.notify_device_update(user_id, [device_id]) - defer.returnValue(device_id) + return device_id attempts += 1 raise errors.StoreError(500, "Couldn't generate a device ID.") @@ -411,9 +409,7 @@ def notify_device_update(self, user_id, device_ids): @defer.inlineCallbacks def on_federation_query_user_devices(self, user_id): stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id) - defer.returnValue( - {"user_id": user_id, "stream_id": stream_id, "devices": devices} - ) + return {"user_id": user_id, "stream_id": stream_id, "devices": devices} @defer.inlineCallbacks def user_left_room(self, user, room_id): @@ -623,7 +619,7 @@ def _need_to_do_resync(self, user_id, updates): for _, stream_id, prev_ids, _ in updates: if not prev_ids: # We always do a resync if there are no previous IDs - defer.returnValue(True) + return True for prev_id in prev_ids: if prev_id == extremity: @@ -633,8 +629,8 @@ def _need_to_do_resync(self, user_id, updates): elif prev_id in stream_id_in_updates: continue else: - defer.returnValue(True) + return True stream_id_in_updates.add(stream_id) - defer.returnValue(False) + return False diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 42d5b3db30c6..0fd423197c93 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -210,7 +210,7 @@ def delete_association(self, requester, room_alias, send_event=True): except AuthError as e: logger.info("Failed to update alias events: %s", e) - defer.returnValue(room_id) + return room_id @defer.inlineCallbacks def delete_appservice_association(self, service, room_alias): @@ -229,7 +229,7 @@ def _delete_association(self, room_alias): room_id = yield self.store.delete_room_alias(room_alias) - defer.returnValue(room_id) + return room_id @defer.inlineCallbacks def get_association(self, room_alias): @@ -277,7 +277,7 @@ def get_association(self, room_alias): else: servers = list(servers) - defer.returnValue({"room_id": room_id, "servers": servers}) + return {"room_id": room_id, "servers": servers} return @defer.inlineCallbacks @@ -289,7 +289,7 @@ def on_directory_query(self, args): result = yield self.get_association_from_room_alias(room_alias) if result is not None: - defer.returnValue({"room_id": result.room_id, "servers": result.servers}) + return {"room_id": result.room_id, "servers": result.servers} else: raise SynapseError( 404, @@ -342,7 +342,7 @@ def get_association_from_room_alias(self, room_alias): # Query AS to see if it exists as_handler = self.appservice_handler result = yield as_handler.query_room_alias_exists(room_alias) - defer.returnValue(result) + return result def can_modify_alias(self, alias, user_id=None): # Any application service "interested" in an alias they are regexing on @@ -369,10 +369,10 @@ def _user_can_delete_alias(self, alias, user_id): creator = yield self.store.get_room_alias_creator(alias.to_string()) if creator is not None and creator == user_id: - defer.returnValue(True) + return True is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id)) - defer.returnValue(is_admin) + return is_admin @defer.inlineCallbacks def edit_published_room_list(self, requester, room_id, visibility): diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index fdfe8611b6ca..1300b540e34c 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -144,7 +144,7 @@ def do_remote_query(destination): ) ) - defer.returnValue({"device_keys": 
results, "failures": failures}) + return {"device_keys": results, "failures": failures} @defer.inlineCallbacks def query_local_devices(self, query): @@ -189,7 +189,7 @@ def query_local_devices(self, query): r["unsigned"]["device_display_name"] = display_name result_dict[user_id][device_id] = r - defer.returnValue(result_dict) + return result_dict @defer.inlineCallbacks def on_federation_query_client_keys(self, query_body): @@ -197,7 +197,7 @@ def on_federation_query_client_keys(self, query_body): """ device_keys_query = query_body.get("device_keys", {}) res = yield self.query_local_devices(device_keys_query) - defer.returnValue({"device_keys": res}) + return {"device_keys": res} @defer.inlineCallbacks def claim_one_time_keys(self, query, timeout): @@ -259,7 +259,7 @@ def claim_client_keys(destination): ), ) - defer.returnValue({"one_time_keys": json_result, "failures": failures}) + return {"one_time_keys": json_result, "failures": failures} @defer.inlineCallbacks def upload_keys_for_user(self, user_id, device_id, keys): @@ -297,7 +297,7 @@ def upload_keys_for_user(self, user_id, device_id, keys): result = yield self.store.count_e2e_one_time_keys(user_id, device_id) - defer.returnValue({"one_time_key_counts": result}) + return {"one_time_key_counts": result} @defer.inlineCallbacks def _upload_one_time_keys_for_user( diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index ebd807bca682..41b871fc5953 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -84,7 +84,7 @@ def get_room_keys(self, user_id, version, room_id=None, session_id=None): user_id, version, room_id, session_id ) - defer.returnValue(results) + return results @defer.inlineCallbacks def delete_room_keys(self, user_id, version, room_id=None, session_id=None): @@ -262,7 +262,7 @@ def create_version(self, user_id, version_info): new_version = yield self.store.create_e2e_room_keys_version( user_id, version_info ) - defer.returnValue(new_version) + return new_version @defer.inlineCallbacks def get_version_info(self, user_id, version=None): @@ -292,7 +292,7 @@ def get_version_info(self, user_id, version=None): raise NotFoundError("Unknown backup version") else: raise - defer.returnValue(res) + return res @defer.inlineCallbacks def delete_version(self, user_id, version=None): @@ -350,4 +350,4 @@ def update_version(self, user_id, version, version_info): user_id, version, version_info ) - defer.returnValue({}) + return {} diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 6a38328af316..2f1f10a9af82 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -143,7 +143,7 @@ def get_stream( "end": tokens[1].to_string(), } - defer.returnValue(chunk) + return chunk class EventHandler(BaseHandler): @@ -166,7 +166,7 @@ def get_event(self, user, room_id, event_id): event = yield self.store.get_event(event_id, check_room_id=room_id) if not event: - defer.returnValue(None) + return None return users = yield self.store.get_users_in_room(event.room_id) @@ -179,4 +179,4 @@ def get_event(self, user, room_id, event_id): if not filtered: raise AuthError(403, "You don't have permission to access that event.") - defer.returnValue(event) + return event diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 57be968c67c8..2aa208a2b80e 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -210,7 +210,7 @@ def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False): event_id, origin, ) - 
defer.returnValue(None) + return None state = None auth_chain = [] @@ -676,7 +676,7 @@ def backfill(self, dest, room_id, limit, extremities): events = [e for e in events if e.event_id not in seen_events] if not events: - defer.returnValue([]) + return [] event_map = {e.event_id: e for e in events} @@ -838,7 +838,7 @@ def backfill(self, dest, room_id, limit, extremities): # TODO: We can probably do something more clever here. yield self._handle_new_event(dest, event, backfilled=True) - defer.returnValue(events) + return events @defer.inlineCallbacks def maybe_backfill(self, room_id, current_depth): @@ -894,7 +894,7 @@ def maybe_backfill(self, room_id, current_depth): ) if not filtered_extremities: - defer.returnValue(False) + return False # Check if we reached a point where we should start backfilling. sorted_extremeties_tuple = sorted(extremities.items(), key=lambda e: -int(e[1])) @@ -965,7 +965,7 @@ def try_backfill(domains): # If this succeeded then we probably already have the # appropriate stuff. # TODO: We can probably do something more intelligent here. - defer.returnValue(True) + return True except SynapseError as e: logger.info("Failed to backfill from %s because %s", dom, e) continue @@ -985,11 +985,11 @@ def try_backfill(domains): logger.exception("Failed to backfill from %s because %s", dom, e) continue - defer.returnValue(False) + return False success = yield try_backfill(likely_domains) if success: - defer.returnValue(True) + return True # Huh, well *those* domains didn't work out. Lets try some domains # from the time. @@ -1031,11 +1031,11 @@ def try_backfill(domains): [dom for dom, _ in likely_domains if dom not in tried_domains] ) if success: - defer.returnValue(True) + return True tried_domains.update(dom for dom, _ in likely_domains) - defer.returnValue(False) + return False def _sanity_check_event(self, ev): """ @@ -1082,7 +1082,7 @@ def send_invite(self, target_host, event): pdu=event, ) - defer.returnValue(pdu) + return pdu @defer.inlineCallbacks def on_event_auth(self, event_id): @@ -1090,7 +1090,7 @@ def on_event_auth(self, event_id): auth = yield self.store.get_auth_chain( [auth_id for auth_id in event.auth_event_ids()], include_given=True ) - defer.returnValue([e for e in auth]) + return [e for e in auth] @log_function @defer.inlineCallbacks @@ -1177,7 +1177,7 @@ def do_invite_join(self, target_hosts, room_id, joinee, content): run_in_background(self._handle_queued_pdus, room_queue) - defer.returnValue(True) + return True @defer.inlineCallbacks def _handle_queued_pdus(self, room_queue): @@ -1247,7 +1247,7 @@ def on_make_join_request(self, room_id, user_id): room_version, event, context, do_sig_check=False ) - defer.returnValue(event) + return event @defer.inlineCallbacks @log_function @@ -1308,7 +1308,7 @@ def on_send_join_request(self, origin, pdu): state = yield self.store.get_events(list(prev_state_ids.values())) - defer.returnValue({"state": list(state.values()), "auth_chain": auth_chain}) + return {"state": list(state.values()), "auth_chain": auth_chain} @defer.inlineCallbacks def on_invite_request(self, origin, pdu): @@ -1364,7 +1364,7 @@ def on_invite_request(self, origin, pdu): context = yield self.state_handler.compute_event_context(event) yield self.persist_events_and_notify([(event, context)]) - defer.returnValue(event) + return event @defer.inlineCallbacks def do_remotely_reject_invite(self, target_hosts, room_id, user_id): @@ -1389,7 +1389,7 @@ def do_remotely_reject_invite(self, target_hosts, room_id, user_id): context = yield 
self.state_handler.compute_event_context(event) yield self.persist_events_and_notify([(event, context)]) - defer.returnValue(event) + return event @defer.inlineCallbacks def _make_and_verify_event( @@ -1407,7 +1407,7 @@ def _make_and_verify_event( assert event.user_id == user_id assert event.state_key == user_id assert event.room_id == room_id - defer.returnValue((origin, event, format_ver)) + return (origin, event, format_ver) @defer.inlineCallbacks @log_function @@ -1451,7 +1451,7 @@ def on_make_leave_request(self, room_id, user_id): logger.warn("Failed to create new leave %r because %s", event, e) raise e - defer.returnValue(event) + return event @defer.inlineCallbacks @log_function @@ -1484,7 +1484,7 @@ def on_send_leave_request(self, origin, pdu): event.signatures, ) - defer.returnValue(None) + return None @defer.inlineCallbacks def get_state_for_pdu(self, room_id, event_id): @@ -1512,9 +1512,9 @@ def get_state_for_pdu(self, room_id, event_id): del results[(event.type, event.state_key)] res = list(results.values()) - defer.returnValue(res) + return res else: - defer.returnValue([]) + return [] @defer.inlineCallbacks def get_state_ids_for_pdu(self, room_id, event_id): @@ -1539,9 +1539,9 @@ def get_state_ids_for_pdu(self, room_id, event_id): else: results.pop((event.type, event.state_key), None) - defer.returnValue(list(results.values())) + return list(results.values()) else: - defer.returnValue([]) + return [] @defer.inlineCallbacks @log_function @@ -1554,7 +1554,7 @@ def on_backfill_request(self, origin, room_id, pdu_list, limit): events = yield filter_events_for_server(self.store, origin, events) - defer.returnValue(events) + return events @defer.inlineCallbacks @log_function @@ -1584,9 +1584,9 @@ def get_persisted_pdu(self, origin, event_id): events = yield filter_events_for_server(self.store, origin, [event]) event = events[0] - defer.returnValue(event) + return event else: - defer.returnValue(None) + return None def get_min_depth_for_context(self, context): return self.store.get_min_depth(context) @@ -1618,7 +1618,7 @@ def _handle_new_event( self.store.remove_push_actions_from_staging, event.event_id ) - defer.returnValue(context) + return context @defer.inlineCallbacks def _handle_new_events(self, origin, event_infos, backfilled=False): @@ -1641,7 +1641,7 @@ def prep(ev_info): auth_events=ev_info.get("auth_events"), backfilled=backfilled, ) - defer.returnValue(res) + return res contexts = yield make_deferred_yieldable( defer.gatherResults( @@ -1800,7 +1800,7 @@ def _prep_event(self, origin, event, state, auth_events, backfilled): if event.type == EventTypes.GuestAccess and not context.rejected: yield self.maybe_kick_guest_users(event) - defer.returnValue(context) + return context @defer.inlineCallbacks def _check_for_soft_fail(self, event, state, backfilled): @@ -1919,7 +1919,7 @@ def on_query_auth( logger.debug("on_query_auth returning: %s", ret) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def on_get_missing_events( @@ -1942,7 +1942,7 @@ def on_get_missing_events( self.store, origin, missing_events ) - defer.returnValue(missing_events) + return missing_events @defer.inlineCallbacks @log_function @@ -2418,16 +2418,14 @@ def get_next(it, opt=None): logger.debug("construct_auth_difference returning") - defer.returnValue( - { - "auth_chain": local_auth, - "rejects": { - e.event_id: {"reason": reason_map[e.event_id], "proof": None} - for e in base_remote_rejected - }, - "missing": [e.event_id for e in missing_locals], - } - ) + return { + "auth_chain": local_auth, + 
"rejects": { + e.event_id: {"reason": reason_map[e.event_id], "proof": None} + for e in base_remote_rejected + }, + "missing": [e.event_id for e in missing_locals], + } @defer.inlineCallbacks @log_function @@ -2575,7 +2573,7 @@ def add_display_name_to_third_party_invite( builder=builder ) EventValidator().validate_new(event) - defer.returnValue((event, context)) + return (event, context) @defer.inlineCallbacks def _check_signature(self, event, context): diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 7da63bb64361..7b67c8ae0f5d 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -162,7 +162,7 @@ def get_group_summary(self, group_id, requester_user_id): res.setdefault("user", {})["is_publicised"] = is_publicised - defer.returnValue(res) + return res @defer.inlineCallbacks def create_group(self, group_id, user_id, content): @@ -207,7 +207,7 @@ def create_group(self, group_id, user_id, content): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_users_in_group(self, group_id, requester_user_id): @@ -217,7 +217,7 @@ def get_users_in_group(self, group_id, requester_user_id): res = yield self.groups_server_handler.get_users_in_group( group_id, requester_user_id ) - defer.returnValue(res) + return res group_server_name = get_domain_from_id(group_id) @@ -244,7 +244,7 @@ def get_users_in_group(self, group_id, requester_user_id): res["chunk"] = valid_entries - defer.returnValue(res) + return res @defer.inlineCallbacks def join_group(self, group_id, user_id, content): @@ -285,7 +285,7 @@ def join_group(self, group_id, user_id, content): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue({}) + return {} @defer.inlineCallbacks def accept_invite(self, group_id, user_id, content): @@ -326,7 +326,7 @@ def accept_invite(self, group_id, user_id, content): ) self.notifier.on_new_event("groups_key", token, users=[user_id]) - defer.returnValue({}) + return {} @defer.inlineCallbacks def invite(self, group_id, user_id, requester_user_id, config): @@ -346,7 +346,7 @@ def invite(self, group_id, user_id, requester_user_id, config): content, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def on_invite(self, group_id, user_id, content): @@ -377,7 +377,7 @@ def on_invite(self, group_id, user_id, content): logger.warn("No profile for user %s: %s", user_id, e) user_profile = {} - defer.returnValue({"state": "invite", "user_profile": user_profile}) + return {"state": "invite", "user_profile": user_profile} @defer.inlineCallbacks def remove_user_from_group(self, group_id, user_id, requester_user_id, content): @@ -406,7 +406,7 @@ def remove_user_from_group(self, group_id, user_id, requester_user_id, content): content, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def user_removed_from_group(self, group_id, user_id, content): @@ -421,7 +421,7 @@ def user_removed_from_group(self, group_id, user_id, content): @defer.inlineCallbacks def get_joined_groups(self, user_id): group_ids = yield self.store.get_joined_groups(user_id) - defer.returnValue({"groups": group_ids}) + return {"groups": group_ids} @defer.inlineCallbacks def get_publicised_groups_for_user(self, user_id): @@ -433,14 +433,14 @@ def get_publicised_groups_for_user(self, user_id): for app_service in self.store.get_app_services(): result.extend(app_service.get_groups_for_user(user_id)) - defer.returnValue({"groups": result}) + return 
{"groups": result} else: bulk_result = yield self.transport_client.bulk_get_publicised_groups( get_domain_from_id(user_id), [user_id] ) result = bulk_result.get("users", {}).get(user_id) # TODO: Verify attestations - defer.returnValue({"groups": result}) + return {"groups": result} @defer.inlineCallbacks def bulk_get_publicised_groups(self, user_ids, proxy=True): @@ -475,4 +475,4 @@ def bulk_get_publicised_groups(self, user_ids, proxy=True): for app_service in self.store.get_app_services(): results[uid].extend(app_service.get_groups_for_user(uid)) - defer.returnValue({"users": results}) + return {"users": results} diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 546d6169e9fa..d199521b5878 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -82,7 +82,7 @@ def threepid_from_creds(self, creds): "%s is not a trusted ID server: rejecting 3pid " + "credentials", id_server, ) - defer.returnValue(None) + return None try: data = yield self.http_client.get_json( @@ -95,8 +95,8 @@ def threepid_from_creds(self, creds): raise e.to_synapse_error() if "medium" in data: - defer.returnValue(data) - defer.returnValue(None) + return data + return None @defer.inlineCallbacks def bind_threepid(self, creds, mxid): @@ -133,7 +133,7 @@ def bind_threepid(self, creds, mxid): ) except CodeMessageException as e: data = json.loads(e.msg) # XXX WAT? - defer.returnValue(data) + return data @defer.inlineCallbacks def try_unbind_threepid(self, mxid, threepid): @@ -161,7 +161,7 @@ def try_unbind_threepid(self, mxid, threepid): # We don't know where to unbind, so we don't have a choice but to return if not id_servers: - defer.returnValue(False) + return False changed = True for id_server in id_servers: @@ -169,7 +169,7 @@ def try_unbind_threepid(self, mxid, threepid): mxid, threepid, id_server ) - defer.returnValue(changed) + return changed @defer.inlineCallbacks def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server): @@ -224,7 +224,7 @@ def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server): id_server=id_server, ) - defer.returnValue(changed) + return changed @defer.inlineCallbacks def requestEmailToken( @@ -250,7 +250,7 @@ def requestEmailToken( % (id_server, "/_matrix/identity/api/v1/validate/email/requestToken"), params, ) - defer.returnValue(data) + return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() @@ -278,7 +278,7 @@ def requestMsisdnToken( % (id_server, "/_matrix/identity/api/v1/validate/msisdn/requestToken"), params, ) - defer.returnValue(data) + return data except HttpResponseException as e: logger.info("Proxied requestToken failed: %r", e) raise e.to_synapse_error() diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 54c966c8a67b..42d6650ed96c 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -250,7 +250,7 @@ def handle_room(event): "end": now_token.to_string(), } - defer.returnValue(ret) + return ret @defer.inlineCallbacks def room_initial_sync(self, requester, room_id, pagin_config=None): @@ -301,7 +301,7 @@ def room_initial_sync(self, requester, room_id, pagin_config=None): result["account_data"] = account_data_events - defer.returnValue(result) + return result @defer.inlineCallbacks def _room_initial_sync_parted( @@ -330,28 +330,24 @@ def _room_initial_sync_parted( time_now = self.clock.time_msec() - defer.returnValue( - { - "membership": membership, - "room_id": room_id, - 
"messages": { - "chunk": ( - yield self._event_serializer.serialize_events( - messages, time_now - ) - ), - "start": start_token.to_string(), - "end": end_token.to_string(), - }, - "state": ( - yield self._event_serializer.serialize_events( - room_state.values(), time_now - ) + return { + "membership": membership, + "room_id": room_id, + "messages": { + "chunk": ( + yield self._event_serializer.serialize_events(messages, time_now) ), - "presence": [], - "receipts": [], - } - ) + "start": start_token.to_string(), + "end": end_token.to_string(), + }, + "state": ( + yield self._event_serializer.serialize_events( + room_state.values(), time_now + ) + ), + "presence": [], + "receipts": [], + } @defer.inlineCallbacks def _room_initial_sync_joined( @@ -384,13 +380,13 @@ def _room_initial_sync_joined( def get_presence(): # If presence is disabled, return an empty list if not self.hs.config.use_presence: - defer.returnValue([]) + return [] states = yield presence_handler.get_states( [m.user_id for m in room_members], as_event=True ) - defer.returnValue(states) + return states @defer.inlineCallbacks def get_receipts(): @@ -399,7 +395,7 @@ def get_receipts(): ) if not receipts: receipts = [] - defer.returnValue(receipts) + return receipts presence, receipts, (messages, token) = yield make_deferred_yieldable( defer.gatherResults( @@ -442,7 +438,7 @@ def get_receipts(): if not is_peeking: ret["membership"] = membership - defer.returnValue(ret) + return ret @defer.inlineCallbacks def _check_in_room_or_world_readable(self, room_id, user_id): @@ -453,7 +449,7 @@ def _check_in_room_or_world_readable(self, room_id, user_id): # * The user is a guest user, and has joined the room # else it will throw. member_event = yield self.auth.check_user_was_in_room(room_id, user_id) - defer.returnValue((member_event.membership, member_event.event_id)) + return (member_event.membership, member_event.event_id) return except AuthError: visibility = yield self.state_handler.get_current_state( @@ -463,7 +459,7 @@ def _check_in_room_or_world_readable(self, room_id, user_id): visibility and visibility.content["history_visibility"] == "world_readable" ): - defer.returnValue((Membership.JOIN, None)) + return (Membership.JOIN, None) return raise AuthError( 403, "Guest access not allowed", errcode=Codes.GUEST_ACCESS_FORBIDDEN diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6d7a987f1333..8b27e23378c9 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -87,7 +87,7 @@ def get_room_data( ) data = room_state[membership_event_id].get(key) - defer.returnValue(data) + return data @defer.inlineCallbacks def get_state_events( @@ -174,7 +174,7 @@ def get_state_events( # events, as clients won't use them. 
bundle_aggregations=False, ) - defer.returnValue(events) + return events @defer.inlineCallbacks def get_joined_members(self, requester, room_id): @@ -213,15 +213,13 @@ def get_joined_members(self, requester, room_id): # Loop fell through, AS has no interested users in room raise AuthError(403, "Appservice not in room") - defer.returnValue( - { - user_id: { - "avatar_url": profile.avatar_url, - "display_name": profile.display_name, - } - for user_id, profile in iteritems(users_with_profile) + return { + user_id: { + "avatar_url": profile.avatar_url, + "display_name": profile.display_name, } - ) + for user_id, profile in iteritems(users_with_profile) + } class EventCreationHandler(object): @@ -398,7 +396,7 @@ def create_event( self.validator.validate_new(event) - defer.returnValue((event, context)) + return (event, context) def _is_exempt_from_privacy_policy(self, builder, requester): """"Determine if an event to be sent is exempt from having to consent @@ -425,9 +423,9 @@ def _is_exempt_from_privacy_policy(self, builder, requester): @defer.inlineCallbacks def _is_server_notices_room(self, room_id): if self.config.server_notices_mxid is None: - defer.returnValue(False) + return False user_ids = yield self.store.get_users_in_room(room_id) - defer.returnValue(self.config.server_notices_mxid in user_ids) + return self.config.server_notices_mxid in user_ids @defer.inlineCallbacks def assert_accepted_privacy_policy(self, requester): @@ -507,7 +505,7 @@ def send_nonmember_event(self, requester, event, context, ratelimit=True): event.event_id, prev_state.event_id, ) - defer.returnValue(prev_state) + return prev_state yield self.handle_new_client_event( requester=requester, event=event, context=context, ratelimit=ratelimit @@ -531,7 +529,7 @@ def deduplicate_state_event(self, event, context): prev_content = encode_canonical_json(prev_event.content) next_content = encode_canonical_json(event.content) if prev_content == next_content: - defer.returnValue(prev_event) + return prev_event return @defer.inlineCallbacks @@ -563,7 +561,7 @@ def create_and_send_nonmember_event( yield self.send_nonmember_event( requester, event, context, ratelimit=ratelimit ) - defer.returnValue(event) + return event @measure_func("create_new_client_event") @defer.inlineCallbacks @@ -626,7 +624,7 @@ def create_new_client_event( logger.debug("Created event %s", event.event_id) - defer.returnValue((event, context)) + return (event, context) @measure_func("handle_new_client_event") @defer.inlineCallbacks diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 20bcfed334f9..d83aab3f74b5 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -242,13 +242,11 @@ def get_messages( ) if not events: - defer.returnValue( - { - "chunk": [], - "start": pagin_config.from_token.to_string(), - "end": next_token.to_string(), - } - ) + return { + "chunk": [], + "start": pagin_config.from_token.to_string(), + "end": next_token.to_string(), + } state = None if event_filter and event_filter.lazy_load_members() and len(events) > 0: @@ -286,4 +284,4 @@ def get_messages( ) ) - defer.returnValue(chunk) + return chunk diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6f3537e43545..ea54d0b991f5 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -461,7 +461,7 @@ def _user_syncing(): if affect_presence: run_in_background(_end) - defer.returnValue(_user_syncing()) + return _user_syncing() def get_currently_syncing_users(self): """Get the set of 
user ids that are currently syncing on this HS. @@ -556,7 +556,7 @@ def current_state_for_user(self, user_id): """Get the current presence state for a user. """ res = yield self.current_state_for_users([user_id]) - defer.returnValue(res[user_id]) + return res[user_id] @defer.inlineCallbacks def current_state_for_users(self, user_ids): @@ -585,7 +585,7 @@ def current_state_for_users(self, user_ids): states.update(new) self.user_to_current_state.update(new) - defer.returnValue(states) + return states @defer.inlineCallbacks def _persist_and_notify(self, states): @@ -681,7 +681,7 @@ def incoming_presence(self, origin, content): def get_state(self, target_user, as_event=False): results = yield self.get_states([target_user.to_string()], as_event=as_event) - defer.returnValue(results[0]) + return results[0] @defer.inlineCallbacks def get_states(self, target_user_ids, as_event=False): @@ -703,17 +703,15 @@ def get_states(self, target_user_ids, as_event=False): now = self.clock.time_msec() if as_event: - defer.returnValue( - [ - { - "type": "m.presence", - "content": format_user_presence_state(state, now), - } - for state in updates - ] - ) + return [ + { + "type": "m.presence", + "content": format_user_presence_state(state, now), + } + for state in updates + ] else: - defer.returnValue(updates) + return updates @defer.inlineCallbacks def set_state(self, target_user, state, ignore_status_msg=False): @@ -757,9 +755,9 @@ def is_visible(self, observed_user, observer_user): ) if observer_room_ids & observed_room_ids: - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def get_all_presence_updates(self, last_id, current_id): @@ -778,7 +776,7 @@ def get_all_presence_updates(self, last_id, current_id): # TODO(markjh): replicate the unpersisted changes. # This could use the in-memory stores for recent changes. rows = yield self.store.get_all_presence_updates(last_id, current_id) - defer.returnValue(rows) + return rows def notify_new_event(self): """Called when new events have happened. Handles users and servers @@ -1034,7 +1032,7 @@ def get_new_events( # # Hence this guard where we just return nothing so that the sync # doesn't return. C.f. #5503. 
- defer.returnValue(([], max_token)) + return ([], max_token) presence = self.get_presence_handler() stream_change_cache = self.store.presence_stream_cache @@ -1068,17 +1066,11 @@ def get_new_events( updates = yield presence.current_state_for_users(user_ids_changed) if include_offline: - defer.returnValue((list(updates.values()), max_token)) + return (list(updates.values()), max_token) else: - defer.returnValue( - ( - [ - s - for s in itervalues(updates) - if s.state != PresenceState.OFFLINE - ], - max_token, - ) + return ( + [s for s in itervalues(updates) if s.state != PresenceState.OFFLINE], + max_token, ) def get_current_key(self): @@ -1107,7 +1099,7 @@ def _get_interested_in(self, user, explicit_room_id, cache_context): ) users_interested_in.update(user_ids) - defer.returnValue(users_interested_in) + return users_interested_in def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): @@ -1287,7 +1279,7 @@ def get_interested_parties(store, states): # Always notify self users_to_states.setdefault(state.user_id, []).append(state) - defer.returnValue((room_ids_to_states, users_to_states)) + return (room_ids_to_states, users_to_states) @defer.inlineCallbacks @@ -1321,4 +1313,4 @@ def get_interested_remotes(store, states, state_handler): host = get_domain_from_id(user_id) hosts_and_states.append(([host], states)) - defer.returnValue(hosts_and_states) + return hosts_and_states diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a2388a709160..2cc237e6a53f 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -73,7 +73,7 @@ def get_profile(self, user_id): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue({"displayname": displayname, "avatar_url": avatar_url}) + return {"displayname": displayname, "avatar_url": avatar_url} else: try: result = yield self.federation.make_query( @@ -82,7 +82,7 @@ def get_profile(self, user_id): args={"user_id": user_id}, ignore_backoff=True, ) - defer.returnValue(result) + return result except RequestSendFailed as e: raise_from(SynapseError(502, "Failed to fetch profile"), e) except HttpResponseException as e: @@ -108,10 +108,10 @@ def get_profile_from_cache(self, user_id): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue({"displayname": displayname, "avatar_url": avatar_url}) + return {"displayname": displayname, "avatar_url": avatar_url} else: profile = yield self.store.get_from_remote_profile_cache(user_id) - defer.returnValue(profile or {}) + return profile or {} @defer.inlineCallbacks def get_displayname(self, target_user): @@ -125,7 +125,7 @@ def get_displayname(self, target_user): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(displayname) + return displayname else: try: result = yield self.federation.make_query( @@ -139,7 +139,7 @@ def get_displayname(self, target_user): except HttpResponseException as e: raise e.to_synapse_error() - defer.returnValue(result["displayname"]) + return result["displayname"] @defer.inlineCallbacks def set_displayname(self, target_user, requester, new_displayname, by_admin=False): @@ -186,7 +186,7 @@ def get_avatar_url(self, target_user): if e.code == 404: raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(avatar_url) + return avatar_url else: try: result = yield self.federation.make_query( @@ -200,7 +200,7 @@ def get_avatar_url(self, target_user): except HttpResponseException as e: raise 
e.to_synapse_error() - defer.returnValue(result["avatar_url"]) + return result["avatar_url"] @defer.inlineCallbacks def set_avatar_url(self, target_user, requester, new_avatar_url, by_admin=False): @@ -251,7 +251,7 @@ def on_profile_query(self, args): raise SynapseError(404, "Profile was not found", Codes.NOT_FOUND) raise - defer.returnValue(response) + return response @defer.inlineCallbacks def _update_join_states(self, requester, target_user): diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index a85dd8cdee69..218d60f0c35b 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -84,7 +84,7 @@ def _handle_new_receipts(self, receipts): if min_batch_id is None: # no new receipts - defer.returnValue(False) + return False affected_room_ids = list(set([r.room_id for r in receipts])) @@ -94,7 +94,7 @@ def _handle_new_receipts(self, receipts): min_batch_id, max_batch_id, affected_room_ids ) - defer.returnValue(True) + return True @defer.inlineCallbacks def received_client_receipt(self, room_id, receipt_type, user_id, event_id): @@ -124,9 +124,9 @@ def get_receipts_for_room(self, room_id, to_key): ) if not result: - defer.returnValue([]) + return [] - defer.returnValue(result) + return result class ReceiptEventSource(object): @@ -139,13 +139,13 @@ def get_new_events(self, from_key, room_ids, **kwargs): to_key = yield self.get_current_key() if from_key == to_key: - defer.returnValue(([], to_key)) + return ([], to_key) events = yield self.store.get_linearized_receipts_for_rooms( room_ids, from_key=from_key, to_key=to_key ) - defer.returnValue((events, to_key)) + return (events, to_key) def get_current_key(self, direction="f"): return self.store.get_max_receipt_stream_id() @@ -164,4 +164,4 @@ def get_pagination_rows(self, user, config, key): room_ids, from_key=from_key, to_key=to_key ) - defer.returnValue((events, to_key)) + return (events, to_key) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index bb7cfd71b91d..4631fab94e39 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -265,7 +265,7 @@ def register_user( # Bind email to new account yield self._register_email_threepid(user_id, threepid_dict, None, False) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def _auto_join_rooms(self, user_id): @@ -360,7 +360,7 @@ def appservice_register(self, user_localpart, as_token): appservice_id=service_id, create_profile_with_displayname=user.localpart, ) - defer.returnValue(user_id) + return user_id @defer.inlineCallbacks def check_recaptcha(self, ip, private_key, challenge, response): @@ -461,7 +461,7 @@ def _generate_user_id(self, reseed=False): id = self._next_generated_user_id self._next_generated_user_id += 1 - defer.returnValue(str(id)) + return str(id) @defer.inlineCallbacks def _validate_captcha(self, ip_addr, private_key, challenge, response): @@ -481,7 +481,7 @@ def _validate_captcha(self, ip_addr, private_key, challenge, response): "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" 
+ "error=%s" % lines[1], } - defer.returnValue(json) + return json @defer.inlineCallbacks def _submit_captcha(self, ip_addr, private_key, challenge, response): @@ -497,7 +497,7 @@ def _submit_captcha(self, ip_addr, private_key, challenge, response): "response": response, }, ) - defer.returnValue(data) + return data @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): @@ -622,7 +622,7 @@ def register_device(self, user_id, device_id, initial_display_name, is_guest=Fal initial_display_name=initial_display_name, is_guest=is_guest, ) - defer.returnValue((r["device_id"], r["access_token"])) + return (r["device_id"], r["access_token"]) valid_until_ms = None if self.session_lifetime is not None: @@ -645,7 +645,7 @@ def register_device(self, user_id, device_id, initial_display_name, is_guest=Fal user_id, device_id=device_id, valid_until_ms=valid_until_ms ) - defer.returnValue((device_id, access_token)) + return (device_id, access_token) @defer.inlineCallbacks def post_registration_actions( @@ -798,7 +798,7 @@ def _register_msisdn_threepid(self, user_id, threepid, bind_msisdn): if ex.errcode == Codes.MISSING_PARAM: # This will only happen if the ID server returns a malformed response logger.info("Can't add incomplete 3pid") - defer.returnValue(None) + return None raise yield self._auth_handler.add_threepid( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index db3f8cb76b9f..5caa90c3b709 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -128,7 +128,7 @@ def upgrade_room(self, requester, old_room_id, new_version): old_room_id, new_version, # args for _upgrade_room ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def _upgrade_room(self, requester, old_room_id, new_version): @@ -193,7 +193,7 @@ def _upgrade_room(self, requester, old_room_id, new_version): requester, old_room_id, new_room_id, old_room_state ) - defer.returnValue(new_room_id) + return new_room_id @defer.inlineCallbacks def _update_upgraded_room_pls( @@ -671,7 +671,7 @@ def create_room(self, requester, config, ratelimit=True, creator_join_profile=No result["room_alias"] = room_alias.to_string() yield directory_handler.send_room_alias_update_event(requester, room_id) - defer.returnValue(result) + return result @defer.inlineCallbacks def _send_events_for_new_room( @@ -796,7 +796,7 @@ def _generate_room_id(self, creator_id, is_public): room_creator_user_id=creator_id, is_public=is_public, ) - defer.returnValue(gen_room_id) + return gen_room_id except StoreError: attempts += 1 raise StoreError(500, "Couldn't generate a room ID.") @@ -839,7 +839,7 @@ def filter_evts(events): event_id, get_prev_content=True, allow_none=True ) if not event: - defer.returnValue(None) + return None return filtered = yield (filter_evts([event])) @@ -890,7 +890,7 @@ def filter_evts(events): results["end"] = token.copy_and_replace("room_key", results["end"]).to_string() - defer.returnValue(results) + return results class RoomEventSource(object): @@ -941,7 +941,7 @@ def get_new_events( else: end_key = to_key - defer.returnValue((events, end_key)) + return (events, end_key) def get_current_key(self): return self.store.get_room_events_max_id() @@ -959,4 +959,4 @@ def get_pagination_rows(self, user, config, key): limit=config.limit, ) - defer.returnValue((events, next_key)) + return (events, next_key) diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py index aae696a7e8ce..e9094ad02b22 100644 --- a/synapse/handlers/room_list.py +++ b/synapse/handlers/room_list.py @@ 
-325,7 +325,7 @@ def get_order_for_room(room_id): current_limit=since_token.current_limit - 1, ).to_token() - defer.returnValue(results) + return results @defer.inlineCallbacks def _append_room_entry_to_chunk( @@ -420,7 +420,7 @@ def generate_room_entry( if join_rules_event: join_rule = join_rules_event.content.get("join_rule", None) if not allow_private and join_rule and join_rule != JoinRules.PUBLIC: - defer.returnValue(None) + return None # Return whether this room is open to federation users or not create_event = current_state.get((EventTypes.Create, "")) @@ -469,7 +469,7 @@ def generate_room_entry( if avatar_url: result["avatar_url"] = avatar_url - defer.returnValue(result) + return result @defer.inlineCallbacks def get_remote_public_room_list( @@ -482,7 +482,7 @@ def get_remote_public_room_list( third_party_instance_id=None, ): if not self.enable_room_list_search: - defer.returnValue({"chunk": [], "total_room_count_estimate": 0}) + return {"chunk": [], "total_room_count_estimate": 0} if search_filter: # We currently don't support searching across federation, so we have @@ -507,7 +507,7 @@ def get_remote_public_room_list( ] } - defer.returnValue(res) + return res def _get_remote_list_cached( self, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e0196ef83e6d..baea08ddd07f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -191,7 +191,7 @@ def _local_membership_update( ) if duplicate is not None: # Discard the new event since this membership change is a no-op. - defer.returnValue(duplicate) + return duplicate yield self.event_creation_handler.handle_new_client_event( requester, event, context, extra_users=[target], ratelimit=ratelimit @@ -233,7 +233,7 @@ def _local_membership_update( if prev_member_event.membership == Membership.JOIN: yield self._user_left_room(target, room_id) - defer.returnValue(event) + return event @defer.inlineCallbacks def copy_room_tags_and_direct_to_room(self, old_room_id, new_room_id, user_id): @@ -303,7 +303,7 @@ def update_membership( require_consent=require_consent, ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _update_membership( @@ -423,7 +423,7 @@ def _update_membership( same_membership = old_membership == effective_membership_state same_sender = requester.user.to_string() == old_state.sender if same_sender and same_membership and same_content: - defer.returnValue(old_state) + return old_state if old_membership in ["ban", "leave"] and action == "kick": raise AuthError(403, "The target user is not in the room") @@ -473,7 +473,7 @@ def _update_membership( ret = yield self._remote_join( requester, remote_room_hosts, room_id, target, content ) - defer.returnValue(ret) + return ret elif effective_membership_state == Membership.LEAVE: if not is_host_in_room: @@ -495,7 +495,7 @@ def _update_membership( res = yield self._remote_reject_invite( requester, remote_room_hosts, room_id, target ) - defer.returnValue(res) + return res res = yield self._local_membership_update( requester=requester, @@ -508,7 +508,7 @@ def _update_membership( content=content, require_consent=require_consent, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def send_membership_event( @@ -596,11 +596,11 @@ def _can_guest_join(self, current_state_ids): """ guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None) if not guest_access_id: - defer.returnValue(False) + return False guest_access = yield self.store.get_event(guest_access_id) - defer.returnValue( + 
return ( guest_access and guest_access.content and "guest_access" in guest_access.content @@ -635,7 +635,7 @@ def lookup_room_alias(self, room_alias): servers.remove(room_alias.domain) servers.insert(0, room_alias.domain) - defer.returnValue((RoomID.from_string(room_id), servers)) + return (RoomID.from_string(room_id), servers) @defer.inlineCallbacks def _get_inviter(self, user_id, room_id): @@ -643,7 +643,7 @@ def _get_inviter(self, user_id, room_id): user_id=user_id, room_id=room_id ) if invite: - defer.returnValue(UserID.from_string(invite.sender)) + return UserID.from_string(invite.sender) @defer.inlineCallbacks def do_3pid_invite( @@ -708,11 +708,11 @@ def _lookup_3pid(self, id_server, medium, address): if "signatures" not in data: raise AuthError(401, "No signatures on 3pid binding") yield self._verify_any_signature(data, id_server) - defer.returnValue(data["mxid"]) + return data["mxid"] except IOError as e: logger.warn("Error from identity server lookup: %s" % (e,)) - defer.returnValue(None) + return None @defer.inlineCallbacks def _verify_any_signature(self, data, server_hostname): @@ -904,7 +904,7 @@ def _ask_id_server_for_third_party_invite( if not public_keys: public_keys.append(fallback_public_key) display_name = data["display_name"] - defer.returnValue((token, public_keys, fallback_public_key, display_name)) + return (token, public_keys, fallback_public_key, display_name) @defer.inlineCallbacks def _is_host_in_room(self, current_state_ids): @@ -913,7 +913,7 @@ def _is_host_in_room(self, current_state_ids): create_event_id = current_state_ids.get(("m.room.create", "")) if len(current_state_ids) == 1 and create_event_id: # We can only get here if we're in the process of creating the room - defer.returnValue(True) + return True for etype, state_key in current_state_ids: if etype != EventTypes.Member or not self.hs.is_mine_id(state_key): @@ -925,16 +925,16 @@ def _is_host_in_room(self, current_state_ids): continue if event.membership == Membership.JOIN: - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def _is_server_notice_room(self, room_id): if self._server_notices_mxid is None: - defer.returnValue(False) + return False user_ids = yield self.store.get_users_in_room(room_id) - defer.returnValue(self._server_notices_mxid in user_ids) + return self._server_notices_mxid in user_ids class RoomMemberMasterHandler(RoomMemberHandler): @@ -978,7 +978,7 @@ def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): ret = yield fed_handler.do_remotely_reject_invite( remote_room_hosts, room_id, target.to_string() ) - defer.returnValue(ret) + return ret except Exception as e: # if we were unable to reject the exception, just mark # it as rejected on our end and plough ahead. 
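
Every hunk in this part of the series applies the same mechanical rewrite: inside a @defer.inlineCallbacks generator, defer.returnValue(x) becomes a plain "return x". For reference, a minimal, self-contained sketch of the two styles (defer.succeed(42) stands in for a real asynchronous call, and the function names are illustrative, not code from this series):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def old_style():
        result = yield defer.succeed(42)
        # Python 2 compatible: a generator cannot "return" a value, so
        # Twisted provides defer.returnValue(), which raises a special
        # control-flow exception to carry the result out.
        defer.returnValue(result)

    @defer.inlineCallbacks
    def new_style():
        result = yield defer.succeed(42)
        # Python 3 only: "return value" inside a generator raises
        # StopIteration(value), which inlineCallbacks unwraps into the
        # result of the Deferred it returns.
        return result

Both functions return a Deferred that fires with 42; the two spellings are equivalent on Python 3, where generators may return values. Note that where the old code had a bare "return" immediately after defer.returnValue() (for example in get_event() and _check_in_room_or_world_readable() above), that "return" is now unreachable but is left in place by this rewrite.
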
@@ -989,7 +989,7 @@ def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): logger.warn("Failed to reject invite: %s", e) yield self.store.locally_reject_invite(target.to_string(), room_id) - defer.returnValue({}) + return {} def _user_joined_room(self, target, room_id): """Implements RoomMemberHandler._user_joined_room diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index fc873a3ba651..75e96ae1a211 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -53,7 +53,7 @@ def _remote_join(self, requester, remote_room_hosts, room_id, user, content): yield self._user_joined_room(user, room_id) - defer.returnValue(ret) + return ret def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Implements RoomMemberHandler._remote_reject_invite diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index ddc4430d03a1..cd5e90bacbe8 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -69,7 +69,7 @@ def get_old_rooms_from_upgraded_room(self, room_id): # Scan through the old room for further predecessors room_id = predecessor["room_id"] - defer.returnValue(historical_room_ids) + return historical_room_ids @defer.inlineCallbacks def search(self, user, content, batch=None): @@ -186,13 +186,11 @@ def search(self, user, content, batch=None): room_ids.intersection_update({batch_group_key}) if not room_ids: - defer.returnValue( - { - "search_categories": { - "room_events": {"results": [], "count": 0, "highlights": []} - } + return { + "search_categories": { + "room_events": {"results": [], "count": 0, "highlights": []} } - ) + } rank_map = {} # event_id -> rank of event allowed_events = [] @@ -455,4 +453,4 @@ def search(self, user, content, batch=None): if global_next_batch: rooms_cat_res["next_batch"] = global_next_batch - defer.returnValue({"search_categories": {"room_events": rooms_cat_res}}) + return {"search_categories": {"room_events": rooms_cat_res}} diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index 6b364befd595..f065970c401a 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -48,7 +48,7 @@ def _get_key_change(self, prev_event_id, event_id, key_name, public_value): if not event and not prev_event: logger.debug("Neither event exists: %r %r", prev_event_id, event_id) - defer.returnValue(None) + return None prev_value = None value = None @@ -62,8 +62,8 @@ def _get_key_change(self, prev_event_id, event_id, key_name, public_value): logger.debug("prev_value: %r -> value: %r", prev_value, value) if value == public_value and prev_value != public_value: - defer.returnValue(True) + return True elif value != public_value and prev_value == public_value: - defer.returnValue(False) + return False else: - defer.returnValue(None) + return None diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index a0ee8db9884f..4449da6669ba 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -86,7 +86,7 @@ def _unsafe_process(self): # If still None then the initial background update hasn't happened yet if self.pos is None: - defer.returnValue(None) + return None # Loop round handling deltas until we're up to date while True: @@ -328,6 +328,6 @@ def _is_public_room(self, room_id): == "world_readable" ) ): - defer.returnValue(True) + return True else: - defer.returnValue(False) + return False diff --git a/synapse/handlers/sync.py 
b/synapse/handlers/sync.py index cd1ac0a27ab7..4007284e5b3d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -263,7 +263,7 @@ def wait_for_sync_for_user( timeout, full_state, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state): @@ -303,7 +303,7 @@ def current_sync_callback(before_token, after_token): lazy_loaded = "false" non_empty_sync_counter.labels(sync_type, lazy_loaded).inc() - defer.returnValue(result) + return result def current_sync_for_user(self, sync_config, since_token=None, full_state=False): """Get the sync for client needed to match what the server has now. @@ -317,7 +317,7 @@ def push_rules_for_user(self, user): user_id = user.to_string() rules = yield self.store.get_push_rules_for_user(user_id) rules = format_push_rules_for_user(user, rules) - defer.returnValue(rules) + return rules @defer.inlineCallbacks def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None): @@ -378,7 +378,7 @@ def ephemeral_by_room(self, sync_result_builder, now_token, since_token=None): event_copy = {k: v for (k, v) in iteritems(event) if k != "room_id"} ephemeral_by_room.setdefault(room_id, []).append(event_copy) - defer.returnValue((now_token, ephemeral_by_room)) + return (now_token, ephemeral_by_room) @defer.inlineCallbacks def _load_filtered_recents( @@ -426,8 +426,8 @@ def _load_filtered_recents( recents = [] if not limited or block_all_timeline: - defer.returnValue( - TimelineBatch(events=recents, prev_batch=now_token, limited=False) + return TimelineBatch( + events=recents, prev_batch=now_token, limited=False ) filtering_factor = 2 @@ -490,12 +490,10 @@ def _load_filtered_recents( prev_batch_token = now_token.copy_and_replace("room_key", room_key) - defer.returnValue( - TimelineBatch( - events=recents, - prev_batch=prev_batch_token, - limited=limited or newly_joined_room, - ) + return TimelineBatch( + events=recents, + prev_batch=prev_batch_token, + limited=limited or newly_joined_room, ) @defer.inlineCallbacks @@ -517,7 +515,7 @@ def get_state_after_event(self, event, state_filter=StateFilter.all()): if event.is_state(): state_ids = state_ids.copy() state_ids[(event.type, event.state_key)] = event.event_id - defer.returnValue(state_ids) + return state_ids @defer.inlineCallbacks def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()): @@ -549,7 +547,7 @@ def get_state_at(self, room_id, stream_position, state_filter=StateFilter.all()) else: # no events in this room - so presumably no state state = {} - defer.returnValue(state) + return state @defer.inlineCallbacks def compute_summary(self, room_id, sync_config, batch, state, now_token): @@ -579,7 +577,7 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): ) if not last_events: - defer.returnValue(None) + return None return last_event = last_events[-1] @@ -611,14 +609,14 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): if name_id: name = yield self.store.get_event(name_id, allow_none=True) if name and name.content.get("name"): - defer.returnValue(summary) + return summary if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True ) if canonical_alias and canonical_alias.content.get("alias"): - defer.returnValue(summary) + return summary me = sync_config.user.to_string() @@ -652,7 +650,7 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): summary["m.heroes"] = 
sorted([user_id for user_id in gone_user_ids])[0:5] if not sync_config.filter_collection.lazy_load_members(): - defer.returnValue(summary) + return summary # ensure we send membership events for heroes if needed cache_key = (sync_config.user.to_string(), sync_config.device_id) @@ -686,7 +684,7 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): cache.set(s.state_key, s.event_id) state[(EventTypes.Member, s.state_key)] = s - defer.returnValue(summary) + return summary def get_lazy_loaded_members_cache(self, cache_key): cache = self.lazy_loaded_members_cache.get(cache_key) @@ -871,14 +869,12 @@ def compute_state_delta( if state_ids: state = yield self.store.get_events(list(state_ids.values())) - defer.returnValue( - { - (e.type, e.state_key): e - for e in sync_config.filter_collection.filter_room_state( - list(state.values()) - ) - } - ) + return { + (e.type, e.state_key): e + for e in sync_config.filter_collection.filter_room_state( + list(state.values()) + ) + } @defer.inlineCallbacks def unread_notifs_for_room_id(self, room_id, sync_config): @@ -894,11 +890,11 @@ def unread_notifs_for_room_id(self, room_id, sync_config): notifs = yield self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), last_unread_event_id ) - defer.returnValue(notifs) + return notifs # There is no new information in this period, so your notification # count is whatever it was last time. - defer.returnValue(None) + return None @defer.inlineCallbacks def generate_sync_result(self, sync_config, since_token=None, full_state=False): @@ -989,19 +985,17 @@ def generate_sync_result(self, sync_config, since_token=None, full_state=False): "Sync result for newly joined room %s: %r", room_id, joined_room ) - defer.returnValue( - SyncResult( - presence=sync_result_builder.presence, - account_data=sync_result_builder.account_data, - joined=sync_result_builder.joined, - invited=sync_result_builder.invited, - archived=sync_result_builder.archived, - to_device=sync_result_builder.to_device, - device_lists=device_lists, - groups=sync_result_builder.groups, - device_one_time_keys_count=one_time_key_counts, - next_batch=sync_result_builder.now_token, - ) + return SyncResult( + presence=sync_result_builder.presence, + account_data=sync_result_builder.account_data, + joined=sync_result_builder.joined, + invited=sync_result_builder.invited, + archived=sync_result_builder.archived, + to_device=sync_result_builder.to_device, + device_lists=device_lists, + groups=sync_result_builder.groups, + device_one_time_keys_count=one_time_key_counts, + next_batch=sync_result_builder.now_token, ) @measure_func("_generate_sync_entry_for_groups") @@ -1124,11 +1118,9 @@ def _generate_sync_entry_for_device_list( # Remove any users that we still share a room with. 
newly_left_users -= users_who_share_room - defer.returnValue( - DeviceLists(changed=users_that_have_changed, left=newly_left_users) - ) + return DeviceLists(changed=users_that_have_changed, left=newly_left_users) else: - defer.returnValue(DeviceLists(changed=[], left=[])) + return DeviceLists(changed=[], left=[]) @defer.inlineCallbacks def _generate_sync_entry_for_to_device(self, sync_result_builder): @@ -1225,7 +1217,7 @@ def _generate_sync_entry_for_account_data(self, sync_result_builder): sync_result_builder.account_data = account_data_for_user - defer.returnValue(account_data_by_room) + return account_data_by_room @defer.inlineCallbacks def _generate_sync_entry_for_presence( @@ -1325,7 +1317,7 @@ def _generate_sync_entry_for_rooms(self, sync_result_builder, account_data_by_ro ) if not tags_by_room: logger.debug("no-oping sync") - defer.returnValue(([], [], [], [])) + return ([], [], [], []) ignored_account_data = yield self.store.get_global_account_data_by_type_for_user( "m.ignored_user_list", user_id=user_id @@ -1388,13 +1380,11 @@ def handle_room_entries(room_entry): newly_left_users -= newly_joined_or_invited_users - defer.returnValue( - ( - newly_joined_rooms, - newly_joined_or_invited_users, - newly_left_rooms, - newly_left_users, - ) + return ( + newly_joined_rooms, + newly_joined_or_invited_users, + newly_left_rooms, + newly_left_users, ) @defer.inlineCallbacks @@ -1414,13 +1404,13 @@ def _have_rooms_changed(self, sync_result_builder): ) if rooms_changed: - defer.returnValue(True) + return True stream_id = RoomStreamToken.parse_stream_token(since_token.room_key).stream for room_id in sync_result_builder.joined_room_ids: if self.store.has_room_changed_since(room_id, stream_id): - defer.returnValue(True) - defer.returnValue(False) + return True + return False @defer.inlineCallbacks def _get_rooms_changed(self, sync_result_builder, ignored_users): @@ -1637,7 +1627,7 @@ def _get_rooms_changed(self, sync_result_builder, ignored_users): ) room_entries.append(entry) - defer.returnValue((room_entries, invited, newly_joined_rooms, newly_left_rooms)) + return (room_entries, invited, newly_joined_rooms, newly_left_rooms) @defer.inlineCallbacks def _get_all_rooms(self, sync_result_builder, ignored_users): @@ -1711,7 +1701,7 @@ def _get_all_rooms(self, sync_result_builder, ignored_users): ) ) - defer.returnValue((room_entries, invited, [])) + return (room_entries, invited, []) @defer.inlineCallbacks def _generate_room_entry( @@ -1912,7 +1902,7 @@ def get_rooms_for_user_at(self, user_id, stream_ordering): joined_room_ids.add(room_id) joined_room_ids = frozenset(joined_room_ids) - defer.returnValue(joined_room_ids) + return joined_room_ids def _action_has_highlight(actions): diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index c3e0c8fc7e37..6b661aa93da8 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -140,7 +140,7 @@ def started_typing(self, target_user, auth_user, room_id, timeout): if was_present: # No point sending another notification - defer.returnValue(None) + return None self._push_update(member=member, typing=True) @@ -173,7 +173,7 @@ def user_left_room(self, user, room_id): def _stopped_typing(self, member): if member.user_id not in self._room_typing.get(member.room_id, set()): # No point - defer.returnValue(None) + return None self._member_typing_until.pop(member, None) self._member_last_federation_poke.pop(member, None) diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 
5de963095073..e53669e40de7 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -133,7 +133,7 @@ def _unsafe_process(self): # If still None then the initial background update hasn't happened yet if self.pos is None: - defer.returnValue(None) + return None # Loop round handling deltas until we're up to date while True: diff --git a/synapse/http/client.py b/synapse/http/client.py index 45d501095299..0ac20ebefce3 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -294,7 +294,7 @@ def request(self, method, uri, data=None, headers=None): logger.info( "Received response to %s %s: %s", method, redact_uri(uri), response.code ) - defer.returnValue(response) + return response except Exception as e: incoming_responses_counter.labels(method, "ERR").inc() logger.info( @@ -345,7 +345,7 @@ def post_urlencoded_get_json(self, uri, args={}, headers=None): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -385,7 +385,7 @@ def post_json_get_json(self, uri, post_json, headers=None): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -410,7 +410,7 @@ def get_json(self, uri, args={}, headers=None): ValueError: if the response was not JSON """ body = yield self.get_raw(uri, args, headers=headers) - defer.returnValue(json.loads(body)) + return json.loads(body) @defer.inlineCallbacks def put_json(self, uri, json_body, args={}, headers=None): @@ -453,7 +453,7 @@ def put_json(self, uri, json_body, args={}, headers=None): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(json.loads(body)) + return json.loads(body) else: raise HttpResponseException(response.code, response.phrase, body) @@ -488,7 +488,7 @@ def get_raw(self, uri, args={}, headers=None): body = yield make_deferred_yieldable(readBody(response)) if 200 <= response.code < 300: - defer.returnValue(body) + return body else: raise HttpResponseException(response.code, response.phrase, body) @@ -545,13 +545,11 @@ def get_file(self, url, output_stream, max_size=None, headers=None): except Exception as e: raise_from(SynapseError(502, ("Failed to download remote body: %s" % e)), e) - defer.returnValue( - ( - length, - resp_headers, - response.request.absoluteURI.decode("ascii"), - response.code, - ) + return ( + length, + resp_headers, + response.request.absoluteURI.decode("ascii"), + response.code, ) @@ -627,10 +625,10 @@ def post_urlencoded_get_raw(self, url, args={}): try: body = yield make_deferred_yieldable(readBody(response)) - defer.returnValue(body) + return body except PartialDownloadError as e: # twisted dislikes google's response, no content length. 
- defer.returnValue(e.response) + return e.response def encode_urlencode_args(args): diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 054c321a2015..c03ddb724ff5 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -177,7 +177,7 @@ def endpointForURI(_uri): res = yield make_deferred_yieldable( agent.request(method, uri, headers, bodyProducer) ) - defer.returnValue(res) + return res @defer.inlineCallbacks def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): @@ -205,24 +205,20 @@ def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): port = parsed_uri.port if port == -1: port = 8448 - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=parsed_uri.host, + target_port=port, ) if parsed_uri.port != -1: # there is an explicit port - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=parsed_uri.host, - target_port=parsed_uri.port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=parsed_uri.host, + target_port=parsed_uri.port, ) if lookup_well_known: @@ -259,7 +255,7 @@ def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): ) res = yield self._route_matrix_uri(new_uri, lookup_well_known=False) - defer.returnValue(res) + return res # try a SRV lookup service_name = b"_matrix._tcp.%s" % (parsed_uri.host,) @@ -283,13 +279,11 @@ def _route_matrix_uri(self, parsed_uri, lookup_well_known=True): parsed_uri.host.decode("ascii"), ) - defer.returnValue( - _RoutingResult( - host_header=parsed_uri.netloc, - tls_server_name=parsed_uri.host, - target_host=target_host, - target_port=port, - ) + return _RoutingResult( + host_header=parsed_uri.netloc, + tls_server_name=parsed_uri.host, + target_host=target_host, + target_port=port, ) @defer.inlineCallbacks @@ -314,7 +308,7 @@ def _get_well_known(self, server_name): if cache_period > 0: self._well_known_cache.set(server_name, result, cache_period) - defer.returnValue(result) + return result @defer.inlineCallbacks def _do_get_well_known(self, server_name): @@ -354,7 +348,7 @@ def _do_get_well_known(self, server_name): # after startup cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - defer.returnValue((None, cache_period)) + return (None, cache_period) result = parsed_body["m.server"].encode("ascii") @@ -369,7 +363,7 @@ def _do_get_well_known(self, server_name): else: cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) - defer.returnValue((result, cache_period)) + return (result, cache_period) @implementer(IStreamClientEndpoint) diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index ecc88f9b9667..b32188766de7 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -120,7 +120,7 @@ def resolve_service(self, service_name): if cache_entry: if all(s.expires > now for s in cache_entry): servers = list(cache_entry) - defer.returnValue(servers) + return servers try: answers, _, _ = yield make_deferred_yieldable( @@ -129,7 +129,7 @@ def resolve_service(self, service_name): except 
DNSNameError: # TODO: cache this. We can get the SOA out of the exception, and use # the negative-TTL value. - defer.returnValue([]) + return [] except DomainError as e: # We failed to resolve the name (other than a NameError) # Try something in the cache, else rereaise @@ -138,7 +138,7 @@ def resolve_service(self, service_name): logger.warn( "Failed to resolve %r, falling back to cache. %r", service_name, e ) - defer.returnValue(list(cache_entry)) + return list(cache_entry) else: raise e @@ -169,4 +169,4 @@ def resolve_service(self, service_name): ) self._cache[service_name] = list(servers) - defer.returnValue(servers) + return servers diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index e60334547e96..d07d35646403 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -158,7 +158,7 @@ def _handle_json_response(reactor, timeout_sec, request, response): response.code, response.phrase.decode("ascii", errors="replace"), ) - defer.returnValue(body) + return body class MatrixFederationHttpClient(object): @@ -256,7 +256,7 @@ def _send_request_with_optional_trailing_slash( response = yield self._send_request(request, **send_request_args) - defer.returnValue(response) + return response @defer.inlineCallbacks def _send_request( @@ -520,7 +520,7 @@ def _send_request( _flatten_response_never_received(e), ) raise - defer.returnValue(response) + return response def build_auth_headers( self, destination, method, url_bytes, content=None, destination_is=None @@ -644,7 +644,7 @@ def put_json( self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def post_json( @@ -713,7 +713,7 @@ def post_json( body = yield _handle_json_response( self.reactor, _sec_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def get_json( @@ -778,7 +778,7 @@ def get_json( self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def delete_json( @@ -836,7 +836,7 @@ def delete_json( body = yield _handle_json_response( self.reactor, self.default_timeout, request, response ) - defer.returnValue(body) + return body @defer.inlineCallbacks def get_file( @@ -902,7 +902,7 @@ def get_file( response.phrase.decode("ascii", errors="replace"), length, ) - defer.returnValue((length, headers)) + return (length, headers) class _ReadBodyToFileProtocol(protocol.Protocol): diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 96a4714d82de..fb338ca223d0 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -89,7 +89,7 @@ def deferred_function(*args, **kwargs): # We start yield we_wait # we finish - defer.returnValue(something_usual_and_useful) + return something_usual_and_useful Operation names can be explicitly set for functions by using ``trace_using_operation_name`` and @@ -113,7 +113,7 @@ def deferred_function(*args, **kwargs): # We start yield we_wait # we finish - defer.returnValue(something_usual_and_useful) + return something_usual_and_useful Contexts and carriers --------------------- @@ -694,7 +694,7 @@ def _trace_servlet_inner(request, *args, **kwargs): }, ): result = yield defer.maybeDeferred(func, request, *args, **kwargs) - defer.returnValue(result) + return result return _trace_servlet_inner diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 7bb020cb45be..41147d429290 100644 --- 
a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -101,7 +101,7 @@ def register(self, localpart, displayname=None, emails=[]): ) user_id = yield self.register_user(localpart, displayname, emails) _, access_token = yield self.register_device(user_id) - defer.returnValue((user_id, access_token)) + return (user_id, access_token) def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. diff --git a/synapse/notifier.py b/synapse/notifier.py index 918ef64897be..bd80c801b6ec 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -365,7 +365,7 @@ def wait_for_events( current_token = user_stream.current_token result = yield callback(prev_token, current_token) - defer.returnValue(result) + return result @defer.inlineCallbacks def get_events_for( @@ -400,7 +400,7 @@ def get_events_for( @defer.inlineCallbacks def check_for_updates(before_token, after_token): if not after_token.is_after(before_token): - defer.returnValue(EventStreamResult([], (from_token, from_token))) + return EventStreamResult([], (from_token, from_token)) events = [] end_token = from_token @@ -440,7 +440,7 @@ def check_for_updates(before_token, after_token): events.extend(new_events) end_token = end_token.copy_and_replace(keyname, new_key) - defer.returnValue(EventStreamResult(events, (from_token, end_token))) + return EventStreamResult(events, (from_token, end_token)) user_id_for_stream = user.to_string() if is_peeking: @@ -465,18 +465,18 @@ def check_for_updates(before_token, after_token): from_token=from_token, ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _get_room_ids(self, user, explicit_room_id): joined_room_ids = yield self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: - defer.returnValue(([explicit_room_id], True)) + return ([explicit_room_id], True) if (yield self._is_world_readable(explicit_room_id)): - defer.returnValue(([explicit_room_id], False)) + return ([explicit_room_id], False) raise AuthError(403, "Non-joined access not allowed") - defer.returnValue((joined_room_ids, True)) + return (joined_room_ids, True) @defer.inlineCallbacks def _is_world_readable(self, room_id): @@ -484,9 +484,9 @@ def _is_world_readable(self, room_id): room_id, EventTypes.RoomHistoryVisibility, "" ) if state and "history_visibility" in state.content: - defer.returnValue(state.content["history_visibility"] == "world_readable") + return state.content["history_visibility"] == "world_readable" else: - defer.returnValue(False) + return False @log_function def remove_expired_streams(self): diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index c8a5b381daec..c83197563542 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -95,7 +95,7 @@ def _get_rules_for_event(self, event, context): invited ) - defer.returnValue(rules_by_user) + return rules_by_user @cached() def _get_rules_for_room(self, room_id): @@ -134,7 +134,7 @@ def _get_power_levels_and_sender_level(self, event, context): pl_event = auth_events.get(POWER_KEY) - defer.returnValue((pl_event.content if pl_event else {}, sender_level)) + return (pl_event.content if pl_event else {}, sender_level) @defer.inlineCallbacks def action_for_event_by_user(self, event, context): @@ -283,13 +283,13 @@ def get_rules(self, event, context): if state_group and self.state_group == state_group: 
logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - defer.returnValue(self.rules_by_user) + return self.rules_by_user with (yield self.linearizer.queue(())): if state_group and self.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - defer.returnValue(self.rules_by_user) + return self.rules_by_user self.room_push_rule_cache_metrics.inc_misses() @@ -366,7 +366,7 @@ def get_rules(self, event, context): logger.debug( "Returning push rules for %r %r", self.room_id, ret_rules_by_user.keys() ) - defer.returnValue(ret_rules_by_user) + return ret_rules_by_user @defer.inlineCallbacks def _update_rules_with_member_event_ids( diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 4e7b6a553124..5b15b0dbe7c7 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -258,17 +258,17 @@ def _unsafe_process(self): @defer.inlineCallbacks def _process_one(self, push_action): if "notify" not in push_action["actions"]: - defer.returnValue(True) + return True tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"]) badge = yield push_tools.get_badge_count(self.hs.get_datastore(), self.user_id) event = yield self.store.get_event(push_action["event_id"], allow_none=True) if event is None: - defer.returnValue(True) # It's been redacted + return True # It's been redacted rejected = yield self.dispatch_push(event, tweaks, badge) if rejected is False: - defer.returnValue(False) + return False if isinstance(rejected, list) or isinstance(rejected, tuple): for pk in rejected: @@ -282,7 +282,7 @@ def _process_one(self, push_action): else: logger.info("Pushkey %s was rejected: removing", pk) yield self.hs.remove_pusher(self.app_id, pk, self.user_id) - defer.returnValue(True) + return True @defer.inlineCallbacks def _build_notification_dict(self, event, tweaks, badge): @@ -302,7 +302,7 @@ def _build_notification_dict(self, event, tweaks, badge): ], } } - defer.returnValue(d) + return d ctx = yield push_tools.get_context_for_event( self.store, self.state_handler, event, self.user_id @@ -345,13 +345,13 @@ def _build_notification_dict(self, event, tweaks, badge): if "name" in ctx and len(ctx["name"]) > 0: d["notification"]["room_name"] = ctx["name"] - defer.returnValue(d) + return d @defer.inlineCallbacks def dispatch_push(self, event, tweaks, badge): notification_dict = yield self._build_notification_dict(event, tweaks, badge) if not notification_dict: - defer.returnValue([]) + return [] try: resp = yield self.http_client.post_json_get_json( self.url, notification_dict @@ -364,11 +364,11 @@ def dispatch_push(self, event, tweaks, badge): type(e), e, ) - defer.returnValue(False) + return False rejected = [] if "rejected" in resp: rejected = resp["rejected"] - defer.returnValue(rejected) + return rejected @defer.inlineCallbacks def _send_badge(self, badge): diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 521c6e2cd7f8..4245ce26f344 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -316,7 +316,7 @@ def get_room_vars(self, room_id, user_id, notifs, notif_events, room_state_ids): if not merge: room_vars["notifs"].append(notifvars) - defer.returnValue(room_vars) + return room_vars @defer.inlineCallbacks def get_notif_vars(self, notif, user_id, notif_event, room_state_ids): @@ -343,7 +343,7 @@ def get_notif_vars(self, notif, user_id, notif_event, room_state_ids): if messagevars is not None: 
ret["messages"].append(messagevars) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_message_vars(self, notif, event, room_state_ids): @@ -379,7 +379,7 @@ def get_message_vars(self, notif, event, room_state_ids): if "body" in event.content: ret["body_text_plain"] = event.content["body"] - defer.returnValue(ret) + return ret def add_text_message_vars(self, messagevars, event): msgformat = event.content.get("format") @@ -428,19 +428,16 @@ def make_summary_text( inviter_name = name_from_member_event(inviter_member_event) if room_name is None: - defer.returnValue( - INVITE_FROM_PERSON - % {"person": inviter_name, "app": self.app_name} - ) + return INVITE_FROM_PERSON % { + "person": inviter_name, + "app": self.app_name, + } else: - defer.returnValue( - INVITE_FROM_PERSON_TO_ROOM - % { - "person": inviter_name, - "room": room_name, - "app": self.app_name, - } - ) + return INVITE_FROM_PERSON_TO_ROOM % { + "person": inviter_name, + "room": room_name, + "app": self.app_name, + } sender_name = None if len(notifs_by_room[room_id]) == 1: @@ -454,26 +451,21 @@ def make_summary_text( sender_name = name_from_member_event(state_event) if sender_name is not None and room_name is not None: - defer.returnValue( - MESSAGE_FROM_PERSON_IN_ROOM - % { - "person": sender_name, - "room": room_name, - "app": self.app_name, - } - ) + return MESSAGE_FROM_PERSON_IN_ROOM % { + "person": sender_name, + "room": room_name, + "app": self.app_name, + } elif sender_name is not None: - defer.returnValue( - MESSAGE_FROM_PERSON - % {"person": sender_name, "app": self.app_name} - ) + return MESSAGE_FROM_PERSON % { + "person": sender_name, + "app": self.app_name, + } else: # There's more than one notification for this room, so just # say there are several if room_name is not None: - defer.returnValue( - MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name} - ) + return MESSAGES_IN_ROOM % {"room": room_name, "app": self.app_name} else: # If the room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" @@ -493,24 +485,19 @@ def make_summary_text( ] ) - defer.returnValue( - MESSAGES_FROM_PERSON - % { - "person": descriptor_from_member_events( - member_events.values() - ), - "app": self.app_name, - } - ) + return MESSAGES_FROM_PERSON % { + "person": descriptor_from_member_events(member_events.values()), + "app": self.app_name, + } else: # Stuff's happened in multiple different rooms # ...but we still refer to the 'reason' room which triggered the mail if reason["room_name"] is not None: - defer.returnValue( - MESSAGES_IN_ROOM_AND_OTHERS - % {"room": reason["room_name"], "app": self.app_name} - ) + return MESSAGES_IN_ROOM_AND_OTHERS % { + "room": reason["room_name"], + "app": self.app_name, + } else: # If the reason room doesn't have a name, say who the messages # are from explicitly to avoid, "messages in the Bob room" @@ -527,13 +514,10 @@ def make_summary_text( [room_state_ids[room_id][("m.room.member", s)] for s in sender_ids] ) - defer.returnValue( - MESSAGES_FROM_PERSON_AND_OTHERS - % { - "person": descriptor_from_member_events(member_events.values()), - "app": self.app_name, - } - ) + return MESSAGES_FROM_PERSON_AND_OTHERS % { + "person": descriptor_from_member_events(member_events.values()), + "app": self.app_name, + } def make_room_link(self, room_id): if self.hs.config.email_riot_base_url: diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 06056fbf4fae..16a7e8e31db1 100644 --- a/synapse/push/presentable_names.py 
+++ b/synapse/push/presentable_names.py @@ -55,7 +55,7 @@ def calculate_room_name( room_state_ids[("m.room.name", "")], allow_none=True ) if m_room_name and m_room_name.content and m_room_name.content["name"]: - defer.returnValue(m_room_name.content["name"]) + return m_room_name.content["name"] # does it have a canonical alias? if ("m.room.canonical_alias", "") in room_state_ids: @@ -68,7 +68,7 @@ def calculate_room_name( and canon_alias.content["alias"] and _looks_like_an_alias(canon_alias.content["alias"]) ): - defer.returnValue(canon_alias.content["alias"]) + return canon_alias.content["alias"] # at this point we're going to need to search the state by all state keys # for an event type, so rearrange the data structure @@ -82,10 +82,10 @@ def calculate_room_name( if alias_event and alias_event.content.get("aliases"): the_aliases = alias_event.content["aliases"] if len(the_aliases) > 0 and _looks_like_an_alias(the_aliases[0]): - defer.returnValue(the_aliases[0]) + return the_aliases[0] if not fallback_to_members: - defer.returnValue(None) + return None my_member_event = None if ("m.room.member", user_id) in room_state_ids: @@ -104,14 +104,13 @@ def calculate_room_name( ) if inviter_member_event: if fallback_to_single_member: - defer.returnValue( - "Invite from %s" - % (name_from_member_event(inviter_member_event),) + return "Invite from %s" % ( + name_from_member_event(inviter_member_event), ) else: return else: - defer.returnValue("Room Invite") + return "Room Invite" # we're going to have to generate a name based on who's in the room, # so find out who is in the room that isn't the user. @@ -154,17 +153,17 @@ def calculate_room_name( # return "Inviting %s" % ( # descriptor_from_member_events(third_party_invites) # ) - defer.returnValue("Inviting email address") + return "Inviting email address" else: - defer.returnValue(ALL_ALONE) + return ALL_ALONE else: - defer.returnValue(name_from_member_event(all_members[0])) + return name_from_member_event(all_members[0]) else: - defer.returnValue(ALL_ALONE) + return ALL_ALONE elif len(other_members) == 1 and not fallback_to_single_member: return else: - defer.returnValue(descriptor_from_member_events(other_members)) + return descriptor_from_member_events(other_members) def descriptor_from_member_events(member_events): diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index e37269cdb934..a54051a726d1 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -39,7 +39,7 @@ def get_badge_count(store, user_id): # return one badge count per conversation, as count per # message is so noisy as to be almost useless badge += 1 if notifs["notify_count"] else 0 - defer.returnValue(badge) + return badge @defer.inlineCallbacks @@ -61,4 +61,4 @@ def get_context_for_event(store, state_handler, ev, user_id): sender_state_event = yield store.get_event(sender_state_event_id) ctx["sender_display_name"] = name_from_member_event(sender_state_event) - defer.returnValue(ctx) + return ctx diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index df6f67074033..08e840fdc263 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -123,7 +123,7 @@ def add_pusher( ) pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id) - defer.returnValue(pusher) + return pusher @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user( @@ -224,7 +224,7 @@ def start_pusher_by_id(self, app_id, pushkey, user_id): if pusher_dict: pusher = yield self._start_pusher(pusher_dict) - 
defer.returnValue(pusher) + return pusher @defer.inlineCallbacks def _start_pushers(self): @@ -293,7 +293,7 @@ def _start_pusher(self, pusherdict): p.on_started(have_notifs) - defer.returnValue(p) + return p @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index fe482e279fd1..f5074b101af4 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -185,7 +185,7 @@ def send_request(**kwargs): except RequestSendFailed as e: raise_from(SynapseError(502, "Failed to talk to master"), e) - defer.returnValue(result) + return result return send_request diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 61eafbe708bd..fed4f08820e3 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -80,7 +80,7 @@ def _serialize_payload(store, event_and_contexts, backfilled): payload = {"events": event_payloads, "backfilled": backfilled} - defer.returnValue(payload) + return payload @defer.inlineCallbacks def _handle_request(self, request): @@ -113,7 +113,7 @@ def _handle_request(self, request): event_and_contexts, backfilled ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationFederationSendEduRestServlet(ReplicationEndpoint): @@ -156,7 +156,7 @@ def _handle_request(self, request, edu_type): result = yield self.registry.on_edu(edu_type, origin, edu_content) - defer.returnValue((200, result)) + return (200, result) class ReplicationGetQueryRestServlet(ReplicationEndpoint): @@ -204,7 +204,7 @@ def _handle_request(self, request, query_type): result = yield self.registry.on_query(query_type, args) - defer.returnValue((200, result)) + return (200, result) class ReplicationCleanRoomRestServlet(ReplicationEndpoint): @@ -238,7 +238,7 @@ def _serialize_payload(room_id, args): def _handle_request(self, request, room_id): yield self.store.clean_room_for_join(room_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 7c1197e5ddcc..f17d3a2da488 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -64,7 +64,7 @@ def _handle_request(self, request, user_id): user_id, device_id, initial_display_name, is_guest ) - defer.returnValue((200, {"device_id": device_id, "access_token": access_token})) + return (200, {"device_id": device_id, "access_token": access_token}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 2d9cbbaefc24..4217335d8815 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -83,7 +83,7 @@ def _handle_request(self, request, room_id, user_id): remote_room_hosts, room_id, user_id, event_content ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): @@ -153,7 +153,7 @@ def _handle_request(self, request, room_id, user_id): yield self.store.locally_reject_invite(user_id, room_id) ret = {} - defer.returnValue((200, ret)) + return (200, ret) class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 2bf2173895ec..3341320a87b2 100644 --- a/synapse/replication/http/register.py +++ 
b/synapse/replication/http/register.py @@ -90,7 +90,7 @@ def _handle_request(self, request, user_id): address=content["address"], ) - defer.returnValue((200, {})) + return (200, {}) class ReplicationPostRegisterActionsServlet(ReplicationEndpoint): @@ -143,7 +143,7 @@ def _handle_request(self, request, user_id): bind_msisdn=bind_msisdn, ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 034763fe993a..eff7bd73059b 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -85,7 +85,7 @@ def _serialize_payload( "extra_users": [u.to_string() for u in extra_users], } - defer.returnValue(payload) + return payload @defer.inlineCallbacks def _handle_request(self, request, event_id): @@ -117,7 +117,7 @@ def _handle_request(self, request, event_id): requester, event, context, ratelimit=ratelimit, extra_users=extra_users ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 7ef67a5a73fc..c10b85d2ff8b 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -158,7 +158,7 @@ def get_updates(self): updates, current_token = yield self.get_updates_since(self.last_token) self.last_token = current_token - defer.returnValue((updates, current_token)) + return (updates, current_token) @defer.inlineCallbacks def get_updates_since(self, from_token): @@ -172,14 +172,14 @@ def get_updates_since(self, from_token): sent over the replication steam. """ if from_token in ("NOW", "now"): - defer.returnValue(([], self.upto_token)) + return ([], self.upto_token) current_token = self.upto_token from_token = int(from_token) if from_token == current_token: - defer.returnValue(([], current_token)) + return ([], current_token) if self._LIMITED: rows = yield self.update_function( @@ -198,7 +198,7 @@ def get_updates_since(self, from_token): if self._LIMITED and len(updates) >= MAX_EVENTS_BEHIND: raise Exception("stream %s has fallen behind" % (self.NAME)) - defer.returnValue((updates, current_token)) + return (updates, current_token) def current_token(self): """Gets the current token of the underlying streams. 
Should be provided @@ -297,7 +297,7 @@ def current_token(self): @defer.inlineCallbacks def update_function(self, from_token, to_token, limit): rows = yield self.store.get_all_push_rule_updates(from_token, to_token, limit) - defer.returnValue([(row[0], row[2]) for row in rows]) + return [(row[0], row[2]) for row in rows] class PushersStream(Stream): @@ -424,7 +424,7 @@ def update_function(self, from_token, to_token, limit): for stream_id, user_id, account_data_type, content in global_results ) - defer.returnValue(results) + return results class GroupServerStream(Stream): diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 3d0694bb1121..d97669c886b2 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -134,7 +134,7 @@ def update_function(self, from_token, current_token, limit=None): all_updates = heapq.merge(event_updates, state_updates) - defer.returnValue(all_updates) + return all_updates @classmethod def parse_row(cls, row): diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 6888ae559002..0a7d9b81b27b 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -84,7 +84,7 @@ def on_GET(self, request, user_id): ret = yield self.handlers.admin_handler.get_users() - defer.returnValue((200, ret)) + return (200, ret) class VersionServlet(RestServlet): @@ -227,7 +227,7 @@ def on_POST(self, request): ) result = yield register._create_registration_details(user_id, body) - defer.returnValue((200, result)) + return (200, result) class WhoisRestServlet(RestServlet): @@ -252,7 +252,7 @@ def on_GET(self, request, user_id): ret = yield self.handlers.admin_handler.get_whois(target_user) - defer.returnValue((200, ret)) + return (200, ret) class PurgeMediaCacheRestServlet(RestServlet): @@ -271,7 +271,7 @@ def on_POST(self, request): ret = yield self.media_repository.delete_old_remote_media(before_ts) - defer.returnValue((200, ret)) + return (200, ret) class PurgeHistoryRestServlet(RestServlet): @@ -356,7 +356,7 @@ def on_POST(self, request, room_id, event_id): room_id, token, delete_local_events=delete_local_events ) - defer.returnValue((200, {"purge_id": purge_id})) + return (200, {"purge_id": purge_id}) class PurgeHistoryStatusRestServlet(RestServlet): @@ -381,7 +381,7 @@ def on_GET(self, request, purge_id): if purge_status is None: raise NotFoundError("purge id '%s' not found" % purge_id) - defer.returnValue((200, purge_status.asdict())) + return (200, purge_status.asdict()) class DeactivateAccountRestServlet(RestServlet): @@ -413,7 +413,7 @@ def on_POST(self, request, target_user_id): else: id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class ShutdownRoomRestServlet(RestServlet): @@ -531,16 +531,14 @@ def on_POST(self, request, room_id): room_id, new_room_id, requester_user_id ) - defer.returnValue( - ( - 200, - { - "kicked_users": kicked_users, - "failed_to_kick_users": failed_to_kick_users, - "local_aliases": aliases_for_room, - "new_room_id": new_room_id, - }, - ) + return ( + 200, + { + "kicked_users": kicked_users, + "failed_to_kick_users": failed_to_kick_users, + "local_aliases": aliases_for_room, + "new_room_id": new_room_id, + }, ) @@ -564,7 +562,7 @@ def on_POST(self, request, room_id): room_id, requester.user.to_string() ) - defer.returnValue((200, {"num_quarantined": num_quarantined})) + 
return (200, {"num_quarantined": num_quarantined}) class ListMediaInRoom(RestServlet): @@ -585,7 +583,7 @@ def on_GET(self, request, room_id): local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) - defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs})) + return (200, {"local": local_mxcs, "remote": remote_mxcs}) class ResetPasswordRestServlet(RestServlet): @@ -629,7 +627,7 @@ def on_POST(self, request, target_user_id): yield self._set_password_handler.set_password( target_user_id, new_password, requester ) - defer.returnValue((200, {})) + return (200, {}) class GetUsersPaginatedRestServlet(RestServlet): @@ -671,7 +669,7 @@ def on_GET(self, request, target_user_id): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_POST(self, request, target_user_id): @@ -699,7 +697,7 @@ def on_POST(self, request, target_user_id): logger.info("limit: %s, start: %s", limit, start) ret = yield self.handlers.admin_handler.get_users_paginate(order, start, limit) - defer.returnValue((200, ret)) + return (200, ret) class SearchUsersRestServlet(RestServlet): @@ -742,7 +740,7 @@ def on_GET(self, request, target_user_id): logger.info("term: %s ", term) ret = yield self.handlers.admin_handler.search_users(term) - defer.returnValue((200, ret)) + return (200, ret) class DeleteGroupAdminRestServlet(RestServlet): @@ -765,7 +763,7 @@ def on_POST(self, request, group_id): raise SynapseError(400, "Can only delete local groups") yield self.group_server.delete_group(group_id, requester.user.to_string()) - defer.returnValue((200, {})) + return (200, {}) class AccountValidityRenewServlet(RestServlet): @@ -796,7 +794,7 @@ def on_POST(self, request): ) res = {"expiration_ts": expiration_ts} - defer.returnValue((200, res)) + return (200, res) ######################################################################################## diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index ee66838a0d34..90c0ee15dcdb 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -87,7 +87,7 @@ def on_POST(self, request, txn_id=None): event_content=body["content"], ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_PUT(self, request, txn_id): return self.txns.fetch_or_execute_request( diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 57542c2b4b9c..428473802117 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -54,7 +54,7 @@ def on_GET(self, request, room_alias): dir_handler = self.handlers.directory_handler res = yield dir_handler.get_association(room_alias) - defer.returnValue((200, res)) + return (200, res) @defer.inlineCallbacks def on_PUT(self, request, room_alias): @@ -87,7 +87,7 @@ def on_PUT(self, request, room_alias): requester, room_alias, room_id, servers ) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, room_alias): @@ -102,7 +102,7 @@ def on_DELETE(self, request, room_alias): service.url, room_alias.to_string(), ) - defer.returnValue((200, {})) + return (200, {}) except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass @@ -118,7 +118,7 @@ def on_DELETE(self, request, room_alias): "User %s 
deleted alias %s", user.to_string(), room_alias.to_string() ) - defer.returnValue((200, {})) + return (200, {}) class ClientDirectoryListServer(RestServlet): @@ -136,9 +136,7 @@ def on_GET(self, request, room_id): if room is None: raise NotFoundError("Unknown room") - defer.returnValue( - (200, {"visibility": "public" if room["is_public"] else "private"}) - ) + return (200, {"visibility": "public" if room["is_public"] else "private"}) @defer.inlineCallbacks def on_PUT(self, request, room_id): @@ -151,7 +149,7 @@ def on_PUT(self, request, room_id): requester, room_id, visibility ) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, room_id): @@ -161,7 +159,7 @@ def on_DELETE(self, request, room_id): requester, room_id, "private" ) - defer.returnValue((200, {})) + return (200, {}) class ClientAppserviceDirectoryListServer(RestServlet): @@ -195,4 +193,4 @@ def _edit(self, request, network_id, room_id, visibility): requester.app_service.id, network_id, room_id, visibility ) - defer.returnValue((200, {})) + return (200, {}) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index d6de2b73604b..53ebed22030d 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -67,7 +67,7 @@ def on_GET(self, request): is_guest=is_guest, ) - defer.returnValue((200, chunk)) + return (200, chunk) def on_OPTIONS(self, request): return (200, {}) @@ -91,9 +91,9 @@ def on_GET(self, request, event_id): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - defer.returnValue((200, event)) + return (200, event) else: - defer.returnValue((404, "Event not found.")) + return (404, "Event not found.") def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 0fe5f2d79bd6..70b8478e90b9 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -42,7 +42,7 @@ def on_GET(self, request): include_archived=include_archived, ) - defer.returnValue((200, content)) + return (200, content) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 0d05945f0aa6..5762b9fd0648 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -152,7 +152,7 @@ def on_POST(self, request): well_known_data = self._well_known_builder.get_well_known() if well_known_data: result["well_known"] = well_known_data - defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def _do_other_login(self, login_submission): @@ -212,7 +212,7 @@ def _do_other_login(self, login_submission): result = yield self._register_device_with_callback( canonical_user_id, login_submission, callback_3pid ) - defer.returnValue(result) + return result # No password providers were able to handle this 3pid # Check local store @@ -241,7 +241,7 @@ def _do_other_login(self, login_submission): result = yield self._register_device_with_callback( canonical_user_id, login_submission, callback ) - defer.returnValue(result) + return result @defer.inlineCallbacks def _register_device_with_callback(self, user_id, login_submission, callback=None): @@ -273,7 +273,7 @@ def _register_device_with_callback(self, user_id, login_submission, callback=Non if callback is not None: yield callback(result) - defer.returnValue(result) + return result @defer.inlineCallbacks def do_token_login(self, 
login_submission): @@ -284,7 +284,7 @@ def do_token_login(self, login_submission): ) result = yield self._register_device_with_callback(user_id, login_submission) - defer.returnValue(result) + return result @defer.inlineCallbacks def do_jwt_login(self, login_submission): @@ -321,7 +321,7 @@ def do_jwt_login(self, login_submission): result = yield self._register_device_with_callback( registered_user_id, login_submission ) - defer.returnValue(result) + return result class BaseSSORedirectServlet(RestServlet): @@ -395,7 +395,7 @@ def on_GET(self, request): # even if that's being used old-http style to signal end-of-data body = pde.response result = yield self.handle_cas_response(request, body, client_redirect_url) - defer.returnValue(result) + return result def handle_cas_response(self, request, cas_response_body, client_redirect_url): user, attributes = self.parse_cas_response(cas_response_body) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index cd711be5190e..2769f3a1899c 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -49,7 +49,7 @@ def on_POST(self, request): requester.user.to_string(), requester.device_id ) - defer.returnValue((200, {})) + return (200, {}) class LogoutAllRestServlet(RestServlet): @@ -75,7 +75,7 @@ def on_POST(self, request): # .. and then delete any access tokens which weren't associated with # devices. yield self._auth_handler.delete_access_tokens_for_user(user_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 3e87f0fdb397..1eb1068c98eb 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -56,7 +56,7 @@ def on_GET(self, request, user_id): state = yield self.presence_handler.get_state(target_user=user) state = format_user_presence_state(state, self.clock.time_msec()) - defer.returnValue((200, state)) + return (200, state) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -88,7 +88,7 @@ def on_PUT(self, request, user_id): if self.hs.config.use_presence: yield self.presence_handler.set_state(user, state) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, request): return (200, {}) diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 4d8ab1f47e9e..2657ae45bb98 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -48,7 +48,7 @@ def on_GET(self, request, user_id): if displayname is not None: ret["displayname"] = displayname - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -61,11 +61,11 @@ def on_PUT(self, request, user_id): try: new_name = content["displayname"] except Exception: - defer.returnValue((400, "Unable to parse name")) + return (400, "Unable to parse name") yield self.profile_handler.set_displayname(user, requester, new_name, is_admin) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, request, user_id): return (200, {}) @@ -98,7 +98,7 @@ def on_GET(self, request, user_id): if avatar_url is not None: ret["avatar_url"] = avatar_url - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def on_PUT(self, request, user_id): @@ -110,11 +110,11 @@ def on_PUT(self, request, user_id): try: new_name = content["avatar_url"] except Exception: - defer.returnValue((400, "Unable to parse name")) + return (400, "Unable to parse 
name") yield self.profile_handler.set_avatar_url(user, requester, new_name, is_admin) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, request, user_id): return (200, {}) @@ -150,7 +150,7 @@ def on_GET(self, request, user_id): if avatar_url is not None: ret["avatar_url"] = avatar_url - defer.returnValue((200, ret)) + return (200, ret) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index e635efb420c5..c3ae8b98a809 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -69,7 +69,7 @@ def on_PUT(self, request, path): if "attr" in spec: yield self.set_rule_attr(user_id, spec, content) self.notify_user(user_id) - defer.returnValue((200, {})) + return (200, {}) if spec["rule_id"].startswith("."): # Rule ids starting with '.' are reserved for server default rules. @@ -106,7 +106,7 @@ def on_PUT(self, request, path): except RuleNotFoundException as e: raise SynapseError(400, str(e)) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, path): @@ -123,7 +123,7 @@ def on_DELETE(self, request, path): try: yield self.store.delete_push_rule(user_id, namespaced_rule_id) self.notify_user(user_id) - defer.returnValue((200, {})) + return (200, {}) except StoreError as e: if e.code == 404: raise NotFoundError() @@ -151,10 +151,10 @@ def on_GET(self, request, path): ) if path[0] == "": - defer.returnValue((200, rules)) + return (200, rules) elif path[0] == "global": result = _filter_ruleset_with_path(rules["global"], path[1:]) - defer.returnValue((200, result)) + return (200, result) else: raise UnrecognizedRequestError() diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index e9246018df71..ebc3dec516af 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -62,7 +62,7 @@ def on_GET(self, request): if k not in allowed_keys: del p[k] - defer.returnValue((200, {"pushers": pushers})) + return (200, {"pushers": pushers}) def on_OPTIONS(self, _): return 200, {} @@ -94,7 +94,7 @@ def on_POST(self, request): yield self.pusher_pool.remove_pusher( content["app_id"], content["pushkey"], user_id=user.to_string() ) - defer.returnValue((200, {})) + return (200, {}) assert_params_in_dict( content, @@ -143,7 +143,7 @@ def on_POST(self, request): self.notifier.on_new_replication_data() - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, _): return 200, {} @@ -190,7 +190,7 @@ def on_GET(self, request): ) request.write(PushersRemoveRestServlet.SUCCESS_HTML) finish_request(request) - defer.returnValue(None) + return None def on_OPTIONS(self, _): return 200, {} diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7709c2d705c2..012e7a44a630 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -85,7 +85,7 @@ def on_POST(self, request): requester, self.get_room_config(request) ) - defer.returnValue((200, info)) + return (200, info) def get_room_config(self, request): user_supplied_config = parse_json_object_from_request(request) @@ -155,9 +155,9 @@ def on_GET(self, request, room_id, event_type, state_key): if format == "event": event = format_event_for_client_v2(data.get_dict()) - defer.returnValue((200, event)) + return (200, event) elif format == "content": - defer.returnValue((200, data.get_dict()["content"])) + return (200, data.get_dict()["content"]) @defer.inlineCallbacks def on_PUT(self, request, 
room_id, event_type, state_key, txn_id=None): @@ -192,7 +192,7 @@ def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): ret = {} if event: ret = {"event_id": event.event_id} - defer.returnValue((200, ret)) + return (200, ret) # TODO: Needs unit testing for generic events + feedback @@ -226,7 +226,7 @@ def on_POST(self, request, room_id, event_type, txn_id=None): requester, event_dict, txn_id=txn_id ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_GET(self, request, room_id, event_type, txn_id): return (200, "Not implemented") @@ -289,7 +289,7 @@ def on_POST(self, request, room_identifier, txn_id=None): third_party_signed=content.get("third_party_signed", None), ) - defer.returnValue((200, {"room_id": room_id})) + return (200, {"room_id": room_id}) def on_PUT(self, request, room_identifier, txn_id): return self.txns.fetch_or_execute_request( @@ -342,7 +342,7 @@ def on_GET(self, request): limit=limit, since_token=since_token ) - defer.returnValue((200, data)) + return (200, data) @defer.inlineCallbacks def on_POST(self, request): @@ -387,7 +387,7 @@ def on_POST(self, request): network_tuple=network_tuple, ) - defer.returnValue((200, data)) + return (200, data) # TODO: Needs unit testing @@ -438,7 +438,7 @@ def on_GET(self, request, room_id): continue chunk.append(event) - defer.returnValue((200, {"chunk": chunk})) + return (200, {"chunk": chunk}) # deprecated in favour of /members?membership=join? @@ -459,7 +459,7 @@ def on_GET(self, request, room_id): requester, room_id ) - defer.returnValue((200, {"joined": users_with_profile})) + return (200, {"joined": users_with_profile}) # TODO: Needs better unit testing @@ -492,7 +492,7 @@ def on_GET(self, request, room_id): event_filter=event_filter, ) - defer.returnValue((200, msgs)) + return (200, msgs) # TODO: Needs unit testing @@ -513,7 +513,7 @@ def on_GET(self, request, room_id): user_id=requester.user.to_string(), is_guest=requester.is_guest, ) - defer.returnValue((200, events)) + return (200, events) # TODO: Needs unit testing @@ -532,7 +532,7 @@ def on_GET(self, request, room_id): content = yield self.initial_sync_handler.room_initial_sync( room_id=room_id, requester=requester, pagin_config=pagination_config ) - defer.returnValue((200, content)) + return (200, content) class RoomEventServlet(RestServlet): @@ -555,9 +555,9 @@ def on_GET(self, request, room_id, event_id): time_now = self.clock.time_msec() if event: event = yield self._event_serializer.serialize_event(event, time_now) - defer.returnValue((200, event)) + return (200, event) else: - defer.returnValue((404, "Event not found.")) + return (404, "Event not found.") class RoomEventContextServlet(RestServlet): @@ -607,7 +607,7 @@ def on_GET(self, request, room_id, event_id): results["state"], time_now ) - defer.returnValue((200, results)) + return (200, results) class RoomForgetRestServlet(TransactionRestServlet): @@ -626,7 +626,7 @@ def on_POST(self, request, room_id, txn_id=None): yield self.room_member_handler.forget(user=requester.user, room_id=room_id) - defer.returnValue((200, {})) + return (200, {}) def on_PUT(self, request, room_id, txn_id): return self.txns.fetch_or_execute_request( @@ -676,7 +676,7 @@ def on_POST(self, request, room_id, membership_action, txn_id=None): requester, txn_id, ) - defer.returnValue((200, {})) + return (200, {}) return target = requester.user @@ -703,7 +703,7 @@ def on_POST(self, request, room_id, membership_action, txn_id=None): if membership_action == "join": 
return_value["room_id"] = room_id - defer.returnValue((200, return_value)) + return (200, return_value) def _has_3pid_invite_keys(self, content): for key in {"id_server", "medium", "address"}: @@ -745,7 +745,7 @@ def on_POST(self, request, room_id, event_id, txn_id=None): txn_id=txn_id, ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) def on_PUT(self, request, room_id, event_id, txn_id): return self.txns.fetch_or_execute_request( @@ -790,7 +790,7 @@ def on_PUT(self, request, room_id, user_id): target_user=target_user, auth_user=requester.user, room_id=room_id ) - defer.returnValue((200, {})) + return (200, {}) class SearchRestServlet(RestServlet): @@ -812,7 +812,7 @@ def on_POST(self, request): requester.user, content, batch ) - defer.returnValue((200, results)) + return (200, results) class JoinedRoomsRestServlet(RestServlet): @@ -828,7 +828,7 @@ def on_GET(self, request): requester = yield self.auth.get_user_by_req(request, allow_guest=True) room_ids = yield self.store.get_rooms_for_user(requester.user.to_string()) - defer.returnValue((200, {"joined_rooms": list(room_ids)})) + return (200, {"joined_rooms": list(room_ids)}) def register_txn_path(servlet, regex_string, http_server, with_get=False): diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 41b3171ac828..497cddf8b8d6 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -60,18 +60,16 @@ def on_GET(self, request): password = turnPassword else: - defer.returnValue((200, {})) - - defer.returnValue( - ( - 200, - { - "username": username, - "password": password, - "ttl": userLifetime / 1000, - "uris": turnUris, - }, - ) + return (200, {}) + + return ( + 200, + { + "username": username, + "password": password, + "ttl": userLifetime / 1000, + "uris": turnUris, + }, ) def on_OPTIONS(self, request): diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f143d8b85cfb..7ac456812a12 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -117,7 +117,7 @@ def on_POST(self, request): # Wrap the session id in a JSON object ret = {"sid": sid} - defer.returnValue((200, ret)) + return (200, ret) @defer.inlineCallbacks def send_password_reset(self, email, client_secret, send_attempt, next_link=None): @@ -149,7 +149,7 @@ def send_password_reset(self, email, client_secret, send_attempt, next_link=None # Check that the send_attempt is higher than previous attempts if send_attempt <= last_send_attempt: # If not, just return a success without sending an email - defer.returnValue(session_id) + return session_id else: # An non-validated session does not exist yet. 
# Generate a session id @@ -185,7 +185,7 @@ def send_password_reset(self, email, client_secret, send_attempt, next_link=None token_expires, ) - defer.returnValue(session_id) + return session_id class MsisdnPasswordRequestTokenRestServlet(RestServlet): @@ -221,7 +221,7 @@ def on_POST(self, request): raise SynapseError(400, "MSISDN not found", Codes.THREEPID_NOT_FOUND) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class PasswordResetSubmitTokenServlet(RestServlet): @@ -279,7 +279,7 @@ def on_GET(self, request, medium): request.setResponseCode(302) request.setHeader("Location", next_link) finish_request(request) - defer.returnValue(None) + return None # Otherwise show the success template html = self.config.email_password_reset_success_html_content @@ -295,7 +295,7 @@ def on_GET(self, request, medium): request.write(html.encode("utf-8")) finish_request(request) - defer.returnValue(None) + return None def load_jinja2_template(self, template_dir, template_filename, template_vars): """Loads a jinja2 template with variables to insert @@ -330,7 +330,7 @@ def on_POST(self, request, medium): ) response_code = 200 if valid else 400 - defer.returnValue((response_code, {"success": valid})) + return (response_code, {"success": valid}) class PasswordRestServlet(RestServlet): @@ -399,7 +399,7 @@ def on_POST(self, request): yield self._set_password_handler.set_password(user_id, new_password, requester) - defer.returnValue((200, {})) + return (200, {}) def on_OPTIONS(self, _): return 200, {} @@ -434,7 +434,7 @@ def on_POST(self, request): yield self._deactivate_account_handler.deactivate_account( requester.user.to_string(), erase ) - defer.returnValue((200, {})) + return (200, {}) yield self.auth_handler.validate_user_via_ui_auth( requester, body, self.hs.get_ip_from_request(request) @@ -447,7 +447,7 @@ def on_POST(self, request): else: id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class EmailThreepidRequestTokenRestServlet(RestServlet): @@ -481,7 +481,7 @@ def on_POST(self, request): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class MsisdnThreepidRequestTokenRestServlet(RestServlet): @@ -516,7 +516,7 @@ def on_POST(self, request): raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class ThreepidRestServlet(RestServlet): @@ -536,7 +536,7 @@ def on_GET(self, request): threepids = yield self.datastore.user_get_threepids(requester.user.to_string()) - defer.returnValue((200, {"threepids": threepids})) + return (200, {"threepids": threepids}) @defer.inlineCallbacks def on_POST(self, request): @@ -568,7 +568,7 @@ def on_POST(self, request): logger.debug("Binding threepid %s to %s", threepid, user_id) yield self.identity_handler.bind_threepid(threePidCreds, user_id) - defer.returnValue((200, {})) + return (200, {}) class ThreepidDeleteRestServlet(RestServlet): @@ -603,7 +603,7 @@ def on_POST(self, request): else: id_server_unbind_result = "no-support" - defer.returnValue((200, {"id_server_unbind_result": id_server_unbind_result})) + return (200, {"id_server_unbind_result": id_server_unbind_result}) class 
WhoamiRestServlet(RestServlet): @@ -617,7 +617,7 @@ def __init__(self, hs): def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) - defer.returnValue((200, {"user_id": requester.user.to_string()})) + return (200, {"user_id": requester.user.to_string()}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index f155c26259d8..98f2f6f4b52e 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -55,7 +55,7 @@ def on_PUT(self, request, user_id, account_data_type): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, user_id, account_data_type): @@ -70,7 +70,7 @@ def on_GET(self, request, user_id, account_data_type): if event is None: raise NotFoundError("Account data not found") - defer.returnValue((200, event)) + return (200, event) class RoomAccountDataServlet(RestServlet): @@ -112,7 +112,7 @@ def on_PUT(self, request, user_id, room_id, account_data_type): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_GET(self, request, user_id, room_id, account_data_type): @@ -127,7 +127,7 @@ def on_GET(self, request, user_id, room_id, account_data_type): if event is None: raise NotFoundError("Room account data not found") - defer.returnValue((200, event)) + return (200, event) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index d29c10b83d32..133c61900a5d 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -58,7 +58,7 @@ def on_GET(self, request): ) request.write(AccountValidityRenewServlet.SUCCESS_HTML) finish_request(request) - defer.returnValue(None) + return None class AccountValiditySendMailServlet(RestServlet): @@ -87,7 +87,7 @@ def on_POST(self, request): user_id = requester.user.to_string() yield self.account_activity_handler.send_renewal_email_to_user(user_id) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index bebc2951e7d0..f21aff39e596 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -207,7 +207,7 @@ def on_POST(self, request, stagetype): request.write(html_bytes) finish_request(request) - defer.returnValue(None) + return None elif stagetype == LoginType.TERMS: if ("session" not in request.args or len(request.args["session"])) == 0: raise SynapseError(400, "No session supplied") @@ -239,7 +239,7 @@ def on_POST(self, request, stagetype): request.write(html_bytes) finish_request(request) - defer.returnValue(None) + return None else: raise SynapseError(404, "Unknown auth stage type") diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index fc7e2f4dd56b..a4fa45fe1126 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -58,7 +58,7 @@ def on_GET(self, request): "m.change_password": {"enabled": change_password}, } } - defer.returnValue((200, response)) + return (200, response) def register_servlets(hs, http_server): diff --git 
a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index d279229d74e7..9adf76cc0ce6 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -48,7 +48,7 @@ def on_GET(self, request): devices = yield self.device_handler.get_devices_by_user( requester.user.to_string() ) - defer.returnValue((200, {"devices": devices})) + return (200, {"devices": devices}) class DeleteDevicesRestServlet(RestServlet): @@ -91,7 +91,7 @@ def on_POST(self, request): yield self.device_handler.delete_devices( requester.user.to_string(), body["devices"] ) - defer.returnValue((200, {})) + return (200, {}) class DeviceRestServlet(RestServlet): @@ -114,7 +114,7 @@ def on_GET(self, request, device_id): device = yield self.device_handler.get_device( requester.user.to_string(), device_id ) - defer.returnValue((200, device)) + return (200, device) @interactive_auth_handler @defer.inlineCallbacks @@ -137,7 +137,7 @@ def on_DELETE(self, request, device_id): ) yield self.device_handler.delete_device(requester.user.to_string(), device_id) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_PUT(self, request, device_id): @@ -147,7 +147,7 @@ def on_PUT(self, request, device_id): yield self.device_handler.update_device( requester.user.to_string(), device_id, body ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 3f0adf4a21cf..22be0ee3c55b 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -56,7 +56,7 @@ def on_GET(self, request, user_id, filter_id): user_localpart=target_user.localpart, filter_id=filter_id ) - defer.returnValue((200, filter.get_filter_json())) + return (200, filter.get_filter_json()) except (KeyError, StoreError): raise SynapseError(400, "No such filter", errcode=Codes.NOT_FOUND) @@ -89,7 +89,7 @@ def on_POST(self, request, user_id): user_localpart=target_user.localpart, user_filter=content ) - defer.returnValue((200, {"filter_id": str(filter_id)})) + return (200, {"filter_id": str(filter_id)}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index a312dd259389..e629c4256d6c 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -47,7 +47,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, group_description)) + return (200, group_description) @defer.inlineCallbacks def on_POST(self, request, group_id): @@ -59,7 +59,7 @@ def on_POST(self, request, group_id): group_id, requester_user_id, content ) - defer.returnValue((200, {})) + return (200, {}) class GroupSummaryServlet(RestServlet): @@ -83,7 +83,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, get_group_summary)) + return (200, get_group_summary) class GroupSummaryRoomsCatServlet(RestServlet): @@ -120,7 +120,7 @@ def on_PUT(self, request, group_id, category_id, room_id): content=content, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id, room_id): @@ -131,7 +131,7 @@ def on_DELETE(self, request, group_id, category_id, room_id): group_id, requester_user_id, room_id=room_id, category_id=category_id ) - defer.returnValue((200, resp)) + return (200, resp) class 
GroupCategoryServlet(RestServlet): @@ -157,7 +157,7 @@ def on_GET(self, request, group_id, category_id): group_id, requester_user_id, category_id=category_id ) - defer.returnValue((200, category)) + return (200, category) @defer.inlineCallbacks def on_PUT(self, request, group_id, category_id): @@ -169,7 +169,7 @@ def on_PUT(self, request, group_id, category_id): group_id, requester_user_id, category_id=category_id, content=content ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, category_id): @@ -180,7 +180,7 @@ def on_DELETE(self, request, group_id, category_id): group_id, requester_user_id, category_id=category_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupCategoriesServlet(RestServlet): @@ -204,7 +204,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, category)) + return (200, category) class GroupRoleServlet(RestServlet): @@ -228,7 +228,7 @@ def on_GET(self, request, group_id, role_id): group_id, requester_user_id, role_id=role_id ) - defer.returnValue((200, category)) + return (200, category) @defer.inlineCallbacks def on_PUT(self, request, group_id, role_id): @@ -240,7 +240,7 @@ def on_PUT(self, request, group_id, role_id): group_id, requester_user_id, role_id=role_id, content=content ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id): @@ -251,7 +251,7 @@ def on_DELETE(self, request, group_id, role_id): group_id, requester_user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupRolesServlet(RestServlet): @@ -275,7 +275,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, category)) + return (200, category) class GroupSummaryUsersRoleServlet(RestServlet): @@ -312,7 +312,7 @@ def on_PUT(self, request, group_id, role_id, user_id): content=content, ) - defer.returnValue((200, resp)) + return (200, resp) @defer.inlineCallbacks def on_DELETE(self, request, group_id, role_id, user_id): @@ -323,7 +323,7 @@ def on_DELETE(self, request, group_id, role_id, user_id): group_id, requester_user_id, user_id=user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return (200, resp) class GroupRoomServlet(RestServlet): @@ -347,7 +347,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupUsersServlet(RestServlet): @@ -371,7 +371,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupInvitedUsersServlet(RestServlet): @@ -395,7 +395,7 @@ def on_GET(self, request, group_id): group_id, requester_user_id ) - defer.returnValue((200, result)) + return (200, result) class GroupSettingJoinPolicyServlet(RestServlet): @@ -420,7 +420,7 @@ def on_PUT(self, request, group_id): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupCreateServlet(RestServlet): @@ -450,7 +450,7 @@ def on_POST(self, request): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminRoomsServlet(RestServlet): @@ -477,7 +477,7 @@ def on_PUT(self, request, group_id, room_id): group_id, requester_user_id, room_id, content ) - defer.returnValue((200, result)) + return (200, result) @defer.inlineCallbacks def on_DELETE(self, request, group_id, room_id): @@ -488,7 
+488,7 @@ def on_DELETE(self, request, group_id, room_id): group_id, requester_user_id, room_id ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminRoomsConfigServlet(RestServlet): @@ -516,7 +516,7 @@ def on_PUT(self, request, group_id, room_id, config_key): group_id, requester_user_id, room_id, config_key, content ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminUsersInviteServlet(RestServlet): @@ -546,7 +546,7 @@ def on_PUT(self, request, group_id, user_id): group_id, user_id, requester_user_id, config ) - defer.returnValue((200, result)) + return (200, result) class GroupAdminUsersKickServlet(RestServlet): @@ -573,7 +573,7 @@ def on_PUT(self, request, group_id, user_id): group_id, user_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfLeaveServlet(RestServlet): @@ -598,7 +598,7 @@ def on_PUT(self, request, group_id): group_id, requester_user_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfJoinServlet(RestServlet): @@ -623,7 +623,7 @@ def on_PUT(self, request, group_id): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfAcceptInviteServlet(RestServlet): @@ -648,7 +648,7 @@ def on_PUT(self, request, group_id): group_id, requester_user_id, content ) - defer.returnValue((200, result)) + return (200, result) class GroupSelfUpdatePublicityServlet(RestServlet): @@ -672,7 +672,7 @@ def on_PUT(self, request, group_id): publicise = content["publicise"] yield self.store.update_group_publicity(group_id, requester_user_id, publicise) - defer.returnValue((200, {})) + return (200, {}) class PublicisedGroupsForUserServlet(RestServlet): @@ -694,7 +694,7 @@ def on_GET(self, request, user_id): result = yield self.groups_handler.get_publicised_groups_for_user(user_id) - defer.returnValue((200, result)) + return (200, result) class PublicisedGroupsForUsersServlet(RestServlet): @@ -719,7 +719,7 @@ def on_POST(self, request): result = yield self.groups_handler.bulk_get_publicised_groups(user_ids) - defer.returnValue((200, result)) + return (200, result) class GroupsForUserServlet(RestServlet): @@ -741,7 +741,7 @@ def on_GET(self, request): result = yield self.groups_handler.get_joined_groups(requester_user_id) - defer.returnValue((200, result)) + return (200, result) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 45c9928b6510..6008adec7cf3 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -95,7 +95,7 @@ def on_POST(self, request, device_id): result = yield self.e2e_keys_handler.upload_keys_for_user( user_id, device_id, body ) - defer.returnValue((200, result)) + return (200, result) class KeyQueryServlet(RestServlet): @@ -149,7 +149,7 @@ def on_POST(self, request): timeout = parse_integer(request, "timeout", 10 * 1000) body = parse_json_object_from_request(request) result = yield self.e2e_keys_handler.query_devices(body, timeout) - defer.returnValue((200, result)) + return (200, result) class KeyChangesServlet(RestServlet): @@ -189,7 +189,7 @@ def on_GET(self, request): results = yield self.device_handler.get_user_ids_changed(user_id, from_token) - defer.returnValue((200, results)) + return (200, results) class OneTimeKeyServlet(RestServlet): @@ -224,7 +224,7 @@ def on_POST(self, request): timeout = parse_integer(request, "timeout", 10 * 1000) body = 
parse_json_object_from_request(request) result = yield self.e2e_keys_handler.claim_one_time_keys(body, timeout) - defer.returnValue((200, result)) + return (200, result) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 728a52328f18..d034863a3c4f 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -88,9 +88,7 @@ def on_GET(self, request): returned_push_actions.append(returned_pa) next_token = str(pa["stream_ordering"]) - defer.returnValue( - (200, {"notifications": returned_push_actions, "next_token": next_token}) - ) + return (200, {"notifications": returned_push_actions, "next_token": next_token}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index b1b5385b09ef..b4925c0f5933 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -83,16 +83,14 @@ def on_POST(self, request, user_id): yield self.store.insert_open_id_token(token, ts_valid_until_ms, user_id) - defer.returnValue( - ( - 200, - { - "access_token": token, - "token_type": "Bearer", - "matrix_server_name": self.server_name, - "expires_in": self.EXPIRES_MS / 1000, - }, - ) + return ( + 200, + { + "access_token": token, + "token_type": "Bearer", + "matrix_server_name": self.server_name, + "expires_in": self.EXPIRES_MS / 1000, + }, ) diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index e75664279b51..d93d6a9f242d 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -59,7 +59,7 @@ def on_POST(self, request, room_id): event_id=read_marker_event_id, ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 488905626a0f..98a97b70593b 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -52,7 +52,7 @@ def on_POST(self, request, room_id, receipt_type, event_id): room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id ) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index f327999e5990..05ea1459e356 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -95,7 +95,7 @@ def on_POST(self, request): raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) ret = yield self.identity_handler.requestEmailToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class MsisdnRegisterRequestTokenRestServlet(RestServlet): @@ -138,7 +138,7 @@ def on_POST(self, request): ) ret = yield self.identity_handler.requestMsisdnToken(**body) - defer.returnValue((200, ret)) + return (200, ret) class UsernameAvailabilityRestServlet(RestServlet): @@ -178,7 +178,7 @@ def on_GET(self, request): yield self.registration_handler.check_username(username) - defer.returnValue((200, {"available": True})) + return (200, {"available": True}) class RegisterRestServlet(RestServlet): @@ -230,7 +230,7 @@ def on_POST(self, request): if kind == b"guest": ret = yield self._do_guest_registration(body, address=client_addr) - defer.returnValue(ret) + return ret return elif 
kind != b"user": raise UnrecognizedRequestError( @@ -282,7 +282,7 @@ def on_POST(self, request): result = yield self._do_appservice_registration( desired_username, access_token, body ) - defer.returnValue((200, result)) # we throw for non 200 responses + return (200, result) # we throw for non 200 responses return # for either shared secret or regular registration, downcase the @@ -301,7 +301,7 @@ def on_POST(self, request): result = yield self._do_shared_secret_registration( desired_username, desired_password, body ) - defer.returnValue((200, result)) # we throw for non 200 responses + return (200, result) # we throw for non 200 responses return # == Normal User Registration == (everyone else) @@ -500,7 +500,7 @@ def on_POST(self, request): bind_msisdn=params.get("bind_msisdn"), ) - defer.returnValue((200, return_dict)) + return (200, return_dict) def on_OPTIONS(self, _): return 200, {} @@ -510,7 +510,7 @@ def _do_appservice_registration(self, username, as_token, body): user_id = yield self.registration_handler.appservice_register( username, as_token ) - defer.returnValue((yield self._create_registration_details(user_id, body))) + return (yield self._create_registration_details(user_id, body)) @defer.inlineCallbacks def _do_shared_secret_registration(self, username, password, body): @@ -546,7 +546,7 @@ def _do_shared_secret_registration(self, username, password, body): ) result = yield self._create_registration_details(user_id, body) - defer.returnValue(result) + return result @defer.inlineCallbacks def _create_registration_details(self, user_id, params): @@ -570,7 +570,7 @@ def _create_registration_details(self, user_id, params): ) result.update({"access_token": access_token, "device_id": device_id}) - defer.returnValue(result) + return result @defer.inlineCallbacks def _do_guest_registration(self, params, address=None): @@ -588,16 +588,14 @@ def _do_guest_registration(self, params, address=None): user_id, device_id, initial_display_name, is_guest=True ) - defer.returnValue( - ( - 200, - { - "user_id": user_id, - "device_id": device_id, - "access_token": access_token, - "home_server": self.hs.hostname, - }, - ) + return ( + 200, + { + "user_id": user_id, + "device_id": device_id, + "access_token": access_token, + "home_server": self.hs.hostname, + }, ) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 6e52f6d284f7..6fde3decdbb4 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -116,7 +116,7 @@ def on_PUT_or_POST( requester, event_dict=event_dict, txn_id=txn_id ) - defer.returnValue((200, {"event_id": event.event_id})) + return (200, {"event_id": event.event_id}) class RelationPaginationServlet(RestServlet): @@ -196,7 +196,7 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non return_value["chunk"] = events return_value["original_event"] = original_event - defer.returnValue((200, return_value)) + return (200, return_value) class RelationAggregationPaginationServlet(RestServlet): @@ -268,7 +268,7 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non to_token=to_token, ) - defer.returnValue((200, pagination_chunk.to_dict())) + return (200, pagination_chunk.to_dict()) class RelationAggregationGroupPaginationServlet(RestServlet): @@ -354,7 +354,7 @@ def on_GET(self, request, room_id, parent_id, relation_type, event_type, key): return_value = result.to_dict() return_value["chunk"] = events - defer.returnValue((200, 
return_value))
+ return (200, return_value)
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py
index e7578af8040a..3fdd4584a361 100644
--- a/synapse/rest/client/v2_alpha/report_event.py
+++ b/synapse/rest/client/v2_alpha/report_event.py
@@ -72,7 +72,7 @@ def on_POST(self, request, room_id, event_id):
received_ts=self.clock.time_msec(),
)
- defer.returnValue((200, {}))
+ return (200, {})
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py
index 8d1b810565ee..10dec96208b6 100644
--- a/synapse/rest/client/v2_alpha/room_keys.py
+++ b/synapse/rest/client/v2_alpha/room_keys.py
@@ -135,7 +135,7 @@ def on_PUT(self, request, room_id, session_id):
body = {"rooms": {room_id: body}}
yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body)
- defer.returnValue((200, {}))
+ return (200, {})
@defer.inlineCallbacks
def on_GET(self, request, room_id, session_id):
@@ -218,7 +218,7 @@ def on_GET(self, request, room_id, session_id):
else:
room_keys = room_keys["rooms"][room_id]
- defer.returnValue((200, room_keys))
+ return (200, room_keys)
@defer.inlineCallbacks
def on_DELETE(self, request, room_id, session_id):
@@ -242,7 +242,7 @@ def on_DELETE(self, request, room_id, session_id):
yield self.e2e_room_keys_handler.delete_room_keys(
user_id, version, room_id, session_id
)
- defer.returnValue((200, {}))
+ return (200, {})
class RoomKeysNewVersionServlet(RestServlet):
@@ -293,7 +293,7 @@ def on_POST(self, request):
info = parse_json_object_from_request(request)
new_version = yield self.e2e_room_keys_handler.create_version(user_id, info)
- defer.returnValue((200, {"version": new_version}))
+ return (200, {"version": new_version})
# we deliberately don't have a PUT /version, as these things really should
# be immutable to avoid people footgunning
@@ -338,7 +338,7 @@ def on_GET(self, request, version):
except SynapseError as e:
if e.code == 404:
raise SynapseError(404, "No backup found", Codes.NOT_FOUND)
- defer.returnValue((200, info))
+ return (200, info)
@defer.inlineCallbacks
def on_DELETE(self, request, version):
@@ -358,7 +358,7 @@ def on_DELETE(self, request, version):
user_id = requester.user.to_string()
yield self.e2e_room_keys_handler.delete_version(user_id, version)
- defer.returnValue((200, {}))
+ return (200, {})
@defer.inlineCallbacks
def on_PUT(self, request, version):
@@ -392,7 +392,7 @@ def on_PUT(self, request, version):
)
yield self.e2e_room_keys_handler.update_version(user_id, version, info)
- defer.returnValue((200, {}))
+ return (200, {})
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
index d7f7faa029b5..14ba61a63e54 100644
--- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
+++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py
@@ -80,7 +80,7 @@ def on_POST(self, request, room_id):
ret = {"replacement_room": new_room_id}
- defer.returnValue((200, ret))
+ return (200, ret)
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py
index 78075b8fc0f8..2613648d821b 100644
--- a/synapse/rest/client/v2_alpha/sendtodevice.py
+++ b/synapse/rest/client/v2_alpha/sendtodevice.py
@@ -60,7 +60,7 @@ def _put(self, request, message_type, txn_id):
)
response = (200, {})
- defer.returnValue(response)
+ return response
def register_servlets(hs, http_server):
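Every hunk in these servlet files applies the same mechanical rewrite: on Python 3, a generator decorated with @defer.inlineCallbacks can return its result directly, so Twisted's defer.returnValue() helper is no longer needed. Below is a minimal, self-contained sketch of the before/after shape; the EchoServlet class and its fake handler are illustrative stand-ins, not code from this patch.

    from twisted.internet import defer

    class EchoServlet(object):
        """Illustrative stand-in for a RestServlet; not code from this patch."""

        @defer.inlineCallbacks
        def on_GET_old(self, request):
            # Python 2 style: "return value" is a SyntaxError inside a py2
            # generator, so defer.returnValue() raises a special exception
            # that smuggles the result out of the generator instead.
            body = yield defer.succeed({"echo": request})
            defer.returnValue((200, body))

        @defer.inlineCallbacks
        def on_GET_new(self, request):
            # Python 3 style: the generator returns a value directly; it
            # arrives as StopIteration.value and inlineCallbacks uses it as
            # the result of the Deferred. Behaviour is unchanged.
            body = yield defer.succeed({"echo": request})
            return (200, body)

    # Both produce an already-fired Deferred whose result is (200, {...}).
    EchoServlet().on_GET_old("ping").addCallback(print)
    EchoServlet().on_GET_new("ping").addCallback(print)

The old helper still works on Python 3, which is what allows the codebase to be converted file by file rather than in one atomic change.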
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index 02d56dee6cfb..7b32dd221209 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -174,7 +174,7 @@ def on_GET(self, request):
time_now, sync_result, requester.access_token_id, filter
)
- defer.returnValue((200, response_content))
+ return (200, response_content)
@defer.inlineCallbacks
def encode_response(self, time_now, sync_result, access_token_id, filter):
@@ -205,27 +205,23 @@ def encode_response(self, time_now, sync_result, access_token_id, filter):
event_formatter,
)
- defer.returnValue(
- {
- "account_data": {"events": sync_result.account_data},
- "to_device": {"events": sync_result.to_device},
- "device_lists": {
- "changed": list(sync_result.device_lists.changed),
- "left": list(sync_result.device_lists.left),
- },
- "presence": SyncRestServlet.encode_presence(
- sync_result.presence, time_now
- ),
- "rooms": {"join": joined, "invite": invited, "leave": archived},
- "groups": {
- "join": sync_result.groups.join,
- "invite": sync_result.groups.invite,
- "leave": sync_result.groups.leave,
- },
- "device_one_time_keys_count": sync_result.device_one_time_keys_count,
- "next_batch": sync_result.next_batch.to_string(),
- }
- )
+ return {
+ "account_data": {"events": sync_result.account_data},
+ "to_device": {"events": sync_result.to_device},
+ "device_lists": {
+ "changed": list(sync_result.device_lists.changed),
+ "left": list(sync_result.device_lists.left),
+ },
+ "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now),
+ "rooms": {"join": joined, "invite": invited, "leave": archived},
+ "groups": {
+ "join": sync_result.groups.join,
+ "invite": sync_result.groups.invite,
+ "leave": sync_result.groups.leave,
+ },
+ "device_one_time_keys_count": sync_result.device_one_time_keys_count,
+ "next_batch": sync_result.next_batch.to_string(),
+ }
@staticmethod
def encode_presence(events, time_now):
@@ -273,7 +269,7 @@ def encode_joined(self, rooms, time_now, token_id, event_fields, event_formatter
event_formatter=event_formatter,
)
- defer.returnValue(joined)
+ return joined
@defer.inlineCallbacks
def encode_invited(self, rooms, time_now, token_id, event_formatter):
@@ -309,7 +305,7 @@ def encode_invited(self, rooms, time_now, token_id, event_formatter):
invited_state.append(invite)
invited[room.room_id] = {"invite_state": {"events": invited_state}}
- defer.returnValue(invited)
+ return invited
@defer.inlineCallbacks
def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter):
@@ -342,7 +338,7 @@ def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatt
event_formatter=event_formatter,
)
- defer.returnValue(joined)
+ return joined
@defer.inlineCallbacks
def encode_room(
@@ -414,7 +410,7 @@ def serialize(events):
result["unread_notifications"] = room.unread_notifications
result["summary"] = room.summary
- defer.returnValue(result)
+ return result
def register_servlets(hs, http_server):
diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py
index 07b6ede6030c..d1735443558c 100644
--- a/synapse/rest/client/v2_alpha/tags.py
+++ b/synapse/rest/client/v2_alpha/tags.py
@@ -45,7 +45,7 @@ def on_GET(self, request, user_id, room_id):
tags = yield self.store.get_tags_for_room(user_id, room_id)
- defer.returnValue((200, {"tags": tags}))
+ return (200, {"tags": tags})
class 
TagServlet(RestServlet): @@ -76,7 +76,7 @@ def on_PUT(self, request, user_id, room_id, tag): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) @defer.inlineCallbacks def on_DELETE(self, request, user_id, room_id, tag): @@ -88,7 +88,7 @@ def on_DELETE(self, request, user_id, room_id, tag): self.notifier.on_new_event("account_data_key", max_id, users=[user_id]) - defer.returnValue((200, {})) + return (200, {}) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 1e66662a056b..158e686b01d0 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -40,7 +40,7 @@ def on_GET(self, request): yield self.auth.get_user_by_req(request, allow_guest=True) protocols = yield self.appservice_handler.get_3pe_protocols() - defer.returnValue((200, protocols)) + return (200, protocols) class ThirdPartyProtocolServlet(RestServlet): @@ -60,9 +60,9 @@ def on_GET(self, request, protocol): only_protocol=protocol ) if protocol in protocols: - defer.returnValue((200, protocols[protocol])) + return (200, protocols[protocol]) else: - defer.returnValue((404, {"error": "Unknown protocol"})) + return (404, {"error": "Unknown protocol"}) class ThirdPartyUserServlet(RestServlet): @@ -85,7 +85,7 @@ def on_GET(self, request, protocol): ThirdPartyEntityKind.USER, protocol, fields ) - defer.returnValue((200, results)) + return (200, results) class ThirdPartyLocationServlet(RestServlet): @@ -108,7 +108,7 @@ def on_GET(self, request, protocol): ThirdPartyEntityKind.LOCATION, protocol, fields ) - defer.returnValue((200, results)) + return (200, results) def register_servlets(hs, http_server): diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index e19fb6d58385..7ab2b80e46aa 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -60,7 +60,7 @@ def on_POST(self, request): user_id = requester.user.to_string() if not self.hs.config.user_directory_search_enabled: - defer.returnValue((200, {"limited": False, "results": []})) + return (200, {"limited": False, "results": []}) body = parse_json_object_from_request(request) @@ -76,7 +76,7 @@ def on_POST(self, request): user_id, search_term, limit ) - defer.returnValue((200, results)) + return (200, results) def register_servlets(hs, http_server): diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 65afffbb4230..92beefa1766c 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -171,7 +171,7 @@ def create_content( yield self._generate_thumbnails(None, media_id, media_id, media_type) - defer.returnValue("mxc://%s/%s" % (self.server_name, media_id)) + return "mxc://%s/%s" % (self.server_name, media_id) @defer.inlineCallbacks def get_local_media(self, request, media_id, name): @@ -282,7 +282,7 @@ def get_remote_media_info(self, server_name, media_id): with responder: pass - defer.returnValue(media_info) + return media_info @defer.inlineCallbacks def _get_remote_media_impl(self, server_name, media_id): @@ -317,14 +317,14 @@ def _get_remote_media_impl(self, server_name, media_id): responder = yield self.media_storage.fetch_media(file_info) if responder: - defer.returnValue((responder, media_info)) + return (responder, media_info) # Failed to find the file 
anywhere, lets download it. media_info = yield self._download_remote_file(server_name, media_id, file_id) responder = yield self.media_storage.fetch_media(file_info) - defer.returnValue((responder, media_info)) + return (responder, media_info) @defer.inlineCallbacks def _download_remote_file(self, server_name, media_id, file_id): @@ -421,7 +421,7 @@ def _download_remote_file(self, server_name, media_id, file_id): yield self._generate_thumbnails(server_name, media_id, file_id, media_type) - defer.returnValue(media_info) + return media_info def _get_thumbnail_requirements(self, media_type): return self.thumbnail_requirements.get(media_type, ()) @@ -500,7 +500,7 @@ def generate_local_exact_thumbnail( media_id, t_width, t_height, t_type, t_method, t_len ) - defer.returnValue(output_path) + return output_path @defer.inlineCallbacks def generate_remote_exact_thumbnail( @@ -554,7 +554,7 @@ def generate_remote_exact_thumbnail( t_len, ) - defer.returnValue(output_path) + return output_path @defer.inlineCallbacks def _generate_thumbnails( @@ -667,7 +667,7 @@ def _generate_thumbnails( media_id, t_width, t_height, t_type, t_method, t_len ) - defer.returnValue({"width": m_width, "height": m_height}) + return {"width": m_width, "height": m_height} @defer.inlineCallbacks def delete_old_remote_media(self, before_ts): @@ -704,7 +704,7 @@ def delete_old_remote_media(self, before_ts): yield self.store.delete_remote_media(origin, media_id) deleted += 1 - defer.returnValue({"deleted": deleted}) + return {"deleted": deleted} class MediaRepositoryResource(Resource): diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index 25e5ac284818..3b87717a5aa4 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -69,7 +69,7 @@ def store_file(self, source, file_info): ) yield finish_cb() - defer.returnValue(fname) + return fname @contextlib.contextmanager def store_into_file(self, file_info): @@ -143,14 +143,14 @@ def fetch_media(self, file_info): path = self._file_info_to_path(file_info) local_path = os.path.join(self.local_media_directory, path) if os.path.exists(local_path): - defer.returnValue(FileResponder(open(local_path, "rb"))) + return FileResponder(open(local_path, "rb")) for provider in self.storage_providers: res = yield provider.fetch(path, file_info) if res: - defer.returnValue(res) + return res - defer.returnValue(None) + return None @defer.inlineCallbacks def ensure_media_is_in_local_cache(self, file_info): @@ -166,7 +166,7 @@ def ensure_media_is_in_local_cache(self, file_info): path = self._file_info_to_path(file_info) local_path = os.path.join(self.local_media_directory, path) if os.path.exists(local_path): - defer.returnValue(local_path) + return local_path dirname = os.path.dirname(local_path) if not os.path.exists(dirname): @@ -181,7 +181,7 @@ def ensure_media_is_in_local_cache(self, file_info): ) yield res.write_to_consumer(consumer) yield consumer.wait() - defer.returnValue(local_path) + return local_path raise Exception("file could not be found") diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 5871737bfdfb..bd40891a7f70 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -182,7 +182,7 @@ def _do_preview(self, url, user, ts): og = cache_result["og"] if isinstance(og, six.text_type): og = og.encode("utf8") - defer.returnValue(og) + return og return media_info = yield self._download_url(url, 
user) @@ -284,7 +284,7 @@ def _do_preview(self, url, user, ts): media_info["created_ts"], ) - defer.returnValue(jsonog) + return jsonog @defer.inlineCallbacks def _download_url(self, url, user): @@ -354,22 +354,20 @@ def _download_url(self, url, user): # therefore not expire it. raise - defer.returnValue( - { - "media_type": media_type, - "media_length": length, - "download_name": download_name, - "created_ts": time_now_ms, - "filesystem_id": file_id, - "filename": fname, - "uri": uri, - "response_code": code, - # FIXME: we should calculate a proper expiration based on the - # Cache-Control and Expire headers. But for now, assume 1 hour. - "expires": 60 * 60 * 1000, - "etag": headers["ETag"][0] if "ETag" in headers else None, - } - ) + return { + "media_type": media_type, + "media_length": length, + "download_name": download_name, + "created_ts": time_now_ms, + "filesystem_id": file_id, + "filename": fname, + "uri": uri, + "response_code": code, + # FIXME: we should calculate a proper expiration based on the + # Cache-Control and Expire headers. But for now, assume 1 hour. + "expires": 60 * 60 * 1000, + "etag": headers["ETag"][0] if "ETag" in headers else None, + } def _start_expire_url_cache_data(self): return run_as_background_process( diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index f183743f3181..729c097e6dd1 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -193,4 +193,4 @@ def _is_room_currently_blocked(self, room_id): if event_id in referenced_events: referenced_events.remove(event.event_id) - defer.returnValue((currently_blocked, referenced_events)) + return (currently_blocked, referenced_events) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 71e7e7532025..2dac90578c71 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -86,7 +86,7 @@ def send_notice( res = yield self._event_creation_handler.create_and_send_nonmember_event( requester, event_dict, ratelimit=False ) - defer.returnValue(res) + return res @cachedInlineCallbacks() def get_notice_room_for_user(self, user_id): @@ -120,7 +120,7 @@ def get_notice_room_for_user(self, user_id): # we found a room which our user shares with the system notice # user logger.info("Using room %s", room.room_id) - defer.returnValue(room.room_id) + return room.room_id # apparently no existing notice room: create a new one logger.info("Creating server notices room for %s", user_id) @@ -158,4 +158,4 @@ def get_notice_room_for_user(self, user_id): self._notifier.on_new_event("account_data_key", max_id, users=[user_id]) logger.info("Created server notices room %s for %s", room_id, user_id) - defer.returnValue(room_id) + return room_id diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9f708fa20546..a0d34f16ea1e 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -135,7 +135,7 @@ def get_current_state( event = None if event_id: event = yield self.store.get_event(event_id, allow_none=True) - defer.returnValue(event) + return event return state_map = yield self.store.get_events( @@ -145,7 +145,7 @@ def get_current_state( key: state_map[e_id] for key, e_id in iteritems(state) if e_id in state_map } - defer.returnValue(state) + return state @defer.inlineCallbacks def get_current_state_ids(self, room_id, 
latest_event_ids=None): @@ -169,7 +169,7 @@ def get_current_state_ids(self, room_id, latest_event_ids=None): ret = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) state = ret.state - defer.returnValue(state) + return state @defer.inlineCallbacks def get_current_users_in_room(self, room_id, latest_event_ids=None): @@ -189,7 +189,7 @@ def get_current_users_in_room(self, room_id, latest_event_ids=None): logger.debug("calling resolve_state_groups from get_current_users_in_room") entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) joined_users = yield self.store.get_joined_users_from_state(room_id, entry) - defer.returnValue(joined_users) + return joined_users @defer.inlineCallbacks def get_current_hosts_in_room(self, room_id, latest_event_ids=None): @@ -198,7 +198,7 @@ def get_current_hosts_in_room(self, room_id, latest_event_ids=None): logger.debug("calling resolve_state_groups from get_current_hosts_in_room") entry = yield self.resolve_state_groups_for_events(room_id, latest_event_ids) joined_hosts = yield self.store.get_joined_hosts(room_id, entry) - defer.returnValue(joined_hosts) + return joined_hosts @defer.inlineCallbacks def compute_event_context(self, event, old_state=None): @@ -241,7 +241,7 @@ def compute_event_context(self, event, old_state=None): prev_state_ids=prev_state_ids, ) - defer.returnValue(context) + return context if old_state: # We already have the state, so we don't need to calculate it. @@ -275,7 +275,7 @@ def compute_event_context(self, event, old_state=None): prev_state_ids=prev_state_ids, ) - defer.returnValue(context) + return context logger.debug("calling resolve_state_groups from compute_event_context") @@ -343,7 +343,7 @@ def compute_event_context(self, event, old_state=None): delta_ids=delta_ids, ) - defer.returnValue(context) + return context @defer.inlineCallbacks def resolve_state_groups_for_events(self, room_id, event_ids): @@ -368,19 +368,17 @@ def resolve_state_groups_for_events(self, room_id, event_ids): state_groups_ids = yield self.store.get_state_groups_ids(room_id, event_ids) if len(state_groups_ids) == 0: - defer.returnValue(_StateCacheEntry(state={}, state_group=None)) + return _StateCacheEntry(state={}, state_group=None) elif len(state_groups_ids) == 1: name, state_list = list(state_groups_ids.items()).pop() prev_group, delta_ids = yield self.store.get_state_group_delta(name) - defer.returnValue( - _StateCacheEntry( - state=state_list, - state_group=name, - prev_group=prev_group, - delta_ids=delta_ids, - ) + return _StateCacheEntry( + state=state_list, + state_group=name, + prev_group=prev_group, + delta_ids=delta_ids, ) room_version = yield self.store.get_room_version(room_id) @@ -392,7 +390,7 @@ def resolve_state_groups_for_events(self, room_id, event_ids): None, state_res_store=StateResolutionStore(self.store), ) - defer.returnValue(result) + return result @defer.inlineCallbacks def resolve_events(self, room_version, state_sets, event): @@ -415,7 +413,7 @@ def resolve_events(self, room_version, state_sets, event): new_state = {key: state_map[ev_id] for key, ev_id in iteritems(new_state)} - defer.returnValue(new_state) + return new_state class StateResolutionHandler(object): @@ -479,7 +477,7 @@ def resolve_state_groups( if self._state_cache is not None: cache = self._state_cache.get(group_names, None) if cache: - defer.returnValue(cache) + return cache logger.info( "Resolving state for %s with %d groups", room_id, len(state_groups_ids) @@ -525,7 +523,7 @@ def resolve_state_groups( if 
self._state_cache is not None: self._state_cache[group_names] = cache - defer.returnValue(cache) + return cache def _make_state_cache_entry(new_state, state_groups_ids): diff --git a/synapse/state/v1.py b/synapse/state/v1.py index 88acd4817e6e..a2f92d9ff954 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -55,7 +55,7 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory): a map from (type, state_key) to event_id. """ if len(state_sets) == 1: - defer.returnValue(state_sets[0]) + return state_sets[0] unconflicted_state, conflicted_state = _seperate(state_sets) @@ -97,10 +97,8 @@ def resolve_events_with_store(state_sets, event_map, state_map_factory): state_map_new = yield state_map_factory(new_needed_events) state_map.update(state_map_new) - defer.returnValue( - _resolve_with_state( - unconflicted_state, conflicted_state, auth_events, state_map - ) + return _resolve_with_state( + unconflicted_state, conflicted_state, auth_events, state_map ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index db969e899752..b327c86f4016 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -63,7 +63,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto unconflicted_state, conflicted_state = _seperate(state_sets) if not conflicted_state: - defer.returnValue(unconflicted_state) + return unconflicted_state logger.debug("%d conflicted state entries", len(conflicted_state)) logger.debug("Calculating auth chain difference") @@ -137,7 +137,7 @@ def resolve_events_with_store(room_version, state_sets, event_map, state_res_sto logger.debug("done") - defer.returnValue(resolved_state) + return resolved_state @defer.inlineCallbacks @@ -168,18 +168,18 @@ def _get_power_level_for_sender(event_id, event_map, state_res_store): aev = yield _get_event(aid, event_map, state_res_store) if (aev.type, aev.state_key) == (EventTypes.Create, ""): if aev.content.get("creator") == event.sender: - defer.returnValue(100) + return 100 break - defer.returnValue(0) + return 0 level = pl.content.get("users", {}).get(event.sender) if level is None: level = pl.content.get("users_default", 0) if level is None: - defer.returnValue(0) + return 0 else: - defer.returnValue(int(level)) + return int(level) @defer.inlineCallbacks @@ -224,7 +224,7 @@ def _get_auth_chain_difference(state_sets, event_map, state_res_store): intersection = set(auth_sets[0]).intersection(*auth_sets[1:]) union = set().union(*auth_sets) - defer.returnValue(union - intersection) + return union - intersection def _seperate(state_sets): @@ -343,7 +343,7 @@ def _get_power_order(event_id): it = lexicographical_topological_sort(graph, key=_get_power_order) sorted_events = list(it) - defer.returnValue(sorted_events) + return sorted_events @defer.inlineCallbacks @@ -396,7 +396,7 @@ def _iterative_auth_checks( except AuthError: pass - defer.returnValue(resolved_state) + return resolved_state @defer.inlineCallbacks @@ -439,7 +439,7 @@ def _mainline_sort(event_ids, resolved_power_event_id, event_map, state_res_stor event_ids.sort(key=lambda ev_id: order_map[ev_id]) - defer.returnValue(event_ids) + return event_ids @defer.inlineCallbacks @@ -462,7 +462,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor while event: depth = mainline_map.get(event.event_id) if depth is not None: - defer.returnValue(depth) + return depth auth_events = event.auth_event_ids() event = None @@ -474,7 +474,7 @@ def _get_mainline_depth_for_event(event, mainline_map, event_map, state_res_stor break # 
Didn't find a power level auth event, so we just return 0 - defer.returnValue(0) + return 0 @defer.inlineCallbacks @@ -493,7 +493,7 @@ def _get_event(event_id, event_map, state_res_store): if event_id not in event_map: events = yield state_res_store.get_events([event_id], allow_rejected=True) event_map.update(events) - defer.returnValue(event_map[event_id]) + return event_map[event_id] def lexicographical_topological_sort(graph, key): diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 86a333a91916..e7f6ea728630 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -498,7 +498,7 @@ def get_users_paginate(self, order, start, limit): ) count = yield self.runInteraction("get_users_paginate", self.get_user_count_txn) retval = {"users": users, "total": count} - defer.returnValue(retval) + return retval def search_users(self, term): """Function to search users list for one or more users with diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index a7c93efa4654..489ce82fae3b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -513,7 +513,7 @@ def runInteraction(self, desc, func, *args, **kwargs): after_callback(*after_args, **after_kwargs) raise - defer.returnValue(result) + return result @defer.inlineCallbacks def runWithConnection(self, func, *args, **kwargs): @@ -553,7 +553,7 @@ def inner_func(conn, *args, **kwargs): with PreserveLoggingContext(): result = yield self._db_pool.runWithConnection(inner_func, *args, **kwargs) - defer.returnValue(result) + return result @staticmethod def cursor_to_dict(cursor): @@ -615,8 +615,8 @@ def _simple_insert(self, table, values, or_ignore=False, desc="_simple_insert"): # a cursor after we receive an error from the db. if not or_ignore: raise - defer.returnValue(False) - defer.returnValue(True) + return False + return True @staticmethod def _simple_insert_txn(txn, table, values): @@ -708,7 +708,7 @@ def _simple_upsert( insertion_values, lock=lock, ) - defer.returnValue(result) + return result except self.database_engine.module.IntegrityError as e: attempts += 1 if attempts >= 5: @@ -1121,7 +1121,7 @@ def _simple_select_many_batch( results = [] if not iterable: - defer.returnValue(results) + return results # iterables can not be sliced, so convert it to a list first it_list = list(iterable) @@ -1142,7 +1142,7 @@ def _simple_select_many_batch( results.extend(rows) - defer.returnValue(results) + return results @classmethod def _simple_select_many_txn(cls, txn, table, column, iterable, keyvalues, retcols): diff --git a/synapse/storage/account_data.py b/synapse/storage/account_data.py index 8394389073f4..9fa5b4f3d6b3 100644 --- a/synapse/storage/account_data.py +++ b/synapse/storage/account_data.py @@ -111,9 +111,9 @@ def get_global_account_data_by_type_for_user(self, data_type, user_id): ) if result: - defer.returnValue(json.loads(result)) + return json.loads(result) else: - defer.returnValue(None) + return None @cached(num_args=2) def get_account_data_for_room(self, user_id, room_id): @@ -264,11 +264,9 @@ def is_ignored_by(self, ignored_user_id, ignorer_user_id, cache_context): on_invalidate=cache_context.invalidate, ) if not ignored_account_data: - defer.returnValue(False) + return False - defer.returnValue( - ignored_user_id in ignored_account_data.get("ignored_users", {}) - ) + return ignored_user_id in ignored_account_data.get("ignored_users", {}) class AccountDataStore(AccountDataWorkerStore): @@ -332,7 +330,7 @@ def add_account_data_to_room(self, user_id, room_id, 
account_data_type, content) ) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result @defer.inlineCallbacks def add_account_data_for_user(self, user_id, account_data_type, content): @@ -373,7 +371,7 @@ def add_account_data_for_user(self, user_id, account_data_type, content): ) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result def _update_max_stream_id(self, next_id): """Update the max stream_id diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index eb329ebd8b43..05d9c05c3feb 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -145,7 +145,7 @@ def get_appservices_by_state(self, state): for service in as_list: if service.id == res["as_id"]: services.append(service) - defer.returnValue(services) + return services @defer.inlineCallbacks def get_appservice_state(self, service): @@ -164,9 +164,9 @@ def get_appservice_state(self, service): desc="get_appservice_state", ) if result: - defer.returnValue(result.get("state")) + return result.get("state") return - defer.returnValue(None) + return None def set_appservice_state(self, service, state): """Set the application service state. @@ -298,15 +298,13 @@ def _get_oldest_unsent_txn(txn): ) if not entry: - defer.returnValue(None) + return None event_ids = json.loads(entry["event_ids"]) events = yield self.get_events_as_list(event_ids) - defer.returnValue( - AppServiceTransaction(service=service, id=entry["txn_id"], events=events) - ) + return AppServiceTransaction(service=service, id=entry["txn_id"], events=events) def _get_last_txn(self, txn, service_id): txn.execute( @@ -360,7 +358,7 @@ def get_new_events_for_appservice_txn(txn): events = yield self.get_events_as_list(event_ids) - defer.returnValue((upper_bound, events)) + return (upper_bound, events) class ApplicationServiceTransactionStore(ApplicationServiceTransactionWorkerStore): diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index 50f913a414a5..e5f0668f0909 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -115,7 +115,7 @@ def _run_background_updates(self): " Unscheduling background update task." ) self._all_done = True - defer.returnValue(None) + return None @defer.inlineCallbacks def has_completed_background_updates(self): @@ -127,11 +127,11 @@ def has_completed_background_updates(self): # if we've previously determined that there is nothing left to do, that # is easy if self._all_done: - defer.returnValue(True) + return True # obviously, if we have things in our queue, we're not done. if self._background_update_queue: - defer.returnValue(False) + return False # otherwise, check if there are updates to be run. 
This is important, # as we may be running on a worker which doesn't perform the bg updates @@ -144,9 +144,9 @@ def has_completed_background_updates(self): ) if not updates: self._all_done = True - defer.returnValue(True) + return True - defer.returnValue(False) + return False @defer.inlineCallbacks def do_next_background_update(self, desired_duration_ms): @@ -173,14 +173,14 @@ def do_next_background_update(self, desired_duration_ms): if not self._background_update_queue: # no work left to do - defer.returnValue(None) + return None # pop from the front, and add back to the back update_name = self._background_update_queue.pop(0) self._background_update_queue.append(update_name) res = yield self._do_background_update(update_name, desired_duration_ms) - defer.returnValue(res) + return res @defer.inlineCallbacks def _do_background_update(self, update_name, desired_duration_ms): @@ -231,7 +231,7 @@ def _do_background_update(self, update_name, desired_duration_ms): performance.update(items_updated, duration_ms) - defer.returnValue(len(self._background_update_performance)) + return len(self._background_update_performance) def register_background_update_handler(self, update_name, update_handler): """Register a handler for doing a background update. @@ -266,7 +266,7 @@ def register_noop_background_update(self, update_name): @defer.inlineCallbacks def noop_update(progress, batch_size): yield self._end_background_update(update_name) - defer.returnValue(1) + return 1 self.register_background_update_handler(update_name, noop_update) @@ -370,7 +370,7 @@ def updater(progress, batch_size): logger.info("Adding index %s to %s", index_name, table) yield self.runWithConnection(runner) yield self._end_background_update(update_name) - defer.returnValue(1) + return 1 self.register_background_update_handler(update_name, updater) diff --git a/synapse/storage/client_ips.py b/synapse/storage/client_ips.py index bda68de5be2b..6db8c5407724 100644 --- a/synapse/storage/client_ips.py +++ b/synapse/storage/client_ips.py @@ -104,7 +104,7 @@ def f(conn): yield self.runWithConnection(f) yield self._end_background_update("user_ips_drop_nonunique_index") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _analyze_user_ip(self, progress, batch_size): @@ -121,7 +121,7 @@ def user_ips_analyze(txn): yield self._end_background_update("user_ips_analyze") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _remove_user_ip_dupes(self, progress, batch_size): @@ -291,7 +291,7 @@ def remove(txn): if last: yield self._end_background_update("user_ips_remove_dupes") - defer.returnValue(batch_size) + return batch_size @defer.inlineCallbacks def insert_client_ip( @@ -401,7 +401,7 @@ def get_last_client_ip_by_device(self, user_id, device_id): "device_id": did, "last_seen": last_seen, } - defer.returnValue(ret) + return ret @classmethod def _get_last_client_ip_by_device_txn(cls, txn, user_id, device_id, retcols): @@ -461,14 +461,12 @@ def get_user_ip_and_agents(self, user): ((row["access_token"], row["ip"]), (row["user_agent"], row["last_seen"])) for row in rows ) - defer.returnValue( - list( - { - "access_token": access_token, - "ip": ip, - "user_agent": user_agent, - "last_seen": last_seen, - } - for (access_token, ip), (user_agent, last_seen) in iteritems(results) - ) + return list( + { + "access_token": access_token, + "ip": ip, + "user_agent": user_agent, + "last_seen": last_seen, + } + for (access_token, ip), (user_agent, last_seen) in iteritems(results) ) diff --git a/synapse/storage/deviceinbox.py 
b/synapse/storage/deviceinbox.py index 4ea0deea4ff8..79bb0ea46db5 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -92,7 +92,7 @@ def delete_messages_for_device(self, user_id, device_id, up_to_stream_id): user_id, last_deleted_stream_id ) if not has_changed: - defer.returnValue(0) + return 0 def delete_messages_for_device_txn(txn): sql = ( @@ -115,7 +115,7 @@ def delete_messages_for_device_txn(txn): last_deleted_stream_id, up_to_stream_id ) - defer.returnValue(count) + return count def get_new_device_msgs_for_remote( self, destination, last_stream_id, current_stream_id, limit @@ -263,7 +263,7 @@ def add_messages_txn(txn, now_ms, stream_id): destination, stream_id ) - defer.returnValue(self._device_inbox_id_gen.get_current_token()) + return self._device_inbox_id_gen.get_current_token() @defer.inlineCallbacks def add_messages_from_remote_to_device_inbox( @@ -312,7 +312,7 @@ def add_messages_txn(txn, now_ms, stream_id): for user_id in local_messages_by_user_then_device.keys(): self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id) - defer.returnValue(stream_id) + return stream_id def _add_messages_to_local_device_inbox_txn( self, txn, stream_id, messages_by_user_then_device @@ -426,4 +426,4 @@ def reindex_txn(conn): yield self._end_background_update(self.DEVICE_INBOX_STREAM_ID) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index d2b113a4e76d..8f72d9289555 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -71,7 +71,7 @@ def get_devices_by_user(self, user_id): desc="get_devices_by_user", ) - defer.returnValue({d["device_id"]: d for d in devices}) + return {d["device_id"]: d for d in devices} @defer.inlineCallbacks def get_devices_by_remote(self, destination, from_stream_id, limit): @@ -88,7 +88,7 @@ def get_devices_by_remote(self, destination, from_stream_id, limit): destination, int(from_stream_id) ) if not has_changed: - defer.returnValue((now_stream_id, [])) + return (now_stream_id, []) # We retrieve n+1 devices from the list of outbound pokes where n is # our outbound device update limit. We then check if the very last @@ -111,7 +111,7 @@ def get_devices_by_remote(self, destination, from_stream_id, limit): # Return an empty list if there are no updates if not updates: - defer.returnValue((now_stream_id, [])) + return (now_stream_id, []) # if we have exceeded the limit, we need to exclude any results with the # same stream_id as the last row. @@ -147,13 +147,13 @@ def get_devices_by_remote(self, destination, from_stream_id, limit): # skip that stream_id and return an empty list, and continue with the next # stream_id next time. 
if not query_map: - defer.returnValue((stream_id_cutoff, [])) + return (stream_id_cutoff, []) results = yield self._get_device_update_edus_by_remote( destination, from_stream_id, query_map ) - defer.returnValue((now_stream_id, results)) + return (now_stream_id, results) def _get_devices_by_remote_txn( self, txn, destination, from_stream_id, now_stream_id, limit @@ -232,7 +232,7 @@ def _get_device_update_edus_by_remote(self, destination, from_stream_id, query_m results.append(result) - defer.returnValue(results) + return results def _get_last_device_update_for_remote_user( self, destination, user_id, from_stream_id @@ -330,7 +330,7 @@ def get_user_devices_from_cache(self, query_list): else: results[user_id] = yield self._get_cached_devices_for_user(user_id) - defer.returnValue((user_ids_not_in_cache, results)) + return (user_ids_not_in_cache, results) @cachedInlineCallbacks(num_args=2, tree=True) def _get_cached_user_device(self, user_id, device_id): @@ -340,7 +340,7 @@ def _get_cached_user_device(self, user_id, device_id): retcol="content", desc="_get_cached_user_device", ) - defer.returnValue(db_to_json(content)) + return db_to_json(content) @cachedInlineCallbacks() def _get_cached_devices_for_user(self, user_id): @@ -350,9 +350,9 @@ def _get_cached_devices_for_user(self, user_id): retcols=("device_id", "content"), desc="_get_cached_devices_for_user", ) - defer.returnValue( - {device["device_id"]: db_to_json(device["content"]) for device in devices} - ) + return { + device["device_id"]: db_to_json(device["content"]) for device in devices + } def get_devices_with_keys_by_user(self, user_id): """Get all devices (with any device keys) for a user @@ -482,7 +482,7 @@ def get_device_list_last_stream_id_for_remotes(self, user_ids): results = {user_id: None for user_id in user_ids} results.update({row["user_id"]: row["stream_id"] for row in rows}) - defer.returnValue(results) + return results class DeviceStore(DeviceWorkerStore, BackgroundUpdateStore): @@ -543,7 +543,7 @@ def store_device(self, user_id, device_id, initial_device_display_name): """ key = (user_id, device_id) if self.device_id_exists_cache.get(key, None): - defer.returnValue(False) + return False try: inserted = yield self._simple_insert( @@ -557,7 +557,7 @@ def store_device(self, user_id, device_id, initial_device_display_name): or_ignore=True, ) self.device_id_exists_cache.prefill(key, True) - defer.returnValue(inserted) + return inserted except Exception as e: logger.error( "store_device with device_id=%s(%r) user_id=%s(%r)" @@ -780,7 +780,7 @@ def add_device_change_to_streams(self, user_id, device_ids, hosts): hosts, stream_id, ) - defer.returnValue(stream_id) + return stream_id def _add_device_change_txn(self, txn, user_id, device_ids, hosts, stream_id): now = self._clock.time_msec() @@ -889,4 +889,4 @@ def f(conn): yield self.runWithConnection(f) yield self._end_background_update(DROP_DEVICE_LIST_STREAMS_NON_UNIQUE_INDEXES) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/directory.py b/synapse/storage/directory.py index 201bbd430cc0..e966a73f3dc5 100644 --- a/synapse/storage/directory.py +++ b/synapse/storage/directory.py @@ -46,7 +46,7 @@ def get_association_from_room_alias(self, room_alias): ) if not room_id: - defer.returnValue(None) + return None return servers = yield self._simple_select_onecol( @@ -57,10 +57,10 @@ def get_association_from_room_alias(self, room_alias): ) if not servers: - defer.returnValue(None) + return None return - defer.returnValue(RoomAliasMapping(room_id, room_alias.to_string(), 
servers)) + return RoomAliasMapping(room_id, room_alias.to_string(), servers) def get_room_alias_creator(self, room_alias): return self._simple_select_one_onecol( @@ -125,7 +125,7 @@ def alias_txn(txn): raise SynapseError( 409, "Room alias %s already exists" % room_alias.to_string() ) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def delete_room_alias(self, room_alias): @@ -133,7 +133,7 @@ def delete_room_alias(self, room_alias): "delete_room_alias", self._delete_room_alias_txn, room_alias ) - defer.returnValue(room_id) + return room_id def _delete_room_alias_txn(self, txn, room_alias): txn.execute( diff --git a/synapse/storage/e2e_room_keys.py b/synapse/storage/e2e_room_keys.py index f40ef2ab6451..99128f2df70e 100644 --- a/synapse/storage/e2e_room_keys.py +++ b/synapse/storage/e2e_room_keys.py @@ -61,7 +61,7 @@ def get_e2e_room_key(self, user_id, version, room_id, session_id): row["session_data"] = json.loads(row["session_data"]) - defer.returnValue(row) + return row @defer.inlineCallbacks def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): @@ -118,7 +118,7 @@ def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): try: version = int(version) except ValueError: - defer.returnValue({"rooms": {}}) + return {"rooms": {}} keyvalues = {"user_id": user_id, "version": version} if room_id: @@ -151,7 +151,7 @@ def get_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): "session_data": json.loads(row["session_data"]), } - defer.returnValue(sessions) + return sessions @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 2fabb9e2cbd2..1e07474e706a 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -41,7 +41,7 @@ def get_e2e_device_keys( dict containing "key_json", "device_display_name". """ if not query_list: - defer.returnValue({}) + return {} results = yield self.runInteraction( "get_e2e_device_keys", @@ -55,7 +55,7 @@ def get_e2e_device_keys( for device_id, device_info in iteritems(device_keys): device_info["keys"] = db_to_json(device_info.pop("key_json")) - defer.returnValue(results) + return results def _get_e2e_device_keys_txn( self, txn, query_list, include_all_devices=False, include_deleted_devices=False @@ -130,9 +130,7 @@ def get_e2e_one_time_keys(self, user_id, device_id, key_ids): desc="add_e2e_one_time_keys_check", ) - defer.returnValue( - {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} - ) + return {(row["algorithm"], row["key_id"]): row["key_json"] for row in rows} @defer.inlineCallbacks def add_e2e_one_time_keys(self, user_id, device_id, time_now, new_keys): diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index cb4478342f17..4f500d893e74 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -131,9 +131,9 @@ def get_max_depth_of(self, event_ids): ) if not rows: - defer.returnValue(0) + return 0 else: - defer.returnValue(max(row["depth"] for row in rows)) + return max(row["depth"] for row in rows) def _get_oldest_events_in_room_txn(self, txn, room_id): return self._simple_select_onecol_txn( @@ -169,7 +169,7 @@ def get_prev_events_for_room(self, room_id): # make sure that we don't completely ignore the older events. 
res = res[0:5] + random.sample(res[5:], 5) - defer.returnValue(res) + return res def get_latest_event_ids_and_hashes_in_room(self, room_id): """ @@ -411,7 +411,7 @@ def get_missing_events(self, room_id, earliest_events, latest_events, limit): limit, ) events = yield self.get_events_as_list(ids) - defer.returnValue(events) + return events def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit): @@ -463,7 +463,7 @@ def get_successor_events(self, event_ids): desc="get_successor_events", ) - defer.returnValue([row["event_id"] for row in rows]) + return [row["event_id"] for row in rows] class EventFederationStore(EventFederationWorkerStore): @@ -654,4 +654,4 @@ def delete_event_auth(txn): if not result: yield self._end_background_update(self.EVENT_AUTH_STATE_ONLY) - defer.returnValue(batch_size) + return batch_size diff --git a/synapse/storage/event_push_actions.py b/synapse/storage/event_push_actions.py index dcfb67e0294e..22025effbc83 100644 --- a/synapse/storage/event_push_actions.py +++ b/synapse/storage/event_push_actions.py @@ -100,7 +100,7 @@ def get_unread_event_push_actions_by_room_for_user( user_id, last_read_event_id, ) - defer.returnValue(ret) + return ret def _get_unread_counts_by_receipt_txn( self, txn, room_id, user_id, last_read_event_id @@ -178,7 +178,7 @@ def f(txn): return [r[0] for r in txn] ret = yield self.runInteraction("get_push_action_users_in_range", f) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range_for_http( @@ -279,7 +279,7 @@ def get_no_receipt(txn): # Take only up to the limit. We have to stop at the limit because # one of the subqueries may have hit the limit. - defer.returnValue(notifs[:limit]) + return notifs[:limit] @defer.inlineCallbacks def get_unread_push_actions_for_user_in_range_for_email( @@ -380,7 +380,7 @@ def get_no_receipt(txn): notifs.sort(key=lambda r: -(r["received_ts"] or 0)) # Now return the first `limit` - defer.returnValue(notifs[:limit]) + return notifs[:limit] def get_if_maybe_push_in_range_for_user(self, user_id, min_stream_ordering): """A fast check to see if there might be something to push for the @@ -477,7 +477,7 @@ def remove_push_actions_from_staging(self, event_id): keyvalues={"event_id": event_id}, desc="remove_push_actions_from_staging", ) - defer.returnValue(res) + return res except Exception: # this method is called from an exception handler, so propagating # another exception here really isn't helpful - there's nothing @@ -732,7 +732,7 @@ def f(txn): push_actions = yield self.runInteraction("get_push_actions_for_user", f) for pa in push_actions: pa["actions"] = _deserialize_action(pa["actions"], pa["highlight"]) - defer.returnValue(push_actions) + return push_actions @defer.inlineCallbacks def get_time_of_last_push_action_before(self, stream_ordering): @@ -749,7 +749,7 @@ def f(txn): return txn.fetchone() result = yield self.runInteraction("get_time_of_last_push_action_before", f) - defer.returnValue(result[0] if result else None) + return result[0] if result else None @defer.inlineCallbacks def get_latest_push_action_stream_ordering(self): @@ -758,7 +758,7 @@ def f(txn): return txn.fetchone() result = yield self.runInteraction("get_latest_push_action_stream_ordering", f) - defer.returnValue(result[0] or 0) + return result[0] or 0 def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id): # Sad that we have to blow away the cache for the whole room here diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 
b70457bfc6aa..88c01801164f 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -223,7 +223,7 @@ def f(self, *args, **kwargs): except self.database_engine.module.IntegrityError: logger.exception("IntegrityError, retrying.") res = yield func(self, *args, delete_existing=True, **kwargs) - defer.returnValue(res) + return res return f @@ -309,7 +309,7 @@ def persist_events(self, events_and_contexts, backfilled=False): max_persisted_id = yield self._stream_id_gen.get_current_token() - defer.returnValue(max_persisted_id) + return max_persisted_id @defer.inlineCallbacks @log_function @@ -334,7 +334,7 @@ def persist_event(self, event, context, backfilled=False): yield make_deferred_yieldable(deferred) max_persisted_id = yield self._stream_id_gen.get_current_token() - defer.returnValue((event.internal_metadata.stream_ordering, max_persisted_id)) + return (event.internal_metadata.stream_ordering, max_persisted_id) def _maybe_start_persisting(self, room_id): @defer.inlineCallbacks @@ -595,7 +595,7 @@ def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids): stale = latest_event_ids & result stale_forward_extremities_counter.observe(len(stale)) - defer.returnValue(result) + return result @defer.inlineCallbacks def _get_events_which_are_prevs(self, event_ids): @@ -633,7 +633,7 @@ def _get_events_which_are_prevs_txn(txn, batch): "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk ) - defer.returnValue(results) + return results @defer.inlineCallbacks def _get_prevs_before_rejected(self, event_ids): @@ -695,7 +695,7 @@ def _get_prevs_before_rejected_txn(txn, batch): "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk ) - defer.returnValue(existing_prevs) + return existing_prevs @defer.inlineCallbacks def _get_new_state_after_events( @@ -796,7 +796,7 @@ def _get_new_state_after_events( # If they old and new groups are the same then we don't need to do # anything. if old_state_groups == new_state_groups: - defer.returnValue((None, None)) + return (None, None) if len(new_state_groups) == 1 and len(old_state_groups) == 1: # If we're going from one state group to another, lets check if @@ -813,7 +813,7 @@ def _get_new_state_after_events( # the current state in memory then lets also return that, # but it doesn't matter if we don't. new_state = state_groups_map.get(new_state_group) - defer.returnValue((new_state, delta_ids)) + return (new_state, delta_ids) # Now that we have calculated new_state_groups we need to get # their state IDs so we can resolve to a single state set. @@ -825,7 +825,7 @@ def _get_new_state_after_events( if len(new_state_groups) == 1: # If there is only one state group, then we know what the current # state is. - defer.returnValue((state_groups_map[new_state_groups.pop()], None)) + return (state_groups_map[new_state_groups.pop()], None) # Ok, we need to defer to the state handler to resolve our state sets. 
@@ -854,7 +854,7 @@ def _get_new_state_after_events( state_res_store=StateResolutionStore(self), ) - defer.returnValue((res.state, None)) + return (res.state, None) @defer.inlineCallbacks def _calculate_state_delta(self, room_id, current_state): @@ -877,7 +877,7 @@ def _calculate_state_delta(self, room_id, current_state): if ev_id != existing_state.get(key) } - defer.returnValue((to_delete, to_insert)) + return (to_delete, to_insert) @log_function def _persist_events_txn( @@ -1564,7 +1564,7 @@ def _count_messages(txn): return count ret = yield self.runInteraction("count_messages", _count_messages) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def count_daily_sent_messages(self): @@ -1585,7 +1585,7 @@ def _count_messages(txn): return count ret = yield self.runInteraction("count_daily_sent_messages", _count_messages) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def count_daily_active_rooms(self): @@ -1600,7 +1600,7 @@ def _count(txn): return count ret = yield self.runInteraction("count_daily_active_rooms", _count) - defer.returnValue(ret) + return ret def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" @@ -2183,7 +2183,7 @@ def is_event_after(self, event_id1, event_id2): """ to_1, so_1 = yield self._get_event_ordering(event_id1) to_2, so_2 = yield self._get_event_ordering(event_id2) - defer.returnValue((to_1, so_1) > (to_2, so_2)) + return (to_1, so_1) > (to_2, so_2) @cachedInlineCallbacks(max_entries=5000) def _get_event_ordering(self, event_id): @@ -2197,9 +2197,7 @@ def _get_event_ordering(self, event_id): if not res: raise SynapseError(404, "Could not find event %s" % (event_id,)) - defer.returnValue( - (int(res["topological_ordering"]), int(res["stream_ordering"])) - ) + return (int(res["topological_ordering"]), int(res["stream_ordering"])) def get_all_updated_current_state_deltas(self, from_token, to_token, limit): def get_all_updated_current_state_deltas_txn(txn): diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 1ce21d190c85..6587f31e2bab 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -135,7 +135,7 @@ def reindex_txn(txn): if not result: yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_reindex_origin_server_ts(self, progress, batch_size): @@ -212,7 +212,7 @@ def reindex_search_txn(txn): if not result: yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _cleanup_extremities_bg_update(self, progress, batch_size): @@ -396,4 +396,4 @@ def _drop_table_txn(txn): "_cleanup_extremities_bg_update_drop_table", _drop_table_txn ) - defer.returnValue(num_handled) + return num_handled diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 858fc755a121..44441957dbdf 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -157,7 +157,7 @@ def get_event( if event is None and not allow_none: raise NotFoundError("Could not find event %s" % (event_id,)) - defer.returnValue(event) + return event @defer.inlineCallbacks def get_events( @@ -187,7 +187,7 @@ def get_events( allow_rejected=allow_rejected, ) - defer.returnValue({e.event_id: e for e in events}) + return {e.event_id: e for e in events} @defer.inlineCallbacks def get_events_as_list( @@ -217,7 +217,7 @@ def 
get_events_as_list( """ if not event_ids: - defer.returnValue([]) + return [] # there may be duplicates so we cast the list to a set event_entry_map = yield self._get_events_from_cache_or_db( @@ -305,7 +305,7 @@ def get_events_as_list( event.unsigned["prev_content"] = prev.content event.unsigned["prev_sender"] = prev.sender - defer.returnValue(events) + return events @defer.inlineCallbacks def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False): @@ -452,7 +452,7 @@ def _enqueue_events(self, events, allow_rejected=False): without having to create a new transaction for each request for events. """ if not events: - defer.returnValue({}) + return {} events_d = defer.Deferred() with self._event_fetch_lock: @@ -496,7 +496,7 @@ def _enqueue_events(self, events, allow_rejected=False): ) ) - defer.returnValue({e.event.event_id: e for e in res if e}) + return {e.event.event_id: e for e in res if e} def _fetch_event_rows(self, txn, event_ids): """Fetch event rows from the database @@ -609,7 +609,7 @@ def _get_event_from_row( self._get_event_cache.prefill((original_ev.event_id,), cache_entry) - defer.returnValue(cache_entry) + return cache_entry @defer.inlineCallbacks def _maybe_redact_event_row(self, original_ev, redactions): @@ -679,7 +679,7 @@ def have_events_in_timeline(self, event_ids): desc="have_events_in_timeline", ) - defer.returnValue(set(r["event_id"] for r in rows)) + return set(r["event_id"] for r in rows) @defer.inlineCallbacks def have_seen_events(self, event_ids): @@ -705,7 +705,7 @@ def have_seen_events_txn(txn, chunk): input_iterator = iter(event_ids) for chunk in iter(lambda: list(itertools.islice(input_iterator, 100)), []): yield self.runInteraction("have_seen_events", have_seen_events_txn, chunk) - defer.returnValue(results) + return results def get_seen_events_with_rejections(self, event_ids): """Given a list of event ids, check if we rejected them. @@ -816,4 +816,4 @@ def get_room_complexity(self, room_id): # it. 
complexity_v1 = round(state_events / 500, 2) - defer.returnValue({"v1": complexity_v1}) + return {"v1": complexity_v1} diff --git a/synapse/storage/filtering.py b/synapse/storage/filtering.py index b195dc66a0ce..23b48f6ceade 100644 --- a/synapse/storage/filtering.py +++ b/synapse/storage/filtering.py @@ -15,8 +15,6 @@ from canonicaljson import encode_canonical_json -from twisted.internet import defer - from synapse.api.errors import Codes, SynapseError from synapse.util.caches.descriptors import cachedInlineCallbacks @@ -41,7 +39,7 @@ def get_user_filter(self, user_localpart, filter_id): desc="get_user_filter", ) - defer.returnValue(db_to_json(def_json)) + return db_to_json(def_json) def add_user_filter(self, user_localpart, user_filter): def_json = encode_canonical_json(user_filter) diff --git a/synapse/storage/group_server.py b/synapse/storage/group_server.py index 73e6fc6de2b0..15b01c6958e6 100644 --- a/synapse/storage/group_server.py +++ b/synapse/storage/group_server.py @@ -307,15 +307,13 @@ def get_group_categories(self, group_id): desc="get_group_categories", ) - defer.returnValue( - { - row["category_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows + return { + row["category_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), } - ) + for row in rows + } @defer.inlineCallbacks def get_group_category(self, group_id, category_id): @@ -328,7 +326,7 @@ def get_group_category(self, group_id, category_id): category["profile"] = json.loads(category["profile"]) - defer.returnValue(category) + return category def upsert_group_category(self, group_id, category_id, profile, is_public): """Add/update room category for group @@ -370,15 +368,13 @@ def get_group_roles(self, group_id): desc="get_group_roles", ) - defer.returnValue( - { - row["role_id"]: { - "is_public": row["is_public"], - "profile": json.loads(row["profile"]), - } - for row in rows + return { + row["role_id"]: { + "is_public": row["is_public"], + "profile": json.loads(row["profile"]), } - ) + for row in rows + } @defer.inlineCallbacks def get_group_role(self, group_id, role_id): @@ -391,7 +387,7 @@ def get_group_role(self, group_id, role_id): role["profile"] = json.loads(role["profile"]) - defer.returnValue(role) + return role def upsert_group_role(self, group_id, role_id, profile, is_public): """Add/remove user role @@ -960,7 +956,7 @@ def _register_user_group_membership_txn(txn, next_id): _register_user_group_membership_txn, next_id, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def create_group( @@ -1057,9 +1053,9 @@ def get_remote_attestation(self, group_id, user_id): now = int(self._clock.time_msec()) if row and now < row["valid_until_ms"]: - defer.returnValue(json.loads(row["attestation_json"])) + return json.loads(row["attestation_json"]) - defer.returnValue(None) + return None def get_joined_groups(self, user_id): return self._simple_select_onecol( diff --git a/synapse/storage/monthly_active_users.py b/synapse/storage/monthly_active_users.py index 081564360fb6..752e9788a29f 100644 --- a/synapse/storage/monthly_active_users.py +++ b/synapse/storage/monthly_active_users.py @@ -173,7 +173,7 @@ def get_registered_reserved_users_count(self): ) if user_id: count = count + 1 - defer.returnValue(count) + return count @defer.inlineCallbacks def upsert_monthly_active_user(self, user_id): diff --git a/synapse/storage/presence.py b/synapse/storage/presence.py index 42ec8c6bb88b..1a0f2d5768fc 100644 --- a/synapse/storage/presence.py 
+++ b/synapse/storage/presence.py @@ -90,9 +90,7 @@ def update_presence(self, presence_states): presence_states, ) - defer.returnValue( - (stream_orderings[-1], self._presence_id_gen.get_current_token()) - ) + return (stream_orderings[-1], self._presence_id_gen.get_current_token()) def _update_presence_txn(self, txn, stream_orderings, presence_states): for stream_id, state in zip(stream_orderings, presence_states): @@ -180,7 +178,7 @@ def get_presence_for_users(self, user_ids): for row in rows: row["currently_active"] = bool(row["currently_active"]) - defer.returnValue({row["user_id"]: UserPresenceState(**row) for row in rows}) + return {row["user_id"]: UserPresenceState(**row) for row in rows} def get_current_presence_token(self): return self._presence_id_gen.get_current_token() diff --git a/synapse/storage/profile.py b/synapse/storage/profile.py index 0ff392bdb4d8..8a5d8e9b1842 100644 --- a/synapse/storage/profile.py +++ b/synapse/storage/profile.py @@ -34,15 +34,13 @@ def get_profileinfo(self, user_localpart): except StoreError as e: if e.code == 404: # no match - defer.returnValue(ProfileInfo(None, None)) + return ProfileInfo(None, None) return else: raise - defer.returnValue( - ProfileInfo( - avatar_url=profile["avatar_url"], display_name=profile["displayname"] - ) + return ProfileInfo( + avatar_url=profile["avatar_url"], display_name=profile["displayname"] ) def get_profile_displayname(self, user_localpart): @@ -168,7 +166,7 @@ def is_subscribed_remote_profile_for_user(self, user_id): ) if res: - defer.returnValue(True) + return True res = yield self._simple_select_one_onecol( table="group_invites", @@ -179,4 +177,4 @@ def is_subscribed_remote_profile_for_user(self, user_id): ) if res: - defer.returnValue(True) + return True diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index 98cec8c82bf6..a6517c4cf3f3 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -120,7 +120,7 @@ def get_push_rules_for_user(self, user_id): rules = _load_rules(rows, enabled_map) - defer.returnValue(rules) + return rules @cachedInlineCallbacks(max_entries=5000) def get_push_rules_enabled_for_user(self, user_id): @@ -130,9 +130,7 @@ def get_push_rules_enabled_for_user(self, user_id): retcols=("user_name", "rule_id", "enabled"), desc="get_push_rules_enabled_for_user", ) - defer.returnValue( - {r["rule_id"]: False if r["enabled"] == 0 else True for r in results} - ) + return {r["rule_id"]: False if r["enabled"] == 0 else True for r in results} def have_push_rules_changed_for_user(self, user_id, last_id): if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id): @@ -160,7 +158,7 @@ def have_push_rules_changed_txn(txn): ) def bulk_get_push_rules(self, user_ids): if not user_ids: - defer.returnValue({}) + return {} results = {user_id: [] for user_id in user_ids} @@ -182,7 +180,7 @@ def bulk_get_push_rules(self, user_ids): for user_id, rules in results.items(): results[user_id] = _load_rules(rules, enabled_map_by_user.get(user_id, {})) - defer.returnValue(results) + return results @defer.inlineCallbacks def move_push_rule_from_room_to_room(self, new_room_id, user_id, rule): @@ -253,7 +251,7 @@ def bulk_get_push_rules_for_room(self, event, context): result = yield self._bulk_get_push_rules_for_room( event.room_id, state_group, current_state_ids, event=event ) - defer.returnValue(result) + return result @cachedInlineCallbacks(num_args=2, cache_context=True) def _bulk_get_push_rules_for_room( @@ -312,7 +310,7 @@ def _bulk_get_push_rules_for_room( 
rules_by_user = {k: v for k, v in rules_by_user.items() if v is not None} - defer.returnValue(rules_by_user) + return rules_by_user @cachedList( cached_method_name="get_push_rules_enabled_for_user", @@ -322,7 +320,7 @@ def _bulk_get_push_rules_for_room( ) def bulk_get_push_rules_enabled(self, user_ids): if not user_ids: - defer.returnValue({}) + return {} results = {user_id: {} for user_id in user_ids} @@ -336,7 +334,7 @@ def bulk_get_push_rules_enabled(self, user_ids): for row in rows: enabled = bool(row["enabled"]) results.setdefault(row["user_name"], {})[row["rule_id"]] = enabled - defer.returnValue(results) + return results class PushRuleStore(PushRulesWorkerStore): diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index cfe0a94330fb..be3d4d9ded1f 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -63,7 +63,7 @@ def user_has_pusher(self, user_id): ret = yield self._simple_select_one_onecol( "pushers", {"user_name": user_id}, "id", allow_none=True ) - defer.returnValue(ret is not None) + return ret is not None def get_pushers_by_app_id_and_pushkey(self, app_id, pushkey): return self.get_pushers_by({"app_id": app_id, "pushkey": pushkey}) @@ -95,7 +95,7 @@ def get_pushers_by(self, keyvalues): ], desc="get_pushers_by", ) - defer.returnValue(self._decode_pushers_rows(ret)) + return self._decode_pushers_rows(ret) @defer.inlineCallbacks def get_all_pushers(self): @@ -106,7 +106,7 @@ def get_pushers(txn): return self._decode_pushers_rows(rows) rows = yield self.runInteraction("get_all_pushers", get_pushers) - defer.returnValue(rows) + return rows def get_all_updated_pushers(self, last_id, current_id, limit): if last_id == current_id: @@ -205,7 +205,7 @@ def get_if_users_have_pushers(self, user_ids): result = {user_id: False for user_id in user_ids} result.update({r["user_name"]: True for r in rows}) - defer.returnValue(result) + return result class PusherStore(PusherWorkerStore): @@ -343,7 +343,7 @@ def get_throttle_params_by_room(self, pusher_id): "throttle_ms": row["throttle_ms"], } - defer.returnValue(params_by_room) + return params_by_room @defer.inlineCallbacks def set_throttle_params(self, pusher_id, room_id, params): diff --git a/synapse/storage/receipts.py b/synapse/storage/receipts.py index b477da12b1df..6aa6d98ebb30 100644 --- a/synapse/storage/receipts.py +++ b/synapse/storage/receipts.py @@ -58,7 +58,7 @@ def get_max_receipt_stream_id(self): @cachedInlineCallbacks() def get_users_with_read_receipts_in_room(self, room_id): receipts = yield self.get_receipts_for_room(room_id, "m.read") - defer.returnValue(set(r["user_id"] for r in receipts)) + return set(r["user_id"] for r in receipts) @cached(num_args=2) def get_receipts_for_room(self, room_id, receipt_type): @@ -92,7 +92,7 @@ def get_receipts_for_user(self, user_id, receipt_type): desc="get_receipts_for_user", ) - defer.returnValue({row["room_id"]: row["event_id"] for row in rows}) + return {row["room_id"]: row["event_id"] for row in rows} @defer.inlineCallbacks def get_receipts_for_user_with_orderings(self, user_id, receipt_type): @@ -110,16 +110,14 @@ def f(txn): return txn.fetchall() rows = yield self.runInteraction("get_receipts_for_user_with_orderings", f) - defer.returnValue( - { - row[0]: { - "event_id": row[1], - "topological_ordering": row[2], - "stream_ordering": row[3], - } - for row in rows + return { + row[0]: { + "event_id": row[1], + "topological_ordering": row[2], + "stream_ordering": row[3], } - ) + for row in rows + } @defer.inlineCallbacks def 
get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): @@ -147,7 +145,7 @@ def get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): room_ids, to_key, from_key=from_key ) - defer.returnValue([ev for res in results.values() for ev in res]) + return [ev for res in results.values() for ev in res] def get_linearized_receipts_for_room(self, room_id, to_key, from_key=None): """Get receipts for a single room for sending to clients. @@ -197,7 +195,7 @@ def f(txn): rows = yield self.runInteraction("get_linearized_receipts_for_room", f) if not rows: - defer.returnValue([]) + return [] content = {} for row in rows: @@ -205,9 +203,7 @@ def f(txn): row["user_id"] ] = json.loads(row["data"]) - defer.returnValue( - [{"type": "m.receipt", "room_id": room_id, "content": content}] - ) + return [{"type": "m.receipt", "room_id": room_id, "content": content}] @cachedList( cached_method_name="_get_linearized_receipts_for_room", @@ -217,7 +213,7 @@ def f(txn): ) def _get_linearized_receipts_for_rooms(self, room_ids, to_key, from_key=None): if not room_ids: - defer.returnValue({}) + return {} def f(txn): if from_key: @@ -264,7 +260,7 @@ def f(txn): room_id: [results[room_id]] if room_id in results else [] for room_id in room_ids } - defer.returnValue(results) + return results def get_all_updated_receipts(self, last_id, current_id, limit=None): if last_id == current_id: @@ -468,7 +464,7 @@ def graph_to_linear(txn): ) if event_ts is None: - defer.returnValue(None) + return None now = self._clock.time_msec() logger.debug( @@ -482,7 +478,7 @@ def graph_to_linear(txn): max_persisted_id = self._receipts_id_gen.get_current_token() - defer.returnValue((stream_id, max_persisted_id)) + return (stream_id, max_persisted_id) def insert_graph_receipt(self, room_id, receipt_type, user_id, event_ids, data): return self.runInteraction( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 8b2c2a97ab46..999c10a3086d 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -75,12 +75,12 @@ def is_trial_user(self, user_id): info = yield self.get_user_by_id(user_id) if not info: - defer.returnValue(False) + return False now = self.clock.time_msec() trial_duration_ms = self.config.mau_trial_days * 24 * 60 * 60 * 1000 is_trial = (now - info["creation_ts"] * 1000) < trial_duration_ms - defer.returnValue(is_trial) + return is_trial @cached() def get_user_by_access_token(self, token): @@ -115,7 +115,7 @@ def get_expiration_ts_for_user(self, user_id): allow_none=True, desc="get_expiration_ts_for_user", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def set_account_validity_for_user( @@ -190,7 +190,7 @@ def get_user_from_renewal_token(self, renewal_token): desc="get_user_from_renewal_token", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_renewal_token_for_user(self, user_id): @@ -209,7 +209,7 @@ def get_renewal_token_for_user(self, user_id): desc="get_renewal_token_for_user", ) - defer.returnValue(res) + return res @defer.inlineCallbacks def get_users_expiring_soon(self): @@ -237,7 +237,7 @@ def select_users_txn(txn, now_ms, renew_at): self.config.account_validity.renew_at, ) - defer.returnValue(res) + return res @defer.inlineCallbacks def set_renewal_mail_status(self, user_id, email_sent): @@ -280,7 +280,7 @@ def is_server_admin(self, user): desc="is_server_admin", ) - defer.returnValue(res if res else False) + return res if res else False def _query_for_auth(self, txn, token): sql = ( @@ -311,7 +311,7 
@@ def is_support_user(self, user_id): res = yield self.runInteraction( "is_support_user", self.is_support_user_txn, user_id ) - defer.returnValue(res) + return res def is_support_user_txn(self, txn, user_id): res = self._simple_select_one_onecol_txn( @@ -349,7 +349,7 @@ def _count_users(txn): return 0 ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return ret def count_daily_user_type(self): """ @@ -395,7 +395,7 @@ def _count_users(txn): return count ret = yield self.runInteraction("count_users", _count_users) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def find_next_generated_user_id_localpart(self): @@ -425,7 +425,7 @@ def _find_next_generated_user_id(txn): if i not in found: return i - defer.returnValue( + return ( ( yield self.runInteraction( "find_next_generated_user_id", _find_next_generated_user_id @@ -447,7 +447,7 @@ def get_user_id_by_threepid(self, medium, address, require_verified=False): user_id = yield self.runInteraction( "get_user_id_by_threepid", self.get_user_id_by_threepid_txn, medium, address ) - defer.returnValue(user_id) + return user_id def get_user_id_by_threepid_txn(self, txn, medium, address): """Returns user id from threepid @@ -487,7 +487,7 @@ def user_get_threepids(self, user_id): ["medium", "address", "validated_at", "added_at"], "user_get_threepids", ) - defer.returnValue(ret) + return ret def user_delete_threepid(self, user_id, medium, address): return self._simple_delete( @@ -677,7 +677,7 @@ def _background_update_set_deactivated_flag_txn(txn): if end: yield self._end_background_update("users_set_deactivated_flag") - defer.returnValue(batch_size) + return batch_size @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms): @@ -957,7 +957,7 @@ def is_guest(self, user_id): desc="is_guest", ) - defer.returnValue(res if res else False) + return res if res else False def add_user_pending_deactivation(self, user_id): """ @@ -1024,7 +1024,7 @@ def _bg_user_threepids_grandfather_txn(txn): yield self._end_background_update("user_threepids_grandfather") - defer.returnValue(1) + return 1 def get_threepid_validation_session( self, medium, client_secret, address=None, sid=None, validated=True @@ -1337,4 +1337,4 @@ def get_user_deactivated_status(self, user_id): ) # Convert the integer into a boolean. 
- defer.returnValue(res == 1) + return res == 1 diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 9954bc094f8d..fcb5f2f23aef 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -17,8 +17,6 @@ import attr -from twisted.internet import defer - from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore @@ -363,7 +361,7 @@ def _get_applicable_edit_txn(txn): return edit_event = yield self.get_event(edit_id, allow_none=True) - defer.returnValue(edit_event) + return edit_event def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender): """Check if a user has already annotated an event with the same key diff --git a/synapse/storage/room.py b/synapse/storage/room.py index fe9d79d7929e..bc606292b82f 100644 --- a/synapse/storage/room.py +++ b/synapse/storage/room.py @@ -193,14 +193,12 @@ def get_ratelimit_for_user(self, user_id): ) if row: - defer.returnValue( - RatelimitOverride( - messages_per_second=row["messages_per_second"], - burst_count=row["burst_count"], - ) + return RatelimitOverride( + messages_per_second=row["messages_per_second"], + burst_count=row["burst_count"], ) else: - defer.returnValue(None) + return None class RoomStore(RoomWorkerStore, SearchStore): diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index b3c002b9eb92..cb88e49b5168 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -108,7 +108,7 @@ def get_hosts_in_room(self, room_id, cache_context): room_id, on_invalidate=cache_context.invalidate ) hosts = frozenset(get_domain_from_id(user_id) for user_id in user_ids) - defer.returnValue(hosts) + return hosts @cached(max_entries=100000, iterable=True) def get_users_in_room(self, room_id): @@ -253,8 +253,8 @@ def get_invite_for_user_in_room(self, user_id, room_id): invites = yield self.get_invited_rooms_for_user(user_id) for invite in invites: if invite.room_id == room_id: - defer.returnValue(invite) - defer.returnValue(None) + return invite + return None def get_rooms_for_user_where_membership_is(self, user_id, membership_list): """ Get all the rooms for this user where the membership for this user @@ -347,11 +347,9 @@ def get_rooms_for_user_with_stream_ordering(self, user_id): rooms = yield self.get_rooms_for_user_where_membership_is( user_id, membership_list=[Membership.JOIN] ) - defer.returnValue( - frozenset( - GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering) - for r in rooms - ) + return frozenset( + GetRoomsForUserWithStreamOrdering(r.room_id, r.stream_ordering) + for r in rooms ) @defer.inlineCallbacks @@ -361,7 +359,7 @@ def get_rooms_for_user(self, user_id, on_invalidate=None): rooms = yield self.get_rooms_for_user_with_stream_ordering( user_id, on_invalidate=on_invalidate ) - defer.returnValue(frozenset(r.room_id for r in rooms)) + return frozenset(r.room_id for r in rooms) @cachedInlineCallbacks(max_entries=500000, cache_context=True, iterable=True) def get_users_who_share_room_with_user(self, user_id, cache_context): @@ -378,7 +376,7 @@ def get_users_who_share_room_with_user(self, user_id, cache_context): ) user_who_share_room.update(user_ids) - defer.returnValue(user_who_share_room) + return user_who_share_room @defer.inlineCallbacks def get_joined_users_from_context(self, event, context): @@ -394,7 +392,7 @@ def get_joined_users_from_context(self, event, context): result = yield self._get_joined_users_from_context( event.room_id, 
state_group, current_state_ids, event=event, context=context ) - defer.returnValue(result) + return result def get_joined_users_from_state(self, room_id, state_entry): state_group = state_entry.state_group @@ -508,7 +506,7 @@ def _get_joined_users_from_context( avatar_url=to_ascii(event.content.get("avatar_url", None)), ) - defer.returnValue(users_in_room) + return users_in_room @cachedInlineCallbacks(max_entries=10000) def is_host_joined(self, room_id, host): @@ -533,14 +531,14 @@ def is_host_joined(self, room_id, host): rows = yield self._execute("is_host_joined", None, sql, room_id, like_clause) if not rows: - defer.returnValue(False) + return False user_id = rows[0][0] if get_domain_from_id(user_id) != host: # This can only happen if the host name has something funky in it raise Exception("Invalid host name") - defer.returnValue(True) + return True @cachedInlineCallbacks() def was_host_joined(self, room_id, host): @@ -573,14 +571,14 @@ def was_host_joined(self, room_id, host): rows = yield self._execute("was_host_joined", None, sql, room_id, like_clause) if not rows: - defer.returnValue(False) + return False user_id = rows[0][0] if get_domain_from_id(user_id) != host: # This can only happen if the host name has something funky in it raise Exception("Invalid host name") - defer.returnValue(True) + return True def get_joined_hosts(self, room_id, state_entry): state_group = state_entry.state_group @@ -607,7 +605,7 @@ def _get_joined_hosts(self, room_id, state_group, current_state_ids, state_entry cache = self._get_joined_hosts_cache(room_id) joined_hosts = yield cache.get_destinations(state_entry) - defer.returnValue(joined_hosts) + return joined_hosts @cached(max_entries=10000) def _get_joined_hosts_cache(self, room_id): @@ -637,7 +635,7 @@ def f(txn): return rows[0][0] count = yield self.runInteraction("did_forget_membership", f) - defer.returnValue(count == 0) + return count == 0 @defer.inlineCallbacks def get_rooms_user_has_been_in(self, user_id): @@ -847,7 +845,7 @@ def add_membership_profile_txn(txn): if not result: yield self._end_background_update(_MEMBERSHIP_PROFILE_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def _background_current_state_membership(self, progress, batch_size): @@ -905,7 +903,7 @@ def _background_current_state_membership_txn(txn, last_processed_room): if finished: yield self._end_background_update(_CURRENT_STATE_MEMBERSHIP_UPDATE_NAME) - defer.returnValue(row_count) + return row_count class _JoinedHostsCache(object): @@ -933,7 +931,7 @@ def get_destinations(self, state_entry): state_entry(synapse.state._StateCacheEntry) """ if state_entry.state_group == self.state_group: - defer.returnValue(frozenset(self.hosts_to_joined_users)) + return frozenset(self.hosts_to_joined_users) with (yield self.linearizer.queue(())): if state_entry.state_group == self.state_group: @@ -970,7 +968,7 @@ def get_destinations(self, state_entry): else: self.state_group = object() self._len = sum(len(v) for v in itervalues(self.hosts_to_joined_users)) - defer.returnValue(frozenset(self.hosts_to_joined_users)) + return frozenset(self.hosts_to_joined_users) def __len__(self): return self._len diff --git a/synapse/storage/search.py b/synapse/storage/search.py index f3b1cec93363..df87ab6a6dcb 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -166,7 +166,7 @@ def reindex_search_txn(txn): if not result: yield self._end_background_update(self.EVENT_SEARCH_UPDATE_NAME) - defer.returnValue(result) + return result @defer.inlineCallbacks def 
_background_reindex_gin_search(self, progress, batch_size): @@ -209,7 +209,7 @@ def create_index(conn): yield self.runWithConnection(create_index) yield self._end_background_update(self.EVENT_SEARCH_USE_GIN_POSTGRES_NAME) - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _background_reindex_search_order(self, progress, batch_size): @@ -287,7 +287,7 @@ def reindex_search_txn(txn): if not finished: yield self._end_background_update(self.EVENT_SEARCH_ORDER_UPDATE_NAME) - defer.returnValue(num_rows) + return num_rows def store_event_search_txn(self, txn, event, key, value): """Add event to the search table @@ -454,17 +454,15 @@ def search_msgs(self, room_ids, search_term, keys): count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - defer.returnValue( - { - "results": [ - {"event": event_map[r["event_id"]], "rank": r["rank"]} - for r in results - if r["event_id"] in event_map - ], - "highlights": highlights, - "count": count, - } - ) + return { + "results": [ + {"event": event_map[r["event_id"]], "rank": r["rank"]} + for r in results + if r["event_id"] in event_map + ], + "highlights": highlights, + "count": count, + } @defer.inlineCallbacks def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None): @@ -599,22 +597,20 @@ def search_rooms(self, room_ids, search_term, keys, limit, pagination_token=None count = sum(row["count"] for row in count_results if row["room_id"] in room_ids) - defer.returnValue( - { - "results": [ - { - "event": event_map[r["event_id"]], - "rank": r["rank"], - "pagination_token": "%s,%s" - % (r["origin_server_ts"], r["stream_ordering"]), - } - for r in results - if r["event_id"] in event_map - ], - "highlights": highlights, - "count": count, - } - ) + return { + "results": [ + { + "event": event_map[r["event_id"]], + "rank": r["rank"], + "pagination_token": "%s,%s" + % (r["origin_server_ts"], r["stream_ordering"]), + } + for r in results + if r["event_id"] in event_map + ], + "highlights": highlights, + "count": count, + } def _find_highlights_in_postgres(self, search_query, events): """Given a list of events and a search term, return a list of words diff --git a/synapse/storage/signatures.py b/synapse/storage/signatures.py index 6bd81e84adb2..fb83218f90bb 100644 --- a/synapse/storage/signatures.py +++ b/synapse/storage/signatures.py @@ -59,7 +59,7 @@ def add_event_hashes(self, event_ids): for e_id, h in hashes.items() } - defer.returnValue(list(hashes.items())) + return list(hashes.items()) def _get_event_reference_hashes_txn(self, txn, event_id): """Get all the hashes for a given PDU. 
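Every hunk above applies the same mechanical rewrite: inside an @defer.inlineCallbacks generator, defer.returnValue(x) becomes a plain return x. The two spellings are equivalent on Python 3, where a generator's return x raises StopIteration(x), which is exactly the signal defer.returnValue() had to fake on Python 2 (where return with a value inside a generator is a SyntaxError). A minimal standalone sketch of the equivalence, with illustrative function names that are not from Synapse, assuming only that Twisted is installed:

from twisted.internet import defer
from twisted.internet.task import react


@defer.inlineCallbacks
def old_style():
    # Python 2 era spelling: raise a special exception carrying the value.
    one = yield defer.succeed(1)
    defer.returnValue(one + 1)


@defer.inlineCallbacks
def new_style():
    # Python 3.3+ spelling: the generator returns the value directly.
    one = yield defer.succeed(1)
    return one + 1


@defer.inlineCallbacks
def main(reactor):
    # Both functions produce a Deferred that fires with 2.
    assert (yield old_style()) == (yield new_style()) == 2
    print("both spellings fire with 2")


if __name__ == "__main__":
    react(main)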
diff --git a/synapse/storage/state.py b/synapse/storage/state.py index a35289876d10..1980a871087a 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -422,7 +422,7 @@ def get_room_version(self, room_id): # Retrieve the room's create event create_event = yield self.get_create_event_for_room(room_id) - defer.returnValue(create_event.content.get("room_version", "1")) + return create_event.content.get("room_version", "1") @defer.inlineCallbacks def get_room_predecessor(self, room_id): @@ -442,7 +442,7 @@ def get_room_predecessor(self, room_id): create_event = yield self.get_create_event_for_room(room_id) # Return predecessor if present - defer.returnValue(create_event.content.get("predecessor", None)) + return create_event.content.get("predecessor", None) @defer.inlineCallbacks def get_create_event_for_room(self, room_id): @@ -466,7 +466,7 @@ def get_create_event_for_room(self, room_id): # Retrieve the room's create event and return create_event = yield self.get_event(create_id) - defer.returnValue(create_event) + return create_event @cached(max_entries=100000, iterable=True) def get_current_state_ids(self, room_id): @@ -563,7 +563,7 @@ def get_canonical_alias_for_room(self, room_id): if not event: return - defer.returnValue(event.content.get("canonical_alias")) + return event.content.get("canonical_alias") @cached(max_entries=10000, iterable=True) def get_state_group_delta(self, state_group): @@ -613,14 +613,14 @@ def get_state_groups_ids(self, _room_id, event_ids): dict of state_group_id -> (dict of (type, state_key) -> event id) """ if not event_ids: - defer.returnValue({}) + return {} event_to_groups = yield self._get_state_group_for_events(event_ids) groups = set(itervalues(event_to_groups)) group_to_state = yield self._get_state_for_groups(groups) - defer.returnValue(group_to_state) + return group_to_state @defer.inlineCallbacks def get_state_ids_for_group(self, state_group): @@ -634,7 +634,7 @@ def get_state_ids_for_group(self, state_group): """ group_to_state = yield self._get_state_for_groups((state_group,)) - defer.returnValue(group_to_state[state_group]) + return group_to_state[state_group] @defer.inlineCallbacks def get_state_groups(self, room_id, event_ids): @@ -645,7 +645,7 @@ def get_state_groups(self, room_id, event_ids): dict of state_group_id -> list of state events. 
""" if not event_ids: - defer.returnValue({}) + return {} group_to_ids = yield self.get_state_groups_ids(room_id, event_ids) @@ -658,16 +658,14 @@ def get_state_groups(self, room_id, event_ids): get_prev_content=False, ) - defer.returnValue( - { - group: [ - state_event_map[v] - for v in itervalues(event_id_map) - if v in state_event_map - ] - for group, event_id_map in iteritems(group_to_ids) - } - ) + return { + group: [ + state_event_map[v] + for v in itervalues(event_id_map) + if v in state_event_map + ] + for group, event_id_map in iteritems(group_to_ids) + } @defer.inlineCallbacks def _get_state_groups_from_groups(self, groups, state_filter): @@ -694,7 +692,7 @@ def _get_state_groups_from_groups(self, groups, state_filter): ) results.update(res) - defer.returnValue(results) + return results def _get_state_groups_from_groups_txn( self, txn, groups, state_filter=StateFilter.all() @@ -829,7 +827,7 @@ def get_state_for_events(self, event_ids, state_filter=StateFilter.all()): for event_id, group in iteritems(event_to_groups) } - defer.returnValue({event: event_to_state[event] for event in event_ids}) + return {event: event_to_state[event] for event in event_ids} @defer.inlineCallbacks def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): @@ -855,7 +853,7 @@ def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()): for event_id, group in iteritems(event_to_groups) } - defer.returnValue({event: event_to_state[event] for event in event_ids}) + return {event: event_to_state[event] for event in event_ids} @defer.inlineCallbacks def get_state_for_event(self, event_id, state_filter=StateFilter.all()): @@ -871,7 +869,7 @@ def get_state_for_event(self, event_id, state_filter=StateFilter.all()): A deferred dict from (type, state_key) -> state_event """ state_map = yield self.get_state_for_events([event_id], state_filter) - defer.returnValue(state_map[event_id]) + return state_map[event_id] @defer.inlineCallbacks def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): @@ -887,7 +885,7 @@ def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()): A deferred dict from (type, state_key) -> state_event """ state_map = yield self.get_state_ids_for_events([event_id], state_filter) - defer.returnValue(state_map[event_id]) + return state_map[event_id] @cached(max_entries=50000) def _get_state_group_for_event(self, event_id): @@ -917,7 +915,7 @@ def _get_state_group_for_events(self, event_ids): desc="_get_state_group_for_events", ) - defer.returnValue({row["event_id"]: row["state_group"] for row in rows}) + return {row["event_id"]: row["state_group"] for row in rows} def _get_state_for_group_using_cache(self, cache, group, state_filter): """Checks if group is in cache. See `_get_state_for_groups` @@ -997,7 +995,7 @@ def _get_state_for_groups(self, groups, state_filter=StateFilter.all()): incomplete_groups = incomplete_groups_m | incomplete_groups_nm if not incomplete_groups: - defer.returnValue(state) + return state cache_sequence_nm = self._state_group_cache.sequence cache_sequence_m = self._state_group_members_cache.sequence @@ -1024,7 +1022,7 @@ def _get_state_for_groups(self, groups, state_filter=StateFilter.all()): # everything we need from the database anyway. 
state[group] = state_filter.filter_state(group_state_dict) - defer.returnValue(state) + return state def _get_state_for_groups_using_cache(self, groups, cache, state_filter): """Gets the state at each of a list of state groups, optionally @@ -1498,7 +1496,7 @@ def reindex_txn(txn): self.STATE_GROUP_DEDUPLICATION_UPDATE_NAME ) - defer.returnValue(result * BATCH_SIZE_SCALE_FACTOR) + return result * BATCH_SIZE_SCALE_FACTOR @defer.inlineCallbacks def _background_index_state(self, progress, batch_size): @@ -1528,4 +1526,4 @@ def reindex_txn(conn): yield self._end_background_update(self.STATE_GROUP_INDEX_UPDATE_NAME) - defer.returnValue(1) + return 1 diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1cec84ee2eeb..e893b05ee76c 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -66,7 +66,7 @@ def _populate_stats_createtables(self, progress, batch_size): if not self.stats_enabled: yield self._end_background_update("populate_stats_createtables") - defer.returnValue(1) + return 1 # Get all the rooms that we want to process. def _make_staging_area(txn): @@ -120,7 +120,7 @@ def _make_staging_area(txn): self.get_earliest_token_for_room_stats.invalidate_all() yield self._end_background_update("populate_stats_createtables") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_stats_cleanup(self, progress, batch_size): @@ -129,7 +129,7 @@ def _populate_stats_cleanup(self, progress, batch_size): """ if not self.stats_enabled: yield self._end_background_update("populate_stats_cleanup") - defer.returnValue(1) + return 1 position = yield self._simple_select_one_onecol( TEMP_TABLE + "_position", None, "position" @@ -143,14 +143,14 @@ def _delete_staging_area(txn): yield self.runInteraction("populate_stats_cleanup", _delete_staging_area) yield self._end_background_update("populate_stats_cleanup") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_stats_process_rooms(self, progress, batch_size): if not self.stats_enabled: yield self._end_background_update("populate_stats_process_rooms") - defer.returnValue(1) + return 1 # If we don't have progress filed, delete everything. if not progress: @@ -186,7 +186,7 @@ def _get_next_batch(txn): # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_stats_process_rooms") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d rooms of %d remaining", @@ -303,9 +303,9 @@ def _fetch_data(txn): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. - defer.returnValue(processed_event_count) + return processed_event_count - defer.returnValue(processed_event_count) + return processed_event_count def delete_all_stats(self): """ diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index a0465484df5e..856c2ee8d8c8 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -300,7 +300,7 @@ def get_room_events_stream_for_rooms( ) if not room_ids: - defer.returnValue({}) + return {} results = {} room_ids = list(room_ids) @@ -323,7 +323,7 @@ def get_room_events_stream_for_rooms( ) results.update(dict(zip(rm_ids, res))) - defer.returnValue(results) + return results def get_rooms_that_changed(self, room_ids, from_key): """Given a list of rooms and a token, return rooms where there may have @@ -364,7 +364,7 @@ def get_room_events_stream_for_room( the chunk of events returned. 
""" if from_key == to_key: - defer.returnValue(([], from_key)) + return ([], from_key) from_id = RoomStreamToken.parse_stream_token(from_key).stream to_id = RoomStreamToken.parse_stream_token(to_key).stream @@ -374,7 +374,7 @@ def get_room_events_stream_for_room( ) if not has_changed: - defer.returnValue(([], from_key)) + return ([], from_key) def f(txn): sql = ( @@ -407,7 +407,7 @@ def f(txn): # get. key = from_key - defer.returnValue((ret, key)) + return (ret, key) @defer.inlineCallbacks def get_membership_changes_for_user(self, user_id, from_key, to_key): @@ -415,14 +415,14 @@ def get_membership_changes_for_user(self, user_id, from_key, to_key): to_id = RoomStreamToken.parse_stream_token(to_key).stream if from_key == to_key: - defer.returnValue([]) + return [] if from_id: has_changed = self._membership_stream_cache.has_entity_changed( user_id, int(from_id) ) if not has_changed: - defer.returnValue([]) + return [] def f(txn): sql = ( @@ -447,7 +447,7 @@ def f(txn): self._set_before_and_after(ret, rows, topo_order=False) - defer.returnValue(ret) + return ret @defer.inlineCallbacks def get_recent_events_for_room(self, room_id, limit, end_token): @@ -477,7 +477,7 @@ def get_recent_events_for_room(self, room_id, limit, end_token): self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + return (events, token) @defer.inlineCallbacks def get_recent_event_ids_for_room(self, room_id, limit, end_token): @@ -496,7 +496,7 @@ def get_recent_event_ids_for_room(self, room_id, limit, end_token): """ # Allow a zero limit here, and no-op. if limit == 0: - defer.returnValue(([], end_token)) + return ([], end_token) end_token = RoomStreamToken.parse(end_token) @@ -511,7 +511,7 @@ def get_recent_event_ids_for_room(self, room_id, limit, end_token): # We want to return the results in ascending order. 
rows.reverse() - defer.returnValue((rows, token)) + return (rows, token) def get_room_event_after_stream_ordering(self, room_id, stream_ordering): """Gets details of the first event in a room at or after a stream ordering @@ -549,12 +549,12 @@ def get_room_events_max_id(self, room_id=None): """ token = yield self.get_room_max_stream_ordering() if room_id is None: - defer.returnValue("s%d" % (token,)) + return "s%d" % (token,) else: topo = yield self.runInteraction( "_get_max_topological_txn", self._get_max_topological_txn, room_id ) - defer.returnValue("t%d-%d" % (topo, token)) + return "t%d-%d" % (topo, token) def get_stream_token_for_event(self, event_id): """The stream token for an event @@ -674,14 +674,12 @@ def get_events_around( [e for e in results["after"]["event_ids"]], get_prev_content=True ) - defer.returnValue( - { - "events_before": events_before, - "events_after": events_after, - "start": results["before"]["token"], - "end": results["after"]["token"], - } - ) + return { + "events_before": events_before, + "events_after": events_after, + "start": results["before"]["token"], + "end": results["after"]["token"], + } def _get_events_around_txn( self, txn, room_id, event_id, before_limit, after_limit, event_filter @@ -785,7 +783,7 @@ def get_all_new_events_stream_txn(txn): events = yield self.get_events_as_list(event_ids) - defer.returnValue((upper_bound, events)) + return (upper_bound, events) def get_federation_out_pos(self, typ): return self._simple_select_one_onecol( @@ -939,7 +937,7 @@ def paginate_room_events( self._set_before_and_after(events, rows) - defer.returnValue((events, token)) + return (events, token) class StreamStore(StreamWorkerStore): diff --git a/synapse/storage/tags.py b/synapse/storage/tags.py index e88f8ea35f2e..20dd6bd53df2 100644 --- a/synapse/storage/tags.py +++ b/synapse/storage/tags.py @@ -66,7 +66,7 @@ def get_all_updated_tags(self, last_id, current_id, limit): room_id string, tag string and content string. """ if last_id == current_id: - defer.returnValue([]) + return [] def get_all_updated_tags_txn(txn): sql = ( @@ -107,7 +107,7 @@ def get_tag_content(txn, tag_ids): ) results.extend(tags) - defer.returnValue(results) + return results @defer.inlineCallbacks def get_updated_tags(self, user_id, stream_id): @@ -135,7 +135,7 @@ def get_updated_tags_txn(txn): user_id, int(stream_id) ) if not changed: - defer.returnValue({}) + return {} room_ids = yield self.runInteraction("get_updated_tags", get_updated_tags_txn) @@ -145,7 +145,7 @@ def get_updated_tags_txn(txn): for room_id in room_ids: results[room_id] = tags_by_room.get(room_id, {}) - defer.returnValue(results) + return results def get_tags_for_room(self, user_id, room_id): """Get all the tags for the given room @@ -194,7 +194,7 @@ def add_tag_txn(txn, next_id): self.get_tags_for_user.invalidate((user_id,)) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result @defer.inlineCallbacks def remove_tag_from_room(self, user_id, room_id, tag): @@ -217,7 +217,7 @@ def remove_tag_txn(txn, next_id): self.get_tags_for_user.invalidate((user_id,)) result = self._account_data_id_gen.get_current_token() - defer.returnValue(result) + return result def _update_revision_txn(self, txn, user_id, room_id, next_id): """Update the latest revision of the tags for the given user and room. 
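A side effect visible in several of these files (filtering.py and relations.py above, user_erasure_store.py below): once the last defer.returnValue() call in a module is gone, the "from twisted.internet import defer" line can be dropped wherever defer was imported only for that helper. The converted bodies also map one-to-one onto native coroutines, which Deferred interoperates with; a hypothetical comparison, not part of this patch:

from twisted.internet import defer


async def native_style():
    # Deferreds are awaitable, so "yield d" becomes "await d".
    one = await defer.succeed(1)
    return one + 1


# ensureDeferred adapts the coroutine for callers expecting a Deferred;
# since the awaited Deferred has already fired, it completes synchronously.
d = defer.ensureDeferred(native_style())
d.addCallback(print)  # prints 2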
diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index c585cf6cf79d..b3c3bf55bc75 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -147,7 +147,7 @@ def get_destination_retry_timings(self, destination): result = self._destination_retry_cache.get(destination, SENTINEL) if result is not SENTINEL: - defer.returnValue(result) + return result result = yield self.runInteraction( "get_destination_retry_timings", @@ -158,7 +158,7 @@ def get_destination_retry_timings(self, destination): # We don't hugely care about race conditions between getting and # invalidating the cache, since we time out fairly quickly anyway. self._destination_retry_cache[destination] = result - defer.returnValue(result) + return result def _get_destination_retry_timings(self, txn, destination): result = self._simple_select_one_txn( diff --git a/synapse/storage/user_directory.py b/synapse/storage/user_directory.py index 7fd16fe65e31..b5188d9bee03 100644 --- a/synapse/storage/user_directory.py +++ b/synapse/storage/user_directory.py @@ -109,7 +109,7 @@ def _make_staging_area(txn): yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos}) yield self._end_background_update("populate_user_directory_createtables") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_user_directory_cleanup(self, progress, batch_size): @@ -131,7 +131,7 @@ def _delete_staging_area(txn): ) yield self._end_background_update("populate_user_directory_cleanup") - defer.returnValue(1) + return 1 @defer.inlineCallbacks def _populate_user_directory_process_rooms(self, progress, batch_size): @@ -177,7 +177,7 @@ def _get_next_batch(txn): # No more rooms -- complete the transaction. if not rooms_to_work_on: yield self._end_background_update("populate_user_directory_process_rooms") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d rooms of %d remaining" @@ -257,9 +257,9 @@ def _get_next_batch(txn): if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. - defer.returnValue(processed_event_count) + return processed_event_count - defer.returnValue(processed_event_count) + return processed_event_count @defer.inlineCallbacks def _populate_user_directory_process_users(self, progress, batch_size): @@ -268,7 +268,7 @@ def _populate_user_directory_process_users(self, progress, batch_size): """ if not self.hs.config.user_directory_search_all_users: yield self._end_background_update("populate_user_directory_process_users") - defer.returnValue(1) + return 1 def _get_next_batch(txn): sql = "SELECT user_id FROM %s LIMIT %s" % ( @@ -298,7 +298,7 @@ def _get_next_batch(txn): # No more users -- complete the transaction. 
if not users_to_work_on: yield self._end_background_update("populate_user_directory_process_users") - defer.returnValue(1) + return 1 logger.info( "Processing the next %d users of %d remaining" @@ -322,7 +322,7 @@ def _get_next_batch(txn): progress, ) - defer.returnValue(len(users_to_work_on)) + return len(users_to_work_on) @defer.inlineCallbacks def is_room_world_readable_or_publicly_joinable(self, room_id): @@ -344,16 +344,16 @@ def is_room_world_readable_or_publicly_joinable(self, room_id): join_rule_ev = yield self.get_event(join_rules_id, allow_none=True) if join_rule_ev: if join_rule_ev.content.get("join_rule") == JoinRules.PUBLIC: - defer.returnValue(True) + return True hist_vis_id = current_state_ids.get((EventTypes.RoomHistoryVisibility, "")) if hist_vis_id: hist_vis_ev = yield self.get_event(hist_vis_id, allow_none=True) if hist_vis_ev: if hist_vis_ev.content.get("history_visibility") == "world_readable": - defer.returnValue(True) + return True - defer.returnValue(False) + return False def update_profile_in_user_dir(self, user_id, display_name, avatar_url): """ @@ -499,7 +499,7 @@ def get_users_in_dir_due_to_room(self, room_id): user_ids = set(user_ids_share_pub) user_ids.update(user_ids_share_priv) - defer.returnValue(user_ids) + return user_ids def add_users_who_share_private_room(self, room_id, user_id_tuples): """Insert entries into the users_who_share_private_rooms table. The first @@ -609,7 +609,7 @@ def get_user_dir_rooms_user_is_in(self, user_id): users = set(pub_rows) users.update(rows) - defer.returnValue(list(users)) + return list(users) @defer.inlineCallbacks def get_rooms_in_common_for_users(self, user_id, other_user_id): @@ -635,7 +635,7 @@ def get_rooms_in_common_for_users(self, user_id, other_user_id): "get_rooms_in_common_for_users", None, sql, user_id, other_user_id ) - defer.returnValue([room_id for room_id, in rows]) + return [room_id for room_id, in rows] def delete_all_from_user_dir(self): """Delete the entire user directory @@ -782,7 +782,7 @@ def search_user_dir(self, user_id, search_term, limit): limited = len(results) > limit - defer.returnValue({"limited": limited, "results": results}) + return {"limited": limited, "results": results} def _parse_query_sqlite(search_term): diff --git a/synapse/storage/user_erasure_store.py b/synapse/storage/user_erasure_store.py index 1815fdc0ddf8..05cabc228224 100644 --- a/synapse/storage/user_erasure_store.py +++ b/synapse/storage/user_erasure_store.py @@ -12,9 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import operator -from twisted.internet import defer +import operator from synapse.storage._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedList @@ -67,7 +66,7 @@ def _get_erased_users(txn): erased_users = yield self.runInteraction("are_users_erased", _get_erased_users) res = dict((u, u in erased_users) for u in user_ids) - defer.returnValue(res) + return res class UserErasureStore(UserErasureWorkerStore): diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 488c49747ad4..b91fb2db7b45 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -56,7 +56,7 @@ def get_current_token(self): device_list_key=device_list_key, groups_key=groups_key, ) - defer.returnValue(token) + return token @defer.inlineCallbacks def get_current_token_for_pagination(self): @@ -80,4 +80,4 @@ def get_current_token_for_pagination(self): device_list_key=0, groups_key=0, ) - defer.returnValue(token) + return token diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f506b2a695f6..841625a99176 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -49,7 +49,7 @@ def sleep(self, seconds): with context.PreserveLoggingContext(): self._reactor.callLater(seconds, d.callback, seconds) res = yield d - defer.returnValue(res) + return res def time(self): """Returns the current system time in seconds since epoch.""" diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 58a6b8764f10..f1c46836b1cd 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -366,7 +366,7 @@ def _ctx_manager(): new_defer.callback(None) self.key_to_current_readers.get(key, set()).discard(new_defer) - defer.returnValue(_ctx_manager()) + return _ctx_manager() @defer.inlineCallbacks def write(self, key): @@ -396,7 +396,7 @@ def _ctx_manager(): if self.key_to_current_writer[key] == new_defer: self.key_to_current_writer.pop(key) - defer.returnValue(_ctx_manager()) + return _ctx_manager() def _cancelled_to_timed_out_error(value, timeout): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 675db2f4483c..a1acacbde937 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -289,7 +289,7 @@ class CacheDescriptor(_CacheDescriptorBase): def foo(self, key, cache_context): r1 = yield self.bar1(key, on_invalidate=cache_context.invalidate) r2 = yield self.bar2(key, on_invalidate=cache_context.invalidate) - defer.returnValue(r1 + r2) + return r1 + r2 Args: num_args (int): number of positional arguments (excluding ``self`` and diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index d6908e169d45..82d3eefe0e43 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -121,7 +121,7 @@ def wrap(self, key, callback, *args, **kwargs): @defer.inlineCallbacks def handle_request(request): # etc - defer.returnValue(result) + return result result = yield response_cache.wrap( key, diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index c30b6de19c98..0910930c2117 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -67,7 +67,7 @@ def wrapper(func): def measured_func(self, *args, **kwargs): with Measure(self.clock, name): r = yield func(self, *args, **kwargs) - defer.returnValue(r) + return r return measured_func diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index d8d0ceae5173..0862b5ca5aaa 100644 --- 
a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -95,15 +95,13 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs) # maximum backoff even though it might only have been down briefly backoff_on_failure = not ignore_backoff - defer.returnValue( - RetryDestinationLimiter( - destination, - clock, - store, - retry_interval, - backoff_on_failure=backoff_on_failure, - **kwargs - ) + return RetryDestinationLimiter( + destination, + clock, + store, + retry_interval, + backoff_on_failure=backoff_on_failure, + **kwargs ) diff --git a/synapse/visibility.py b/synapse/visibility.py index 2a11c8359699..bf0f1eebd8ca 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -208,7 +208,7 @@ def allowed(event): filtered_events = filter(operator.truth, filtered_events) # we turn it into a list before returning it. - defer.returnValue(list(filtered_events)) + return list(filtered_events) @defer.inlineCallbacks @@ -317,11 +317,11 @@ def check_event_is_visible(event, state): elif redact: to_return.append(prune_event(e)) - defer.returnValue(to_return) + return to_return # If there are no erased users then we can just return the given list # of events without having to copy it. - defer.returnValue(events) + return events # Ok, so we're dealing with events that have non-trivial visibility # rules, so we need to also get the memberships of the room. @@ -384,4 +384,4 @@ def include(typ, state_key): elif redact: to_return.append(prune_event(e)) - defer.returnValue(to_return) + return to_return diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 8d94a503d690..c4f0bbd3dd8a 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -107,7 +107,7 @@ def get_perspectives(**kwargs): self.assertEquals(LoggingContext.current_context().request, "11") with PreserveLoggingContext(): yield persp_deferred - defer.returnValue(persp_resp) + return persp_resp self.http_client.post_json.side_effect = get_perspectives @@ -554,7 +554,7 @@ def run_in_context(f, *args, **kwargs): # logs. 
ctx.request = "testctx" rv = yield f(*args, **kwargs) - defer.returnValue(rv) + return rv def _verify_json_for_server(kr, *args): @@ -565,6 +565,6 @@ def _verify_json_for_server(kr, *args): @defer.inlineCallbacks def v(): rv1 = yield kr.verify_json_for_server(*args) - defer.returnValue(rv1) + return rv1 return run_in_context(v) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 90d012937404..99dce45cfee5 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -283,4 +283,4 @@ def get_or_create_user(self, requester, localpart, displayname, password_hash=No user, requester, displayname, by_admin=True ) - defer.returnValue((user_id, token)) + return (user_id, token) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index a49f9b322479..b906686b495c 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -145,7 +145,7 @@ def _make_get_request(self, uri): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res except Exception as e: logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e) raise @@ -936,7 +936,7 @@ def do_get_well_known(self, serv): except Exception as e: logger.warning("Error fetching well-known: %s", e) raise - defer.returnValue(result) + return result def test_well_known_cache(self): self.reactor.lookups["testserv"] = "1.2.3.4" diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 65b51dc981b5..3b885ef64bde 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -61,7 +61,7 @@ def do_lookup(): # should have restored our context self.assertIs(LoggingContext.current_context(), ctx) - defer.returnValue(result) + return result test_d = do_lookup() self.assertNoResult(test_d) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index b9d6d7ad1c1f..2b01f40a428e 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -68,7 +68,7 @@ def do_request(): try: fetch_res = yield fetch_d - defer.returnValue(fetch_res) + return fetch_res finally: check_logcontext(context) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index a8adc9a61d1b..a3d7e3c0466f 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -46,7 +46,7 @@ def test_logcontexts_with_async_result(self): @defer.inlineCallbacks def cb(): yield Clock(reactor).sleep(0) - defer.returnValue("yay") + return "yay" @defer.inlineCallbacks def test(): diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index fbb930269431..9fabe3fbc009 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -43,7 +43,7 @@ def update(progress, count): "test_update", progress, ) - defer.returnValue(count) + return count self.update_handler.side_effect = update @@ -60,7 +60,7 @@ def update(progress, count): @defer.inlineCallbacks def update(progress, count): yield self.store._end_background_update("test_update") - defer.returnValue(count) + return count self.update_handler.side_effect = update self.update_handler.reset_mock() diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 732a778fabca..1cb471205be8 100644 --- a/tests/storage/test_redaction.py +++ 
b/tests/storage/test_redaction.py @@ -69,7 +69,7 @@ def inject_room_member( yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, room, user, body): @@ -92,7 +92,7 @@ def inject_message(self, room, user, body): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a39..c6e8196b91f7 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -67,7 +67,7 @@ def inject_room_member(self, room, user, membership, replaces_state=None): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_one_member(self): diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 212a7ae765d9..5c2cf3c2db98 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -65,7 +65,7 @@ def inject_state_event(self, room, sender, typ, state_key, content): yield self.store.persist_event(event, context) - defer.returnValue(event) + return event def assertStateMapEqual(self, s1, s2): for t in s1: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 118c3bd238bd..e0605dac2ffd 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -139,7 +139,7 @@ def inject_visibility(self, user_id, visibility): builder ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_room_member(self, user_id, membership="join", extra_content={}): @@ -161,7 +161,7 @@ def inject_room_member(self, user_id, membership="join", extra_content={}): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def inject_message(self, user_id, content=None): @@ -182,7 +182,7 @@ def inject_message(self, user_id, content=None): ) yield self.hs.get_datastore().persist_event(event, context) - defer.returnValue(event) + return event @defer.inlineCallbacks def test_large_room(self): diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 7807328e2fb1..56320bbaf986 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -159,7 +159,7 @@ def fn(self, arg1): def inner_fn(): with PreserveLoggingContext(): yield complete_lookup - defer.returnValue(1) + return 1 return inner_fn() @@ -169,7 +169,7 @@ def do_lookup(): c1.name = "c1" r = yield obj.fn(1) self.assertEqual(LoggingContext.current_context(), c1) - defer.returnValue(r) + return r def check_result(r): self.assertEqual(r, 1) @@ -286,7 +286,7 @@ def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function yield run_on_reactor() assert LoggingContext.current_context().request == "c1" - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) with LoggingContext() as c1: c1.request = "c1" @@ -334,7 +334,7 @@ def fn(self, arg1, arg2): def list_fn(self, args1, arg2): # we want this to behave like an asynchronous function yield run_on_reactor() - defer.returnValue(self.mock(args1, arg2)) + return self.mock(args1, arg2) obj = Cls() invalidate0 = mock.Mock() diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b475d..425e3387db49 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -361,7 +361,7 
@@ def cleanup(): if fed: register_federation_servlets(hs, fed) - defer.returnValue(hs) + return hs def register_federation_servlets(hs, resource): @@ -465,9 +465,9 @@ def trigger( args = [urlparse.unquote(u) for u in matcher.groups()] (code, response) = yield func(mock_request, *args) - defer.returnValue((code, response)) + return (code, response) except CodeMessageException as e: - defer.returnValue((e.code, cs_error(e.msg, code=e.errcode))) + return (e.code, cs_error(e.msg, code=e.errcode)) raise KeyError("No event can handle %s" % path) From 65afc535a6a3fd61ea91c99ed2284b6fd0c4e204 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 15:14:21 +0100 Subject: [PATCH 043/136] Update changelog.d/5743.bugfix Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5743.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5743.bugfix b/changelog.d/5743.bugfix index a160e9945f48..65728ff079fb 100644 --- a/changelog.d/5743.bugfix +++ b/changelog.d/5743.bugfix @@ -1 +1 @@ -Log when we receive receipt from a different origin. +Log when we receive an event receipt from an unexpected origin. From 3641784e8c63e1ac0deaa99519d53c0bf2853993 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Tue, 23 Jul 2019 15:46:04 +0100 Subject: [PATCH 044/136] Make Jaeger fully configurable (#5694) * Allow Jaeger to be configured * Update sample config --- changelog.d/5694.misc | 1 + docs/sample_config.yaml | 16 ++++++++++++++++ synapse/config/tracer.py | 22 ++++++++++++++++++++++ synapse/logging/opentracing.py | 11 +++++++---- 4 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5694.misc diff --git a/changelog.d/5694.misc b/changelog.d/5694.misc new file mode 100644 index 000000000000..3b12dcc849df --- /dev/null +++ b/changelog.d/5694.misc @@ -0,0 +1 @@ +Make Jaeger fully configurable. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 0a96197ca65e..7edf15207afc 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1430,3 +1430,19 @@ opentracing: # #homeserver_whitelist: # - ".*" + + # Jaeger can be configured to sample traces at different rates. + # All configuration options provided by Jaeger can be set here. + # Jaeger's configuration is mostly related to trace sampling, + # which is documented here: + # https://www.jaegertracing.io/docs/1.13/sampling/. + # + #jaeger_config: + # sampler: + # type: const + # param: 1 + + # Logging whether spans were started and reported + # + # logging: + # false diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index 4479454415d4..95e7ccb3a3b1 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -23,6 +23,12 @@ def read_config(self, config, **kwargs): opentracing_config = {} self.opentracer_enabled = opentracing_config.get("enabled", False) + + self.jaeger_config = opentracing_config.get( + "jaeger_config", + {"sampler": {"type": "const", "param": 1}, "logging": False}, + ) + if not self.opentracer_enabled: return @@ -56,4 +62,20 @@ def generate_config_section(cls, **kwargs): # #homeserver_whitelist: # - ".*" + + # Jaeger can be configured to sample traces at different rates. + # All configuration options provided by Jaeger can be set here. + # Jaeger's configuration is mostly related to trace sampling, + # which is documented here: + # https://www.jaegertracing.io/docs/1.13/sampling/.
+ # + #jaeger_config: + # sampler: + # type: const + # param: 1 + + # Logging whether spans were started and reported + # + # logging: + # false """ diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index fb338ca223d0..d2c209c471fa 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -228,13 +228,16 @@ def init_tracer(config): # Include the worker name name = config.worker_name if config.worker_name else "master" + # Pull out the jaeger config if it was given. Otherwise set it to something sensible. + # See https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/config.py + set_homeserver_whitelist(config.opentracer_whitelist) - jaeger_config = JaegerConfig( - config={"sampler": {"type": "const", "param": 1}, "logging": True}, + + JaegerConfig( + config=config.jaeger_config, service_name="{} {}".format(config.server_name, name), scope_manager=LogContextScopeManager(config), - ) - jaeger_config.initialize_tracer() + ).initialize_tracer() # Set up tags to be opentracing's tags global tags From 73bbaf2bc6962df2c25443cbc70286318601af5a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 16:55:45 +0100 Subject: [PATCH 045/136] Add unit test for current state membership bg update --- tests/storage/test_roommember.py | 37 +++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 73ed943f5a39..b04be921f48d 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -20,7 +20,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.types import RoomID, UserID +from synapse.types import Requester, RoomID, UserID from tests import unittest from tests.utils import create_room, setup_test_homeserver @@ -84,3 +84,38 @@ def test_one_member(self): ) ], ) + + +class CurrentStateMembershipUpdateTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.room_creator = homeserver.get_room_creation_handler() + + def test_can_rerun_update(self): + # First make sure we have completed all updates. + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + # Now let's create a room, which will insert a membership + user = UserID("alice", "test") + requester = Requester(user, None, False, None, None) + self.get_success(self.room_creator.create_room(requester, {})) + + # Register the background update to run again. + self.get_success( + self.store._simple_insert( + table="background_updates", + values={ + "update_name": "current_state_events_membership", + "progress_json": "{}", + "depends_on": None, + }, + ) + ) + + # ... 
and tell the DataStore that it hasn't finished all updates yet + self.store._all_done = False + + # Now let's actually drive the updates to completion + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) From adcd5368b0f58bc457b25e5af993c89f8daa9300 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 23 Jul 2019 16:58:13 +0100 Subject: [PATCH 046/136] Newsfile --- changelog.d/5746.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5746.misc diff --git a/changelog.d/5746.misc b/changelog.d/5746.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5746.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From 418635e68a127da24bf887adcbf19d3110c57630 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 24 Jul 2019 11:33:13 +0100 Subject: [PATCH 047/136] Add a prometheus metric for active cache lookups. (#5750) * Add a prometheus metric for active cache lookups. * changelog --- changelog.d/5750.misc | 1 + synapse/util/caches/__init__.py | 17 ++++++++++++++++- synapse/util/caches/descriptors.py | 18 +++++++++++++++++- 3 files changed, 34 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5750.misc diff --git a/changelog.d/5750.misc b/changelog.d/5750.misc new file mode 100644 index 000000000000..6beaa460a5a7 --- /dev/null +++ b/changelog.d/5750.misc @@ -0,0 +1 @@ +Add a prometheus metric for pending cache lookups. \ No newline at end of file diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 8271229015b3..b50e3503f031 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,7 +52,19 @@ def get_cache_factor_for(cache_name): response_cache_total = Gauge("synapse_util_caches_response_cache:total", "", ["name"]) -def register_cache(cache_type, cache_name, cache): +def register_cache(cache_type, cache_name, cache, collect_callback=None): + """Register a cache object for metric collection. + + Args: + cache_type (str): + cache_name (str): name of the cache + cache (object): cache itself + collect_callback (callable|None): if not None, a function which is called during + metric collection to update additional metrics. + + Returns: + CacheMetric: an object which provides inc_{hits,misses,evictions} methods + """ # Check if the metric is already registered. Unregister it, if so. 
# This usually happens during tests, as at runtime these caches are @@ -90,6 +103,8 @@ def collect(self): cache_hits.labels(cache_name).set(self.hits) cache_evicted.labels(cache_name).set(self.evicted_size) cache_total.labels(cache_name).set(self.hits + self.misses) + if collect_callback: + collect_callback() except Exception as e: logger.warn("Error calculating metrics for %s: %s", cache_name, e) raise diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index a1acacbde937..7e69cf55fbb4 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -22,6 +22,8 @@ import six from six import itervalues, string_types +from prometheus_client import Gauge + from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable, preserve_fn @@ -37,6 +39,12 @@ logger = logging.getLogger(__name__) +cache_pending_metric = Gauge( + "synapse_util_caches_cache_pending", + "Number of lookups currently pending for this cache", + ["name"], +) + _CacheSentinel = object() @@ -82,11 +90,19 @@ def __init__(self, name, max_entries=1000, keylen=1, tree=False, iterable=False) self.name = name self.keylen = keylen self.thread = None - self.metrics = register_cache("cache", name, self.cache) + self.metrics = register_cache( + "cache", + name, + self.cache, + collect_callback=self._metrics_collection_callback, + ) def _on_evicted(self, evicted_count): self.metrics.inc_evictions(evicted_count) + def _metrics_collection_callback(self): + cache_pending_metric.labels(self.name).set(len(self._pending_deferred_cache)) + def check_thread(self): expected_thread = self.thread if expected_thread is None: From 32768e96d44d0f3febae8c372e8c1569ea31788e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:37:43 +0100 Subject: [PATCH 048/136] Add function to get all forgotten rooms for user This will allow us to efficiently filter out rooms that have been forgotten in other queries without having to join against the `room_memberships` table. --- synapse/storage/roommember.py | 43 +++++++++++++++++++ .../schema/delta/56/room_membership_idx.sql | 25 +++++++++++ 2 files changed, 68 insertions(+) create mode 100644 synapse/storage/schema/delta/56/room_membership_idx.sql diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index b3c002b9eb92..bc77705e972d 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -639,6 +639,39 @@ def f(txn): count = yield self.runInteraction("did_forget_membership", f) defer.returnValue(count == 0) + @cached() + def get_forgotten_rooms_for_user(self, user_id): + """Gets all rooms the user has forgotten. + + Args: + user_id (str) + + Returns: + Deferred[set[str]] + """ + + def _get_forgotten_rooms_for_user_txn(txn): + # This is a slightly convoluted query that first looks up all rooms + # that the user has forgotten in the past, then rechecks that list + # to see if any have subsequently been updated. This is done so that + # we can use a partial index on `forgotten = 1` on the assumption + # that few users will actually forget many rooms. + sql = """ + SELECT room_id, ( + SELECT count(*) FROM room_memberships + WHERE room_id = m.room_id AND user_id = m.user_id AND forgotten = 0 + ) AS count + FROM room_memberships AS m + WHERE user_id = ? 
AND forgotten = 1 + GROUP BY room_id, user_id; + """ + txn.execute(sql, (user_id,)) + return set(row[0] for row in txn if row[1] == 0) + + return self.runInteraction( + "get_forgotten_rooms_for_user", _get_forgotten_rooms_for_user_txn + ) + @defer.inlineCallbacks def get_rooms_user_has_been_in(self, user_id): """Get all rooms that the user has ever been in. @@ -670,6 +703,13 @@ def __init__(self, db_conn, hs): _CURRENT_STATE_MEMBERSHIP_UPDATE_NAME, self._background_current_state_membership, ) + self.register_background_index_update( + "room_membership_forgotten_idx", + index_name="room_memberships_user_room_forgotten", + table="room_memberships", + columns=["user_id", "room_id"], + where_clause="forgotten = 1", + ) def _store_room_members_txn(self, txn, events, backfilled): """Store a room member in the database. @@ -771,6 +811,9 @@ def f(txn): txn.execute(sql, (user_id, room_id)) self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) + self._invalidate_cache_and_stream( + txn, self.get_forgotten_rooms_for_user, (user_id,) + ) return self.runInteraction("forget_membership", f) diff --git a/synapse/storage/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/delta/56/room_membership_idx.sql new file mode 100644 index 000000000000..fc0b49884328 --- /dev/null +++ b/synapse/storage/schema/delta/56/room_membership_idx.sql @@ -0,0 +1,25 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- We add membership to current state so that we don't need to join against +-- room_memberships, which can be surprisingly costly (we do such queries +-- very frequently). +-- This will be null for non-membership events and the content.membership key +-- for membership events. (Will also be null for membership events until the +-- background update job has finished). + +-- Adds an index on room_memberships for fetching all forgotten rooms for a user +INSERT INTO background_updates (update_name, progress_json) VALUES + ('room_membership_forgotten_idx', '{}'); From 62921fb53e773c0510aacf36345c5301b4688088 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:45:58 +0100 Subject: [PATCH 049/136] Remove join on room_memberships when fetching rooms for user. --- synapse/storage/roommember.py | 58 ++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 22 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index bc77705e972d..7852d3866aef 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -256,28 +256,35 @@ def get_invite_for_user_in_room(self, user_id, room_id): defer.returnValue(invite) defer.returnValue(None) + @defer.inlineCallbacks def get_rooms_for_user_where_membership_is(self, user_id, membership_list): """ Get all the rooms for this user where the membership for this user matches one in the membership list. + Filters out forgotten rooms. + Args: user_id (str): The user ID. 
membership_list (list): A list of synapse.api.constants.Membership values which the user must be in. + Returns: - A list of dictionary objects, with room_id, membership and sender - defined. + Deferred[list[RoomsForUser]] """ if not membership_list: return defer.succeed(None) - return self.runInteraction( + rooms = yield self.runInteraction( "get_rooms_for_user_where_membership_is", self._get_rooms_for_user_where_membership_is_txn, user_id, membership_list, ) + # Now we filter out forgotten rooms + forgotten_rooms = yield self.get_forgotten_rooms_for_user(user_id) + return [room for room in rooms if room.room_id not in forgotten_rooms] + def _get_rooms_for_user_where_membership_is_txn( self, txn, user_id, membership_list ): @@ -287,26 +294,33 @@ def _get_rooms_for_user_where_membership_is_txn( results = [] if membership_list: - where_clause = "user_id = ? AND (%s) AND forgotten = 0" % ( - " OR ".join(["m.membership = ?" for _ in membership_list]), - ) - - args = [user_id] - args.extend(membership_list) + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT room_id, e.sender, c.membership, event_id, e.stream_ordering + FROM current_state_events AS c + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND state_key = ? + AND c.membership IN (%s) + """ % ( + ",".join("?" * len(membership_list)) + ) + else: + sql = """ + SELECT room_id, e.sender, m.membership, event_id, e.stream_ordering + FROM current_state_events AS c + INNER JOIN room_memberships AS m USING (room_id, event_id) + INNER JOIN events AS e USING (room_id, event_id) + WHERE + c.type = 'm.room.member' + AND state_key = ? + AND m.membership IN (%s) + """ % ( + ",".join("?" * len(membership_list)) + ) - sql = ( - "SELECT m.room_id, m.sender, m.membership, m.event_id, e.stream_ordering" - " FROM current_state_events as c" - " INNER JOIN room_memberships as m" - " ON m.event_id = c.event_id" - " INNER JOIN events as e" - " ON e.event_id = c.event_id" - " AND m.room_id = c.room_id" - " AND m.user_id = c.state_key" - " WHERE c.type = 'm.room.member' AND %s" - ) % (where_clause,) - - txn.execute(sql, args) + txn.execute(sql, (user_id, *membership_list)) results = [RoomsForUser(**r) for r in self.cursor_to_dict(txn)] if do_invite: From 0c4a99607e839c5a363351238a6a555015c8ddfc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:49:15 +0100 Subject: [PATCH 050/136] Remove join when calculating room summaries. --- synapse/storage/roommember.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 7852d3866aef..bfb834ccca76 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -179,19 +179,27 @@ def _get_room_summary_txn(txn): # we order by membership and then fairly arbitrarily by event_id so # heroes are consistent - sql = """ - SELECT m.user_id, m.membership, m.event_id - FROM room_memberships as m - INNER JOIN current_state_events as c - ON m.event_id = c.event_id - AND m.room_id = c.room_id - AND m.user_id = c.state_key - WHERE c.type = 'm.room.member' AND c.room_id = ? - ORDER BY - CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, - m.event_id ASC - LIMIT ? - """ + if self._current_state_events_membership_up_to_date: + sql = """ + SELECT state_key, membership, event_id + FROM current_state_events + WHERE type = 'm.room.member' AND room_id = ? + ORDER BY + CASE membership WHEN ? THEN 1 WHEN ? 
THEN 2 ELSE 3 END ASC, + event_id ASC + LIMIT ? + """ + else: + sql = """ + SELECT c.state_key, m.membership, c.event_id + FROM room_memberships as m + INNER JOIN current_state_events as c USING (room_id, event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? + ORDER BY + CASE m.membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, + c.event_id ASC + LIMIT ? + """ # 6 is 5 (number of heroes) plus 1, in case one of them is the calling user. txn.execute(sql, (room_id, Membership.JOIN, Membership.INVITE, 6)) From c1598030671ee39364863fef579d041989bf852e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 24 Jul 2019 11:51:44 +0100 Subject: [PATCH 051/136] Newsfile --- changelog.d/5752.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5752.misc diff --git a/changelog.d/5752.misc b/changelog.d/5752.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5752.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From f30a71a67b6605cb0f09975af3befc61090326bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 24 Jul 2019 13:16:18 +0100 Subject: [PATCH 052/136] Stop trying to fetch events with event_id=None. (#5753) `None` is not a valid event id, so queuing up a database fetch for it seems like a silly thing to do. I considered making `get_event` return `None` if `event_id is None`, but then its interaction with `allow_none` seemed unintuitive, and strong typing ftw. --- changelog.d/5753.misc | 1 + synapse/handlers/message.py | 8 +++++++- synapse/storage/events_worker.py | 5 ++++- synapse/storage/stats.py | 20 +++++++++++--------- 4 files changed, 23 insertions(+), 11 deletions(-) create mode 100644 changelog.d/5753.misc diff --git a/changelog.d/5753.misc b/changelog.d/5753.misc new file mode 100644 index 000000000000..22bba9ce3c05 --- /dev/null +++ b/changelog.d/5753.misc @@ -0,0 +1 @@ +Stop trying to fetch events with event_id=None. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8b27e23378c9..e951c39fa7e9 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -378,7 +378,11 @@ def create_event( # tolerate them in event_auth.check(). prev_state_ids = yield context.get_prev_state_ids(self.store) prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender)) - prev_event = yield self.store.get_event(prev_event_id, allow_none=True) + prev_event = ( + yield self.store.get_event(prev_event_id, allow_none=True) + if prev_event_id + else None + ) if not prev_event or prev_event.membership != Membership.JOIN: logger.warning( ( @@ -521,6 +525,8 @@ def deduplicate_state_event(self, event, context): """ prev_state_ids = yield context.get_prev_state_ids(self.store) prev_event_id = prev_state_ids.get((event.type, event.state_key)) + if not prev_event_id: + return prev_event = yield self.store.get_event(prev_event_id, allow_none=True) if not prev_event: return diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 44441957dbdf..83fe4764d87b 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -139,8 +139,11 @@ def get_event( If there is a mismatch, behave as per allow_none. Returns: - Deferred : A FrozenEvent.
+ Deferred[EventBase|None] """ + if not isinstance(event_id, str): + raise TypeError("Invalid event event_id %r" % (event_id,)) + events = yield self.get_events_as_list( [event_id], check_redacted=check_redacted, diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index e893b05ee76c..e13efed417bd 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -211,16 +211,18 @@ def _get_next_batch(txn): avatar_id = current_state_ids.get((EventTypes.RoomAvatar, "")) canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, "")) + event_ids = [ + join_rules_id, + history_visibility_id, + encryption_id, + name_id, + topic_id, + avatar_id, + canonical_alias_id, + ] + state_events = yield self.get_events( - [ - join_rules_id, - history_visibility_id, - encryption_id, - name_id, - topic_id, - avatar_id, - canonical_alias_id, - ] + [ev for ev in event_ids if ev is not None] ) def _get_or_none(event_id, arg): From 618bd1ee76a83bd29beb208e9b7097ffcd787099 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 25 Jul 2019 15:59:45 +0100 Subject: [PATCH 053/136] Fix some error cases in the caching layer. (#5749) There was some inconsistent behaviour in the caching layer around how exceptions were handled - particularly synchronously-thrown ones. This seems to be most easily handled by pushing the creation of ObservableDeferreds down from CacheDescriptor to the Cache. --- changelog.d/5749.misc | 1 + synapse/util/caches/descriptors.py | 74 ++++++++++++---------- tests/util/caches/test_descriptors.py | 90 ++++++++++++++++++++++++++- 3 files changed, 130 insertions(+), 35 deletions(-) create mode 100644 changelog.d/5749.misc diff --git a/changelog.d/5749.misc b/changelog.d/5749.misc new file mode 100644 index 000000000000..48dd61f4617d --- /dev/null +++ b/changelog.d/5749.misc @@ -0,0 +1 @@ +Fix some error cases in the caching layer. diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 7e69cf55fbb4..43f66ec4beb6 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -19,8 +19,7 @@ import threading from collections import namedtuple -import six -from six import itervalues, string_types +from six import itervalues from prometheus_client import Gauge @@ -32,7 +31,6 @@ from synapse.util.caches import get_cache_factor_for from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry -from synapse.util.stringutils import to_ascii from . 
import register_cache @@ -124,7 +122,7 @@ def get(self, key, default=_CacheSentinel, callback=None, update_metrics=True): update_metrics (bool): whether to update the cache hit rate metrics Returns: - Either a Deferred or the raw result + Either an ObservableDeferred or the raw result """ callbacks = [callback] if callback else [] val = self._pending_deferred_cache.get(key, _CacheSentinel) @@ -148,9 +146,14 @@ def get(self, key, default=_CacheSentinel, callback=None, update_metrics=True): return default def set(self, key, value, callback=None): + if not isinstance(value, defer.Deferred): + raise TypeError("not a Deferred") + callbacks = [callback] if callback else [] self.check_thread() - entry = CacheEntry(deferred=value, callbacks=callbacks) + observable = ObservableDeferred(value, consumeErrors=True) + observer = defer.maybeDeferred(observable.observe) + entry = CacheEntry(deferred=observable, callbacks=callbacks) existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry: @@ -158,20 +161,31 @@ def set(self, key, value, callback=None): self._pending_deferred_cache[key] = entry - def shuffle(result): + def compare_and_pop(): + """Check if our entry is still the one in _pending_deferred_cache, and + if so, pop it. + + Returns true if the entries matched. + """ existing_entry = self._pending_deferred_cache.pop(key, None) if existing_entry is entry: + return True + + # oops, the _pending_deferred_cache has been updated since + # we started our query, so we are out of date. + # + # Better put back whatever we took out. (We do it this way + # round, rather than peeking into the _pending_deferred_cache + # and then removing on a match, to make the common case faster) + if existing_entry is not None: + self._pending_deferred_cache[key] = existing_entry + + return False + + def cb(result): + if compare_and_pop(): self.cache.set(key, result, entry.callbacks) else: - # oops, the _pending_deferred_cache has been updated since - # we started our query, so we are out of date. - # - # Better put back whatever we took out. (We do it this way - # round, rather than peeking into the _pending_deferred_cache - # and then removing on a match, to make the common case faster) - if existing_entry is not None: - self._pending_deferred_cache[key] = existing_entry - # we're not going to put this entry into the cache, so need # to make sure that the invalidation callbacks are called. # That was probably done when _pending_deferred_cache was @@ -179,9 +193,16 @@ def shuffle(result): # `invalidate` being previously called, in which case it may # not have been. Either way, let's double-check now. entry.invalidate() - return result - entry.deferred.addCallback(shuffle) + def eb(_fail): + compare_and_pop() + entry.invalidate() + + # once the deferred completes, we can move the entry from the + # _pending_deferred_cache to the real cache. + # + observer.addCallbacks(cb, eb) + return observable def prefill(self, key, value, callback=None): callbacks = [callback] if callback else [] @@ -414,20 +435,10 @@ def onErr(f): ret.addErrback(onErr) - # If our cache_key is a string on py2, try to convert to ascii - # to save a bit of space in large caches. Py3 does this - # internally automatically. 
- if six.PY2 and isinstance(cache_key, string_types): - cache_key = to_ascii(cache_key) - - result_d = ObservableDeferred(ret, consumeErrors=True) - cache.set(cache_key, result_d, callback=invalidate_callback) + result_d = cache.set(cache_key, ret, callback=invalidate_callback) observer = result_d.observe() - if isinstance(observer, defer.Deferred): - return make_deferred_yieldable(observer) - else: - return observer + return make_deferred_yieldable(observer) if self.num_args == 1: wrapped.invalidate = lambda key: cache.invalidate(key[0]) @@ -543,7 +554,7 @@ def arg_to_cache_key(arg): missing.add(arg) if missing: - # we need an observable deferred for each entry in the list, + # we need a deferred for each entry in the list, # which we put in the cache. Each deferred resolves with the # relevant result for that key. deferreds_map = {} @@ -551,8 +562,7 @@ def arg_to_cache_key(arg): deferred = defer.Deferred() deferreds_map[arg] = deferred key = arg_to_cache_key(arg) - observable = ObservableDeferred(deferred) - cache.set(key, observable, callback=invalidate_callback) + cache.set(key, deferred, callback=invalidate_callback) def complete_all(res): # the wrapped function has completed. It returns a diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 56320bbaf986..5713870f48d8 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -27,6 +27,7 @@ make_deferred_yieldable, ) from synapse.util.caches import descriptors +from synapse.util.caches.descriptors import cached from tests import unittest @@ -55,12 +56,15 @@ def record_callback(idx): d2 = defer.Deferred() cache.set("key2", d2, partial(record_callback, 1)) - # lookup should return the deferreds - self.assertIs(cache.get("key1"), d1) - self.assertIs(cache.get("key2"), d2) + # lookup should return observable deferreds + self.assertFalse(cache.get("key1").has_called()) + self.assertFalse(cache.get("key2").has_called()) # let one of the lookups complete d2.callback("result2") + + # for now at least, the cache will return real results rather than an + # observabledeferred self.assertEqual(cache.get("key2"), "result2") # now do the invalidation @@ -146,6 +150,28 @@ def fn(self, arg1, arg2): self.assertEqual(r, "chips") obj.mock.assert_not_called() + def test_cache_with_sync_exception(self): + """If the wrapped function throws synchronously, things should continue to work + """ + + class Cls(object): + @cached() + def fn(self, arg1): + raise SynapseError(100, "mai spoon iz too big!!1") + + obj = Cls() + + # this should fail immediately + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + + # ... 
leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should result in a second exception + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + def test_cache_logcontexts(self): """Check that logcontexts are set and restored correctly when using the cache.""" @@ -222,6 +248,9 @@ def do_lookup(): self.assertEqual(LoggingContext.current_context(), c1) + # the cache should now be empty + self.assertEqual(len(obj.fn.cache.cache), 0) + obj = Cls() # set off a deferred which will do a cache lookup @@ -268,6 +297,61 @@ def fn(self, arg1, arg2=2, arg3=3): self.assertEqual(r, "chips") obj.mock.assert_not_called() + def test_cache_iterable(self): + class Cls(object): + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached(iterable=True) + def fn(self, arg1, arg2): + return self.mock(arg1, arg2) + + obj = Cls() + + obj.mock.return_value = ["spam", "eggs"] + r = obj.fn(1, 2) + self.assertEqual(r, ["spam", "eggs"]) + obj.mock.assert_called_once_with(1, 2) + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = ["chips"] + r = obj.fn(1, 3) + self.assertEqual(r, ["chips"]) + obj.mock.assert_called_once_with(1, 3) + obj.mock.reset_mock() + + # the two values should now be cached + self.assertEqual(len(obj.fn.cache.cache), 3) + + r = obj.fn(1, 2) + self.assertEqual(r, ["spam", "eggs"]) + r = obj.fn(1, 3) + self.assertEqual(r, ["chips"]) + obj.mock.assert_not_called() + + def test_cache_iterable_with_sync_exception(self): + """If the wrapped function throws synchronously, things should continue to work + """ + + class Cls(object): + @descriptors.cached(iterable=True) + def fn(self, arg1): + raise SynapseError(100, "mai spoon iz too big!!1") + + obj = Cls() + + # this should fail immediately + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + + # ... leaving the cache empty + self.assertEqual(len(obj.fn.cache.cache), 0) + + # and a second call should result in a second exception + d = obj.fn(1) + self.failureResultOf(d, SynapseError) + class CachedListDescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks From 1cad8d7b6f736d86bd53b7f5e8b8417c302fdbd1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 26 Jul 2019 07:38:55 +0100 Subject: [PATCH 054/136] Convert RedactionTestCase to modern test style (#5768) --- changelog.d/5768.misc | 1 + tests/storage/test_redaction.py | 74 +++++++++++++++++---------------- 2 files changed, 39 insertions(+), 36 deletions(-) create mode 100644 changelog.d/5768.misc diff --git a/changelog.d/5768.misc b/changelog.d/5768.misc new file mode 100644 index 000000000000..7a9c88b4c258 --- /dev/null +++ b/changelog.d/5768.misc @@ -0,0 +1 @@ +Convert RedactionTestCase to modern test style. diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 1cb471205be8..8488b6edc8ad 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,23 +17,21 @@ from mock import Mock -from twisted.internet import defer - from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.types import RoomID, UserID from tests import unittest -from tests.utils import create_room, setup_test_homeserver +from tests.utils import create_room -class RedactionTestCase(unittest.TestCase): - @defer.inlineCallbacks - def setUp(self): - hs = yield setup_test_homeserver( - self.addCleanup, resource_for_federation=Mock(), http_client=None +class RedactionTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + resource_for_federation=Mock(), http_client=None ) + def prepare(self, reactor, clock, hs): self.store = hs.get_datastore() self.event_builder_factory = hs.get_event_builder_factory() self.event_creation_handler = hs.get_event_creation_handler() @@ -42,11 +41,12 @@ def setUp(self): self.room1 = RoomID.from_string("!abc123:test") - yield create_room(hs, self.room1.to_string(), self.u_alice.to_string()) + self.get_success( + create_room(hs, self.room1.to_string(), self.u_alice.to_string()) + ) self.depth = 1 - @defer.inlineCallbacks def inject_room_member( self, room, user, membership, replaces_state=None, extra_content={} ): @@ -63,15 +63,14 @@ def inject_room_member( }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) return event - @defer.inlineCallbacks def inject_message(self, room, user, body): self.depth += 1 @@ -86,15 +85,14 @@ def inject_message(self, room, user, body): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) return event - @defer.inlineCallbacks def inject_redaction(self, room, event_id, user, reason): builder = self.event_builder_factory.for_room_version( RoomVersions.V1, @@ -108,20 +106,21 @@ def inject_redaction(self, room, event_id, user, reason): }, ) - event, context = yield self.event_creation_handler.create_new_client_event( - builder + event, context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) ) - yield self.store.persist_event(event, context) + self.get_success(self.store.persist_event(event, context)) - @defer.inlineCallbacks def test_redact(self): - yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + ) - msg_event = yield self.inject_message(self.room1, self.u_alice, "t") + msg_event = self.get_success(self.inject_message(self.room1, self.u_alice, "t")) # Check event has not been redacted: - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertObjectHasAttributes( { @@ -136,11 +135,11 @@ def test_redact(self): # Redact event reason = "Because I said so" - yield self.inject_redaction( - self.room1, msg_event.event_id, self.u_alice, reason + self.get_success( + self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason) ) - event = yield 
self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertEqual(msg_event.event_id, event.event_id) @@ -164,15 +163,18 @@ def test_redact(self): event.unsigned["redacted_because"], ) - @defer.inlineCallbacks def test_redact_join(self): - yield self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + self.get_success( + self.inject_room_member(self.room1, self.u_alice, Membership.JOIN) + ) - msg_event = yield self.inject_room_member( - self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"} + msg_event = self.get_success( + self.inject_room_member( + self.room1, self.u_bob, Membership.JOIN, extra_content={"blue": "red"} + ) ) - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertObjectHasAttributes( { @@ -187,13 +189,13 @@ def test_redact_join(self): # Redact event reason = "Because I said so" - yield self.inject_redaction( - self.room1, msg_event.event_id, self.u_alice, reason + self.get_success( + self.inject_redaction(self.room1, msg_event.event_id, self.u_alice, reason) ) # Check redaction - event = yield self.store.get_event(msg_event.event_id) + event = self.get_success(self.store.get_event(msg_event.event_id)) self.assertTrue("redacted_because" in event.unsigned) From 14c24c9037a7be46f9f79e85d2ce303ada4085e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 26 Jul 2019 10:07:21 +0100 Subject: [PATCH 055/136] Fix room summary when rejected events are in state Annoyingly, the `current_state_events` table can include rejected events, in which case the membership column will be null. To work around this, let's just always filter out null membership for now. --- synapse/storage/roommember.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index bfb834ccca76..d0fe3a7f78c1 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -156,9 +156,12 @@ def _get_room_summary_txn(txn): # then we can avoid a join, which is a Very Good Thing given how # frequently this function gets called. if self._current_state_events_membership_up_to_date: + # Note, rejected events will have a null membership field, so + # we manually filter them out. sql = """ SELECT count(*), membership FROM current_state_events WHERE type = 'm.room.member' AND room_id = ? + AND membership IS NOT NULL GROUP BY membership """ else: @@ -180,10 +183,13 @@ def _get_room_summary_txn(txn): # we order by membership and then fairly arbitrarily by event_id so # heroes are consistent if self._current_state_events_membership_up_to_date: + # Note, rejected events will have a null membership field, so + # we manually filter them out. sql = """ SELECT state_key, membership, event_id FROM current_state_events WHERE type = 'm.room.member' AND room_id = ? + AND membership IS NOT NULL ORDER BY CASE membership WHEN ? THEN 1 WHEN ? THEN 2 ELSE 3 END ASC, event_id ASC From 2e9cf7dda5aa5a13e434bf85733747d3d9c2d8e5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 26 Jul 2019 10:14:31 +0100 Subject: [PATCH 056/136] Newsfile --- changelog.d/5774.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5774.misc diff --git a/changelog.d/5774.misc b/changelog.d/5774.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5774.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership.
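To make the combined effect of the membership-column patches concrete: membership queries now read the denormalised membership straight off current_state_events, with an explicit NULL filter for rejected events, instead of joining against room_memberships. A minimal sketch of that pattern, in the style of the store transaction helpers above (the function name is invented for illustration; only the SQL shape comes from the patches):

    def _get_membership_counts_txn(txn, room_id):
        # Rejected events leave a NULL membership in current_state_events,
        # so filter them out explicitly, as the room summary query does.
        sql = """
            SELECT membership, count(*) FROM current_state_events
            WHERE type = 'm.room.member'
                AND room_id = ?
                AND membership IS NOT NULL
            GROUP BY membership
        """
        txn.execute(sql, (room_id,))
        return {membership: count for membership, count in txn}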
From 08352d44f81a76ba53fc96753cc5038589defaa7 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 26 Jul 2019 18:45:31 +0200 Subject: [PATCH 057/136] Add ability to pass arguments to looping calls --- synapse/util/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 841625a99176..9e0a47d2063c 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -59,7 +59,7 @@ def time_msec(self): """Returns the current system time in milliseconds since epoch.""" return int(self.time() * 1000) - def looping_call(self, f, msec): + def looping_call(self, f, msec, *args): """Call a function repeatedly. Waits `msec` initially before calling `f` for the first time. @@ -71,7 +71,7 @@ def looping_call(self, f, msec): f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. """ - call = task.LoopingCall(f) + call = task.LoopingCall(f, *args) call.clock = self._reactor d = call.start(msec / 1000.0, now=False) d.addErrback(log_failure, "Looping call died", consumeErrors=False) From 244953be3f2532fbb57f5ecd09ed499bcb1e1c69 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 29 Jul 2019 10:03:14 +0200 Subject: [PATCH 058/136] Add kwargs and doc --- synapse/util/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 9e0a47d2063c..785635300233 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -59,7 +59,7 @@ def time_msec(self): """Returns the current system time in milliseconds since epoch.""" return int(self.time() * 1000) - def looping_call(self, f, msec, *args): + def looping_call(self, f, msec, *args, **kwargs): @@ -70,8 +70,10 @@ def looping_call(self, f, msec, *args): Args: f(function): The function to call repeatedly. msec(float): How long to wait between calls in milliseconds. + *args: Positional arguments to pass to function. + **kwargs: Keyword arguments to pass to function. """ - call = task.LoopingCall(f, *args) + call = task.LoopingCall(f, *args, **kwargs) call.clock = self._reactor d = call.start(msec / 1000.0, now=False) d.addErrback(log_failure, "Looping call died", consumeErrors=False) From bd083a5fcf8633c059625c31062ea0654e7890b3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 29 Jul 2019 10:04:09 +0200 Subject: [PATCH 059/136] Changelog --- changelog.d/5780.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5780.misc diff --git a/changelog.d/5780.misc b/changelog.d/5780.misc new file mode 100644 index 000000000000..b7eb56e625b2 --- /dev/null +++ b/changelog.d/5780.misc @@ -0,0 +1 @@ +Allow looping calls to be given arguments.
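A quick usage sketch for the extended looping_call signature above. The cache-pruning callback and its arguments are invented for the example, but the argument forwarding is exactly what these two patches add:

    from twisted.internet import reactor

    from synapse.util import Clock

    clock = Clock(reactor)

    def prune_cache(cache_name, max_age_ms=60 * 60 * 1000):
        """Hypothetical callback that prunes entries older than max_age_ms."""

    # Positional and keyword arguments are now forwarded to the callback on
    # every invocation, with no need for a lambda or functools.partial:
    clock.looping_call(prune_cache, 60 * 1000, "device_cache", max_age_ms=30 * 60 * 1000)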
From 45df38e61bb876208d032cf3480230cecff6cdd8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 25 Jul 2019 17:15:54 +0100 Subject: [PATCH 060/136] Fix current_state bg update to work on old SQLite --- synapse/storage/roommember.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index cb88e49b5168..a1b9fd8199c0 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -870,10 +870,10 @@ def _background_current_state_membership_txn(txn, last_processed_room): next_room, = row sql = """ - UPDATE current_state_events AS c + UPDATE current_state_events SET membership = ( SELECT membership FROM room_memberships - WHERE event_id = c.event_id + WHERE event_id = current_state_events.event_id ) WHERE room_id = ? """ From 84c6ea1af8383049333213e108941a7831b2a4fd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 13:04:50 +0100 Subject: [PATCH 061/136] Update old deps unit test to use old sqlite3 --- .buildkite/pipeline.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c8ae1a44bed5..b75269a15594 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -49,14 +49,15 @@ steps: - command: - - "python -m pip install tox" + - "apt-get update && apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev zlib1g-dev" + - "python3.5 -m pip install tox" - "tox -e py35-old,codecov" label: ":python: 3.5 / SQLite / Old Deps" env: TRIAL_FLAGS: "-j 2" plugins: - docker#v3.0.1: - image: "python:3.5" + image: "ubuntu:xenial" # We use xenial to get an old sqlite and python propagate-environment: true retry: automatic: From d94916852fed15806202d58d903a8b43ff7b4367 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 25 Jul 2019 17:17:20 +0100 Subject: [PATCH 062/136] Newsfile --- changelog.d/5770.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5770.misc diff --git a/changelog.d/5770.misc b/changelog.d/5770.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5770.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From 3b476f57679f21b1bff6c5c90f19e64eaca00fd7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 15:33:32 +0100 Subject: [PATCH 063/136] Fix debian packages for sid being called buster. (#5775) * Fix debian packages for sid being called buster. I don't know why the sid images report buster as their codename in `lsb_release`, but they do, so let's just grab the codename from the distro we pass into the Dockerfile * Newsfile --- changelog.d/5775.bugfix | 1 + docker/Dockerfile-dhvirtualenv | 5 +++++ docker/build_debian.sh | 3 ++- 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5775.bugfix diff --git a/changelog.d/5775.bugfix b/changelog.d/5775.bugfix new file mode 100644 index 000000000000..b124897d802c --- /dev/null +++ b/changelog.d/5775.bugfix @@ -0,0 +1 @@ +Fix debian packaging scripts to correctly build sid packages.
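The substance of the packaging fix is in the build_debian.sh hunk below: derive the codename from the distro build argument (for example debian:sid gives sid) instead of trusting lsb_release inside the container. The same parsing, sketched in Python for clarity (the function name is made up; it mirrors the `cut -d ':' -f2` in the shell change):

    def codename_from_distro(distro):
        # "debian:sid" -> "sid", "ubuntu:xenial" -> "xenial"
        return distro.split(":", 1)[1]

    assert codename_from_distro("debian:sid") == "sid"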
diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 0117ab8bcc07..ac9ebcfd88c2 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -42,6 +42,11 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b ### FROM ${distro} +# Get the distro we want to pull from as a dynamic build variable +# (We need to define it in each build stage) +ARG distro="" +ENV distro ${distro} + # Install the build dependencies # # NB: keep this list in sync with the list of build-deps in debian/control diff --git a/docker/build_debian.sh b/docker/build_debian.sh index 6ed2b398986d..f312f0715fd1 100644 --- a/docker/build_debian.sh +++ b/docker/build_debian.sh @@ -4,7 +4,8 @@ set -ex -DIST=`lsb_release -c -s` +# Get the codename from distro env +DIST=`cut -d ':' -f2 <<< $distro` # we get a read-only copy of the source: make a writeable copy cp -aT /synapse/source /synapse/build From 105e7f6ed3a08bcbf0fac2c7749ccb29f39d1492 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 16:09:48 +0100 Subject: [PATCH 064/136] Remove lost comment --- synapse/storage/schema/delta/56/room_membership_idx.sql | 7 ------- 1 file changed, 7 deletions(-) diff --git a/synapse/storage/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/delta/56/room_membership_idx.sql index fc0b49884328..92ab1f5e65c2 100644 --- a/synapse/storage/schema/delta/56/room_membership_idx.sql +++ b/synapse/storage/schema/delta/56/room_membership_idx.sql @@ -13,13 +13,6 @@ * limitations under the License. */ --- We add membership to current state so that we don't need to join against --- room_memberships, which can be surprisingly costly (we do such queries --- very frequently). --- This will be null for non-membership events and the content.membership key --- for membership events. (Will also be null for membership events until the --- background update job has finished). - -- Adds an index on room_memberships for fetching all forgotten rooms for a user INSERT INTO background_updates (update_name, progress_json) VALUES ('room_membership_forgotten_idx', '{}'); From 85b0bd8fe05ed78548c9b2b0da768927582f7d70 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Mon, 29 Jul 2019 16:34:44 +0100 Subject: [PATCH 065/136] Update the device list cache when keys/query is called (#5693) --- changelog.d/5693.bugfix | 1 + synapse/handlers/device.py | 150 ++++++++++++++++++----------------- synapse/handlers/e2e_keys.py | 60 +++++++++++++- 3 files changed, 137 insertions(+), 74 deletions(-) create mode 100644 changelog.d/5693.bugfix diff --git a/changelog.d/5693.bugfix b/changelog.d/5693.bugfix new file mode 100644 index 000000000000..d6f4e590aef9 --- /dev/null +++ b/changelog.d/5693.bugfix @@ -0,0 +1 @@ +Fix UISIs during homeserver outage. 
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d6ab33778393..5c1cf83c9dd1 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -209,12 +209,12 @@ def __init__(self, hs): self.federation_sender = hs.get_federation_sender() - self._edu_updater = DeviceListEduUpdater(hs, self) + self.device_list_updater = DeviceListUpdater(hs, self) federation_registry = hs.get_federation_registry() federation_registry.register_edu_handler( - "m.device_list_update", self._edu_updater.incoming_device_list_update + "m.device_list_update", self.device_list_updater.incoming_device_list_update ) federation_registry.register_query_handler( "user_devices", self.on_federation_query_user_devices @@ -426,7 +426,7 @@ def _update_device_from_client_ips(device, client_ips): device.update({"last_seen_ts": ip.get("last_seen"), "last_seen_ip": ip.get("ip")}) -class DeviceListEduUpdater(object): +class DeviceListUpdater(object): "Handles incoming device list updates from federation and updates the DB" def __init__(self, hs, device_handler): @@ -519,75 +519,7 @@ def _handle_device_updates(self, user_id): logger.debug("Need to re-sync devices for %r? %r", user_id, resync) if resync: - # Fetch all devices for the user. - origin = get_domain_from_id(user_id) - try: - result = yield self.federation.query_user_devices(origin, user_id) - except ( - NotRetryingDestination, - RequestSendFailed, - HttpResponseException, - ): - # TODO: Remember that we are now out of sync and try again - # later - logger.warn("Failed to handle device list update for %s", user_id) - # We abort on exceptions rather than accepting the update - # as otherwise synapse will 'forget' that its device list - # is out of date. If we bail then we will retry the resync - # next time we get a device list update for this user_id. - # This makes it more likely that the device lists will - # eventually become consistent. - return - except FederationDeniedError as e: - logger.info(e) - return - except Exception: - # TODO: Remember that we are now out of sync and try again - # later - logger.exception( - "Failed to handle device list update for %s", user_id - ) - return - - stream_id = result["stream_id"] - devices = result["devices"] - - # If the remote server has more than ~1000 devices for this user - # we assume that something is going horribly wrong (e.g. a bot - # that logs in and creates a new device every time it tries to - # send a message). Maintaining lots of devices per user in the - # cache can cause serious performance issues as if this request - # takes more than 60s to complete, internal replication from the - # inbound federation worker to the synapse master may time out - # causing the inbound federation to fail and causing the remote - # server to retry, causing a DoS. So in this scenario we give - # up on storing the total list of devices and only handle the - # delta instead. - if len(devices) > 1000: - logger.warn( - "Ignoring device list snapshot for %s as it has >1K devs (%d)", - user_id, - len(devices), - ) - devices = [] - - for device in devices: - logger.debug( - "Handling resync update %r/%r, ID: %r", - user_id, - device["device_id"], - stream_id, - ) - - yield self.store.update_remote_device_list_cache( - user_id, devices, stream_id - ) - device_ids = [device["device_id"] for device in devices] - yield self.device_handler.notify_device_update(user_id, device_ids) - - # We clobber the seen updates since we've re-synced from a given - # point. 
- self._seen_updates[user_id] = set([stream_id]) + yield self.user_device_resync(user_id) else: # Simply update the single device, since we know that is the only # change (because of the single prev_id matching the current cache) @@ -634,3 +566,77 @@ def _need_to_do_resync(self, user_id, updates): stream_id_in_updates.add(stream_id) return False + + @defer.inlineCallbacks + def user_device_resync(self, user_id): + """Fetches all devices for a user and updates the device cache with them. + + Args: + user_id (str): The user's id whose device_list will be updated. + Returns: + Deferred[dict]: a dict with device info as under the "devices" in the result of this + request: + https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid + """ + # Fetch all devices for the user. + origin = get_domain_from_id(user_id) + try: + result = yield self.federation.query_user_devices(origin, user_id) + except (NotRetryingDestination, RequestSendFailed, HttpResponseException): + # TODO: Remember that we are now out of sync and try again + # later + logger.warn("Failed to handle device list update for %s", user_id) + # We abort on exceptions rather than accepting the update + # as otherwise synapse will 'forget' that its device list + # is out of date. If we bail then we will retry the resync + # next time we get a device list update for this user_id. + # This makes it more likely that the device lists will + # eventually become consistent. + return + except FederationDeniedError as e: + logger.info(e) + return + except Exception: + # TODO: Remember that we are now out of sync and try again + # later + logger.exception("Failed to handle device list update for %s", user_id) + return + stream_id = result["stream_id"] + devices = result["devices"] + + # If the remote server has more than ~1000 devices for this user + # we assume that something is going horribly wrong (e.g. a bot + # that logs in and creates a new device every time it tries to + # send a message). Maintaining lots of devices per user in the + # cache can cause serious performance issues as if this request + # takes more than 60s to complete, internal replication from the + # inbound federation worker to the synapse master may time out + # causing the inbound federation to fail and causing the remote + # server to retry, causing a DoS. So in this scenario we give + # up on storing the total list of devices and only handle the + # delta instead. + if len(devices) > 1000: + logger.warn( + "Ignoring device list snapshot for %s as it has >1K devs (%d)", + user_id, + len(devices), + ) + devices = [] + + for device in devices: + logger.debug( + "Handling resync update %r/%r, ID: %r", + user_id, + device["device_id"], + stream_id, + ) + + yield self.store.update_remote_device_list_cache(user_id, devices, stream_id) + device_ids = [device["device_id"] for device in devices] + yield self.device_handler.notify_device_update(user_id, device_ids) + + # We clobber the seen updates since we've re-synced from a given + # point. + self._seen_updates[user_id] = set([stream_id]) + + defer.returnValue(result) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 1300b540e34c..366a0bc68b60 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -65,6 +65,7 @@ def query_devices(self, query_body, timeout): } } """ + device_keys_query = query_body.get("device_keys", {}) # separate users by domain. 
@@ -121,7 +122,58 @@ def query_devices(self, query_body, timeout):
 
         # Now fetch any devices that we don't have in our cache
         @defer.inlineCallbacks
         def do_remote_query(destination):
+            """This is called when we are querying the device list of a user on
+            a remote homeserver and their device list is not in the device list
+            cache. If we share a room with this user and we're not querying for
+            specific devices we will update the cache with their device list."""
+
             destination_query = remote_queries_not_in_cache[destination]
+
+            # We first consider whether we wish to update the device list cache with
+            # the users device list. We want to track a user's devices when the
+            # authenticated user shares a room with the queried user and the query
+            # has not specified a particular device.
+            # If we update the cache for the queried user we remove them from further
+            # queries. We use the more efficient batched query_client_keys for all
+            # remaining users
+            user_ids_updated = []
+            for (user_id, device_list) in destination_query.items():
+                if user_id in user_ids_updated:
+                    continue
+
+                if device_list:
+                    continue
+
+                room_ids = yield self.store.get_rooms_for_user(user_id)
+                if not room_ids:
+                    continue
+
+                # We've decided we're sharing a room with this user and should
+                # probably be tracking their device lists. However, we haven't
+                # done an initial sync on the device list so we do it now.
+                try:
+                    user_devices = yield self.device_handler.device_list_updater.user_device_resync(
+                        user_id
+                    )
+                    user_devices = user_devices["devices"]
+                    for device in user_devices:
+                        results[user_id] = {device["device_id"]: device["keys"]}
+                    user_ids_updated.append(user_id)
+                except Exception as e:
+                    failures[destination] = failures.get(destination, []).append(
+                        _exception_to_failure(e)
+                    )
+
+            if len(destination_query) == len(user_ids_updated):
+                # We've updated all the users in the query and we do not need to
+                # make any further remote calls.
+                return
+
+            # Remove all the users from the query which we have updated
+            for user_id in user_ids_updated:
+                destination_query.pop(user_id)
+
             try:
                 remote_result = yield self.federation.query_client_keys(
                     destination, {"device_keys": destination_query}, timeout=timeout
@@ -132,7 +184,8 @@ def do_remote_query(destination):
                         results[user_id] = keys
 
             except Exception as e:
-                failures[destination] = _exception_to_failure(e)
+                failure = _exception_to_failure(e)
+                failures[destination] = failure
 
         yield make_deferred_yieldable(
             defer.gatherResults(
@@ -234,8 +287,10 @@ def claim_client_keys(destination):
                 for user_id, keys in remote_result["one_time_keys"].items():
                     if user_id in device_keys:
                         json_result[user_id] = keys
+
             except Exception as e:
-                failures[destination] = _exception_to_failure(e)
+                failure = _exception_to_failure(e)
+                failures[destination] = failure
 
         yield make_deferred_yieldable(
             defer.gatherResults(
@@ -263,6 +318,7 @@ def claim_client_keys(destination):
 
     @defer.inlineCallbacks
     def upload_keys_for_user(self, user_id, device_id, keys):
+
         time_now = self.clock.time_msec()
 
         # TODO: Validate the JSON to make sure it has the right keys.
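The core of the patch above is the per-user decision inside do_remote_query: start tracking (resync and cache) a remote user's devices only when we share a room with them and the query asked for all of their devices. As a hedged, self-contained sketch of that rule, where shares_room_with and the user ids are illustrative stand-ins rather than Synapse APIs:

    # Illustrative sketch of the cache-priming rule added to do_remote_query.
    # `query` maps user_id -> list of requested device_ids, where an empty
    # list means "all devices". `shares_room_with` stands in for the
    # self.store.get_rooms_for_user check in the real handler.
    def partition_query(query, shares_room_with):
        to_resync = []  # users whose full device list we should sync and cache
        to_query = {}   # users to fetch with one batched query_client_keys call
        for user_id, device_ids in query.items():
            # A query for specific devices is served as-is, without
            # priming the cache.
            if not device_ids and shares_room_with(user_id):
                to_resync.append(user_id)
            else:
                to_query[user_id] = device_ids
        return to_resync, to_query

    # Example: alice shares a room with us and is queried without specific
    # devices, so she is resynced; bob's query names a device, so it is
    # proxied as before.
    shares = lambda user_id: user_id == "@alice:remote"
    resync_users, batched = partition_query(
        {"@alice:remote": [], "@bob:remote": ["DEV1"]}, shares
    )
    assert resync_users == ["@alice:remote"]
    assert batched == {"@bob:remote": ["DEV1"]}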
From df3a5db629daa384cdf291f5ecbb0ff1721c80df Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 16:40:25 +0100 Subject: [PATCH 066/136] Expand comment --- synapse/storage/roommember.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index bfb834ccca76..59ea7277fe43 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -678,6 +678,11 @@ def _get_forgotten_rooms_for_user_txn(txn): # to see if any have subsequently been updated. This is done so that # we can use a partial index on `forgotten = 1` on the assumption # that few users will actually forget many rooms. + # + # Note that a room is considered "forgotten" if *all* membership + # events for that user and room have the forgotten field set (as + # when a user forgets a room we update all rows for that user and + # room, not just the current one). sql = """ SELECT room_id, ( SELECT count(*) FROM room_memberships From 97a8b4caf7badb83c941c8afdb7ce237ee19cb7d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Jul 2019 02:02:18 +1000 Subject: [PATCH 067/136] Move some timeout checking logs to DEBUG #5785 --- changelog.d/5785.misc | 1 + synapse/handlers/presence.py | 2 +- synapse/handlers/typing.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5785.misc diff --git a/changelog.d/5785.misc b/changelog.d/5785.misc new file mode 100644 index 000000000000..0691222c421c --- /dev/null +++ b/changelog.d/5785.misc @@ -0,0 +1 @@ +Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ea54d0b991f5..94a9ca035705 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -333,7 +333,7 @@ def _handle_timeouts(self): """Checks the presence of users that have timed out and updates as appropriate. """ - logger.info("Handling presence timeouts") + logger.debug("Handling presence timeouts") now = self.clock.time_msec() # Fetch the list of users that *may* have timed out. 
Things may have diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 6b661aa93da8..f882330293e5 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -83,7 +83,7 @@ def _reset(self): self._room_typing = {} def _handle_timeouts(self): - logger.info("Checking for typing timeouts") + logger.debug("Checking for typing timeouts") now = self.clock.time_msec() From aecae8f3973803dcdbe93bcc1c5b9022f2c38ddd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 29 Jul 2019 17:21:57 +0100 Subject: [PATCH 068/136] Correctly handle errors doing requests to group servers --- synapse/handlers/groups_local.py | 89 ++++++++++++++++++++------------ 1 file changed, 57 insertions(+), 32 deletions(-) diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 7b67c8ae0f5d..46eb9ee88b53 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -126,9 +126,12 @@ def get_group_summary(self, group_id, requester_user_id): group_id, requester_user_id ) else: - res = yield self.transport_client.get_group_summary( - get_domain_from_id(group_id), group_id, requester_user_id - ) + try: + res = yield self.transport_client.get_group_summary( + get_domain_from_id(group_id), group_id, requester_user_id + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") group_server_name = get_domain_from_id(group_id) @@ -183,9 +186,12 @@ def create_group(self, group_id, user_id, content): content["user_profile"] = yield self.profile_handler.get_profile(user_id) - res = yield self.transport_client.create_group( - get_domain_from_id(group_id), group_id, user_id, content - ) + try: + res = yield self.transport_client.create_group( + get_domain_from_id(group_id), group_id, user_id, content + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") remote_attestation = res["attestation"] yield self.attestations.verify_attestation( @@ -221,9 +227,12 @@ def get_users_in_group(self, group_id, requester_user_id): group_server_name = get_domain_from_id(group_id) - res = yield self.transport_client.get_users_in_group( - get_domain_from_id(group_id), group_id, requester_user_id - ) + try: + res = yield self.transport_client.get_users_in_group( + get_domain_from_id(group_id), group_id, requester_user_id + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") chunk = res["chunk"] valid_entries = [] @@ -258,9 +267,12 @@ def join_group(self, group_id, user_id, content): local_attestation = self.attestations.create_attestation(group_id, user_id) content["attestation"] = local_attestation - res = yield self.transport_client.join_group( - get_domain_from_id(group_id), group_id, user_id, content - ) + try: + res = yield self.transport_client.join_group( + get_domain_from_id(group_id), group_id, user_id, content + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") remote_attestation = res["attestation"] @@ -299,9 +311,12 @@ def accept_invite(self, group_id, user_id, content): local_attestation = self.attestations.create_attestation(group_id, user_id) content["attestation"] = local_attestation - res = yield self.transport_client.accept_group_invite( - get_domain_from_id(group_id), group_id, user_id, content - ) + try: + res = yield self.transport_client.accept_group_invite( + get_domain_from_id(group_id), group_id, user_id, content + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group 
server") remote_attestation = res["attestation"] @@ -338,13 +353,16 @@ def invite(self, group_id, user_id, requester_user_id, config): group_id, user_id, requester_user_id, content ) else: - res = yield self.transport_client.invite_to_group( - get_domain_from_id(group_id), - group_id, - user_id, - requester_user_id, - content, - ) + try: + res = yield self.transport_client.invite_to_group( + get_domain_from_id(group_id), + group_id, + user_id, + requester_user_id, + content, + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") return res @@ -398,13 +416,16 @@ def remove_user_from_group(self, group_id, user_id, requester_user_id, content): ) else: content["requester_user_id"] = requester_user_id - res = yield self.transport_client.remove_user_from_group( - get_domain_from_id(group_id), - group_id, - requester_user_id, - user_id, - content, - ) + try: + res = yield self.transport_client.remove_user_from_group( + get_domain_from_id(group_id), + group_id, + requester_user_id, + user_id, + content, + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") return res @@ -435,9 +456,13 @@ def get_publicised_groups_for_user(self, user_id): return {"groups": result} else: - bulk_result = yield self.transport_client.bulk_get_publicised_groups( - get_domain_from_id(user_id), [user_id] - ) + try: + bulk_result = yield self.transport_client.bulk_get_publicised_groups( + get_domain_from_id(user_id), [user_id] + ) + except RequestSendFailed: + raise SynapseError(502, "Failed to contact group server") + result = bulk_result.get("users", {}).get(user_id) # TODO: Verify attestations return {"groups": result} From 865077f1d1f4866ab874c56b70abbd426fedfb97 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 30 Jul 2019 02:47:27 +1000 Subject: [PATCH 069/136] Room Complexity Client Implementation (#5783) --- changelog.d/5783.feature | 1 + docs/sample_config.yaml | 17 +++++ synapse/config/server.py | 41 ++++++++++++ synapse/federation/federation_client.py | 36 +++++++++++ synapse/federation/transport/client.py | 31 ++++++--- synapse/handlers/federation.py | 25 ++++++++ synapse/handlers/room_member.py | 84 +++++++++++++++++++++++-- tests/federation/test_complexity.py | 77 ++++++++++++++++++++++- 8 files changed, 298 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5783.feature diff --git a/changelog.d/5783.feature b/changelog.d/5783.feature new file mode 100644 index 000000000000..18f5a3cb288c --- /dev/null +++ b/changelog.d/5783.feature @@ -0,0 +1 @@ +Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7edf15207afc..b92959692dfb 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -278,6 +278,23 @@ listeners: # Used by phonehome stats to group together related servers. #server_context: context +# Resource-constrained Homeserver Settings +# +# If limit_remote_rooms.enabled is True, the room complexity will be +# checked before a user joins a new remote room. If it is above +# limit_remote_rooms.complexity, it will disallow joining or +# instantly leave. +# +# limit_remote_rooms.complexity_error can be set to customise the text +# displayed to the user when a room above the complexity threshold has +# its join cancelled. 
+# +# Uncomment the below lines to enable: +#limit_remote_rooms: +# enabled: True +# complexity: 1.0 +# complexity_error: "This room is too complex." + # Whether to require a user to be in the room to add an alias to it. # Defaults to 'true'. # diff --git a/synapse/config/server.py b/synapse/config/server.py index 00170f139369..15449695d19d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -18,6 +18,7 @@ import logging import os.path +import attr from netaddr import IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -38,6 +39,12 @@ DEFAULT_ROOM_VERSION = "4" +ROOM_COMPLEXITY_TOO_GREAT = ( + "Your homeserver is unable to join rooms this large or complex. " + "Please speak to your server administrator, or upgrade your instance " + "to join this room." +) + class ServerConfig(Config): def read_config(self, config, **kwargs): @@ -247,6 +254,23 @@ def read_config(self, config, **kwargs): self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) + @attr.s + class LimitRemoteRoomsConfig(object): + enabled = attr.ib( + validator=attr.validators.instance_of(bool), default=False + ) + complexity = attr.ib( + validator=attr.validators.instance_of((int, float)), default=1.0 + ) + complexity_error = attr.ib( + validator=attr.validators.instance_of(str), + default=ROOM_COMPLEXITY_TOO_GREAT, + ) + + self.limit_remote_rooms = LimitRemoteRoomsConfig( + **config.get("limit_remote_rooms", {}) + ) + bind_port = config.get("bind_port") if bind_port: if config.get("no_tls", False): @@ -617,6 +641,23 @@ def generate_config_section( # Used by phonehome stats to group together related servers. #server_context: context + # Resource-constrained Homeserver Settings + # + # If limit_remote_rooms.enabled is True, the room complexity will be + # checked before a user joins a new remote room. If it is above + # limit_remote_rooms.complexity, it will disallow joining or + # instantly leave. + # + # limit_remote_rooms.complexity_error can be set to customise the text + # displayed to the user when a room above the complexity threshold has + # its join cancelled. + # + # Uncomment the below lines to enable: + #limit_remote_rooms: + # enabled: True + # complexity: 1.0 + # complexity_error: "This room is too complex." + # Whether to require a user to be in the room to add an alias to it. # Defaults to 'true'. # diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 25ed1257f11b..6e03ce21af57 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -993,3 +993,39 @@ def forward_third_party_invite(self, destinations, room_id, event_dict): ) raise RuntimeError("Failed to send to any server.") + + @defer.inlineCallbacks + def get_room_complexity(self, destination, room_id): + """ + Fetch the complexity of a remote room from another server. + + Args: + destination (str): The remote server + room_id (str): The room ID to ask about. + + Returns: + Deferred[dict] or Deferred[None]: Dict contains the complexity + metric versions, while None means we could not fetch the complexity. + """ + try: + complexity = yield self.transport_layer.get_room_complexity( + destination=destination, room_id=room_id + ) + defer.returnValue(complexity) + except CodeMessageException as e: + # We didn't manage to get it -- probably a 404. We are okay if other + # servers don't give it to us. 
+ logger.debug( + "Failed to fetch room complexity via %s for %s, got a %d", + destination, + room_id, + e.code, + ) + except Exception: + logger.exception( + "Failed to fetch room complexity via %s for %s", destination, room_id + ) + + # If we don't manage to find it, return None. It's not an error if a + # server doesn't give it to us. + defer.returnValue(None) diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 2a6709ff48e9..0cea0d2a1096 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -21,7 +21,11 @@ from twisted.internet import defer from synapse.api.constants import Membership -from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX +from synapse.api.urls import ( + FEDERATION_UNSTABLE_PREFIX, + FEDERATION_V1_PREFIX, + FEDERATION_V2_PREFIX, +) from synapse.logging.utils import log_function logger = logging.getLogger(__name__) @@ -935,6 +939,23 @@ def bulk_get_publicised_groups(self, destination, user_ids): destination=destination, path=path, data=content, ignore_backoff=True ) + def get_room_complexity(self, destination, room_id): + """ + Args: + destination (str): The remote server + room_id (str): The room ID to ask about. + """ + path = _create_path(FEDERATION_UNSTABLE_PREFIX, "/rooms/%s/complexity", room_id) + + return self.client.get_json(destination=destination, path=path) + + +def _create_path(federation_prefix, path, *args): + """ + Ensures that all args are url encoded. + """ + return federation_prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args) + def _create_v1_path(path, *args): """Creates a path against V1 federation API from the path template and @@ -951,9 +972,7 @@ def _create_v1_path(path, *args): Returns: str """ - return FEDERATION_V1_PREFIX + path % tuple( - urllib.parse.quote(arg, "") for arg in args - ) + return _create_path(FEDERATION_V1_PREFIX, path, *args) def _create_v2_path(path, *args): @@ -971,6 +990,4 @@ def _create_v2_path(path, *args): Returns: str """ - return FEDERATION_V2_PREFIX + path % tuple( - urllib.parse.quote(arg, "") for arg in args - ) + return _create_path(FEDERATION_V2_PREFIX, path, *args) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 89b37dbc1c50..10160bfe86b5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2796,3 +2796,28 @@ def user_joined_room(self, user, room_id): ) else: return user_joined_room(self.distributor, user, room_id) + + @defer.inlineCallbacks + def get_room_complexity(self, remote_room_hosts, room_id): + """ + Fetch the complexity of a remote room over federation. + + Args: + remote_room_hosts (list[str]): The remote servers to ask. + room_id (str): The room ID to ask about. + + Returns: + Deferred[dict] or Deferred[None]: Dict contains the complexity + metric versions, while None means we could not fetch the complexity. + """ + + for host in remote_room_hosts: + res = yield self.federation_client.get_room_complexity(host, room_id) + + # We got a result, return it. + if res: + defer.returnValue(res) + + # We fell off the bottom, couldn't get the complexity from anyone. Oh + # well. 
+ defer.returnValue(None) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index baea08ddd07f..249a6d9c5d18 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -26,8 +26,7 @@ from twisted.internet import defer -import synapse.server -import synapse.types +from synapse import types from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError from synapse.types import RoomID, UserID @@ -543,7 +542,7 @@ def send_membership_event( ), "Sender (%s) must be same as requester (%s)" % (sender, requester.user) assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,) else: - requester = synapse.types.create_requester(target_user) + requester = types.create_requester(target_user) prev_event = yield self.event_creation_handler.deduplicate_state_event( event, context @@ -945,6 +944,47 @@ def __init__(self, hs): self.distributor.declare("user_joined_room") self.distributor.declare("user_left_room") + @defer.inlineCallbacks + def _is_remote_room_too_complex(self, room_id, remote_room_hosts): + """ + Check if complexity of a remote room is too great. + + Args: + room_id (str) + remote_room_hosts (list[str]) + + Returns: bool of whether the complexity is too great, or None + if unable to be fetched + """ + max_complexity = self.hs.config.limit_remote_rooms.complexity + complexity = yield self.federation_handler.get_room_complexity( + remote_room_hosts, room_id + ) + + if complexity: + if complexity["v1"] > max_complexity: + return True + return False + return None + + @defer.inlineCallbacks + def _is_local_room_too_complex(self, room_id): + """ + Check if the complexity of a local room is too great. + + Args: + room_id (str) + + Returns: bool + """ + max_complexity = self.hs.config.limit_remote_rooms.complexity + complexity = yield self.store.get_room_complexity(room_id) + + if complexity["v1"] > max_complexity: + return True + + return False + @defer.inlineCallbacks def _remote_join(self, requester, remote_room_hosts, room_id, user, content): """Implements RoomMemberHandler._remote_join @@ -952,7 +992,6 @@ def _remote_join(self, requester, remote_room_hosts, room_id, user, content): # filter ourselves out of remote_room_hosts: do_invite_join ignores it # and if it is the only entry we'd like to return a 404 rather than a # 500. - remote_room_hosts = [ host for host in remote_room_hosts if host != self.hs.hostname ] @@ -960,6 +999,18 @@ def _remote_join(self, requester, remote_room_hosts, room_id, user, content): if len(remote_room_hosts) == 0: raise SynapseError(404, "No known servers") + if self.hs.config.limit_remote_rooms.enabled: + # Fetch the room complexity + too_complex = yield self._is_remote_room_too_complex( + room_id, remote_room_hosts + ) + if too_complex is True: + raise SynapseError( + code=400, + msg=self.hs.config.limit_remote_rooms.complexity_error, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, + ) + # We don't do an auth check if we are doing an invite # join dance for now, since we're kinda implicitly checking # that we are allowed to join when we decide whether or not we @@ -969,6 +1020,31 @@ def _remote_join(self, requester, remote_room_hosts, room_id, user, content): ) yield self._user_joined_room(user, room_id) + # Check the room we just joined wasn't too large, if we didn't fetch the + # complexity of it before. + if self.hs.config.limit_remote_rooms.enabled: + if too_complex is False: + # We checked, and we're under the limit. 
+ return + + # Check again, but with the local state events + too_complex = yield self._is_local_room_too_complex(room_id) + + if too_complex is False: + # We're under the limit. + return + + # The room is too large. Leave. + requester = types.create_requester(user, None, False, None) + yield self.update_membership( + requester=requester, target=user, room_id=room_id, action="leave" + ) + raise SynapseError( + code=400, + msg=self.hs.config.limit_remote_rooms.complexity_error, + errcode=Codes.RESOURCE_LIMIT_EXCEEDED, + ) + @defer.inlineCallbacks def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): """Implements RoomMemberHandler._remote_reject_invite diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index a5b03005d7aa..51714a2b0663 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -13,12 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from mock import Mock + from twisted.internet import defer +from synapse.api.errors import Codes, SynapseError from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.federation.transport import server from synapse.rest import admin from synapse.rest.client.v1 import login, room +from synapse.types import UserID from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest @@ -33,9 +37,8 @@ class RoomComplexityTests(unittest.HomeserverTestCase): ] def default_config(self, name="test"): - config = super(RoomComplexityTests, self).default_config(name=name) - config["limit_large_remote_room_joins"] = True - config["limit_large_remote_room_complexity"] = 0.05 + config = super().default_config(name=name) + config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05} return config def prepare(self, reactor, clock, homeserver): @@ -88,3 +91,71 @@ def test_complexity_simple(self): self.assertEquals(200, channel.code) complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) + + def test_join_too_large(self): + + u1 = self.register_user("u1", "pass") + + handler = self.hs.get_room_member_handler() + fed_transport = self.hs.get_federation_transport_client() + + # Mock out some things, because we don't want to test the whole join + fed_transport.client.get_json = Mock(return_value=defer.succeed({"v1": 9999})) + handler.federation_handler.do_invite_join = Mock(return_value=defer.succeed(1)) + + d = handler._remote_join( + None, + ["otherserver.example"], + "roomid", + UserID.from_string(u1), + {"membership": "join"}, + ) + + self.pump() + + # The request failed with a SynapseError saying the resource limit was + # exceeded. + f = self.get_failure(d, SynapseError) + self.assertEqual(f.value.code, 400, f.value) + self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) + + def test_join_too_large_once_joined(self): + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + # Ok, this might seem a bit weird -- I want to test that we actually + # leave the room, but I don't want to simulate two servers. So, we make + # a local room, which we say we're joining remotely, even if there's no + # remote, because we mock that out. Then, we'll leave the (actually + # local) room, which will be propagated over federation in a real + # scenario. 
+ room_1 = self.helper.create_room_as(u1, tok=u1_token) + + handler = self.hs.get_room_member_handler() + fed_transport = self.hs.get_federation_transport_client() + + # Mock out some things, because we don't want to test the whole join + fed_transport.client.get_json = Mock(return_value=defer.succeed(None)) + handler.federation_handler.do_invite_join = Mock(return_value=defer.succeed(1)) + + # Artificially raise the complexity + self.hs.get_datastore().get_current_state_event_counts = lambda x: defer.succeed( + 600 + ) + + d = handler._remote_join( + None, + ["otherserver.example"], + room_1, + UserID.from_string(u1), + {"membership": "join"}, + ) + + self.pump() + + # The request failed with a SynapseError saying the resource limit was + # exceeded. + f = self.get_failure(d, SynapseError) + self.assertEqual(f.value.code, 400) + self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) From c9964ba6004903b0cfd76d245b121d9df6cb791c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Jul 2019 15:27:53 +0100 Subject: [PATCH 070/136] Return dicts from _fetch_event_list --- synapse/storage/events_worker.py | 42 ++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 79680ee856d9..5b606bec7c00 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -17,6 +17,7 @@ import itertools import logging +import operator from collections import namedtuple from canonicaljson import json @@ -421,28 +422,28 @@ def _fetch_event_list(self, conn, event_list): The fetch requests. Each entry consists of a list of event ids to be fetched, and a deferred to be completed once the events have been fetched. + + The deferreds are callbacked with a dictionary mapping from event id + to event row. Note that it may well contain additional events that + were not part of this request. """ with Measure(self._clock, "_fetch_event_list"): try: - event_id_lists = list(zip(*event_list))[0] - event_ids = [item for sublist in event_id_lists for item in sublist] + events_to_fetch = set( + event_id for events, _ in event_list for event_id in events + ) row_dict = self._new_transaction( - conn, "do_fetch", [], [], self._fetch_event_rows, event_ids + conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch ) # We only want to resolve deferreds from the main thread - def fire(lst, res): - for ids, d in lst: - if not d.called: - try: - with PreserveLoggingContext(): - d.callback([res[i] for i in ids if i in res]) - except Exception: - logger.exception("Failed to callback") + def fire(): + for _, d in event_list: + d.callback(row_dict) with PreserveLoggingContext(): - self.hs.get_reactor().callFromThread(fire, event_list, row_dict) + self.hs.get_reactor().callFromThread(fire) except Exception as e: logger.exception("do_fetch") @@ -461,6 +462,12 @@ def _enqueue_events(self, events, allow_rejected=False): """Fetches events from the database using the _event_fetch_list. This allows batch and bulk fetching of events - it allows us to fetch events without having to create a new transaction for each request for events. + + Args: + events (Iterable[str]): events to be fetched. + + Returns: + Deferred[Dict[str, _EventCacheEntry]]: map from event id to result. 
""" if not events: return {} @@ -484,11 +491,16 @@ def _enqueue_events(self, events, allow_rejected=False): logger.debug("Loading %d events", len(events)) with PreserveLoggingContext(): - rows = yield events_d - logger.debug("Loaded %d events (%d rows)", len(events), len(rows)) + row_map = yield events_d + logger.debug("Loaded %d events (%d rows)", len(events), len(row_map)) + + rows = (row_map.get(event_id) for event_id in events) + + # filter out absent rows + rows = filter(operator.truth, rows) if not allow_rejected: - rows[:] = [r for r in rows if r["rejected_reason"] is None] + rows = (r for r in rows if r["rejected_reason"] is None) res = yield make_deferred_yieldable( defer.gatherResults( From e6a6c4fbab8f409a20422659dddc3b7437a2cc07 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Jul 2019 16:37:50 +0100 Subject: [PATCH 071/136] split _get_events_from_db out of _enqueue_events --- synapse/storage/events_worker.py | 83 ++++++++++++++++++++------------ 1 file changed, 51 insertions(+), 32 deletions(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 5b606bec7c00..6e5f1cf6eecd 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -343,13 +343,12 @@ def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False): log_ctx = LoggingContext.current_context() log_ctx.record_event_fetch(len(missing_events_ids)) - # Note that _enqueue_events is also responsible for turning db rows + # Note that _get_events_from_db is also responsible for turning db rows # into FrozenEvents (via _get_event_from_row), which involves seeing if # the events have been redacted, and if so pulling the redaction event out # of the database to check it. # - # _enqueue_events is a bit of a rubbish name but naming is hard. - missing_events = yield self._enqueue_events( + missing_events = yield self._get_events_from_db( missing_events_ids, allow_rejected=allow_rejected ) @@ -458,43 +457,25 @@ def fire(evs, exc): self.hs.get_reactor().callFromThread(fire, event_list, e) @defer.inlineCallbacks - def _enqueue_events(self, events, allow_rejected=False): - """Fetches events from the database using the _event_fetch_list. This - allows batch and bulk fetching of events - it allows us to fetch events - without having to create a new transaction for each request for events. + def _get_events_from_db(self, event_ids, allow_rejected=False): + """Fetch a bunch of events from the database. + + Returned events will be added to the cache for future lookups. Args: - events (Iterable[str]): events to be fetched. + event_ids (Iterable[str]): The event_ids of the events to fetch + allow_rejected (bool): Whether to include rejected events Returns: - Deferred[Dict[str, _EventCacheEntry]]: map from event id to result. + Deferred[Dict[str, _EventCacheEntry]]: + map from event id to result. 
""" - if not events: + if not event_ids: return {} - events_d = defer.Deferred() - with self._event_fetch_lock: - self._event_fetch_list.append((events, events_d)) + row_map = yield self._enqueue_events(event_ids) - self._event_fetch_lock.notify() - - if self._event_fetch_ongoing < EVENT_QUEUE_THREADS: - self._event_fetch_ongoing += 1 - should_start = True - else: - should_start = False - - if should_start: - run_as_background_process( - "fetch_events", self.runWithConnection, self._do_fetch - ) - - logger.debug("Loading %d events", len(events)) - with PreserveLoggingContext(): - row_map = yield events_d - logger.debug("Loaded %d events (%d rows)", len(events), len(row_map)) - - rows = (row_map.get(event_id) for event_id in events) + rows = (row_map.get(event_id) for event_id in event_ids) # filter out absent rows rows = filter(operator.truth, rows) @@ -521,6 +502,44 @@ def _enqueue_events(self, events, allow_rejected=False): return {e.event.event_id: e for e in res if e} + @defer.inlineCallbacks + def _enqueue_events(self, events): + """Fetches events from the database using the _event_fetch_list. This + allows batch and bulk fetching of events - it allows us to fetch events + without having to create a new transaction for each request for events. + + Args: + events (Iterable[str]): events to be fetched. + + Returns: + Deferred[Dict[str, Dict]]: map from event id to row data from the database. + May contain events that weren't requested. + """ + + events_d = defer.Deferred() + with self._event_fetch_lock: + self._event_fetch_list.append((events, events_d)) + + self._event_fetch_lock.notify() + + if self._event_fetch_ongoing < EVENT_QUEUE_THREADS: + self._event_fetch_ongoing += 1 + should_start = True + else: + should_start = False + + if should_start: + run_as_background_process( + "fetch_events", self.runWithConnection, self._do_fetch + ) + + logger.debug("Loading %d events: %s", len(events), events) + with PreserveLoggingContext(): + row_map = yield events_d + logger.debug("Loaded %d events (%d rows)", len(events), len(row_map)) + + return row_map + def _fetch_event_rows(self, txn, event_ids): """Fetch event rows from the database From 448bcfd0f9975e7f26e1e493e269262140dd7dc7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Jul 2019 16:44:10 +0100 Subject: [PATCH 072/136] recursively fetch redactions --- synapse/storage/events_worker.py | 68 +++++++++++++++++--------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 6e5f1cf6eecd..e15e7d86fecd 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -17,7 +17,6 @@ import itertools import logging -import operator from collections import namedtuple from canonicaljson import json @@ -30,12 +29,7 @@ from synapse.events import FrozenEvent, event_type_from_format_version # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 from synapse.events.utils import prune_event -from synapse.logging.context import ( - LoggingContext, - PreserveLoggingContext, - make_deferred_yieldable, - run_in_background, -) +from synapse.logging.context import LoggingContext, PreserveLoggingContext from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import get_domain_from_id from synapse.util import batch_iter @@ -468,39 +462,49 @@ def _get_events_from_db(self, event_ids, allow_rejected=False): Returns: Deferred[Dict[str, _EventCacheEntry]]: - map from event id 
to result. + map from event id to result. May return extra events which + weren't asked for. """ - if not event_ids: - return {} + fetched_events = {} + events_to_fetch = event_ids - row_map = yield self._enqueue_events(event_ids) + while events_to_fetch: + row_map = yield self._enqueue_events(events_to_fetch) - rows = (row_map.get(event_id) for event_id in event_ids) + # we need to recursively fetch any redactions of those events + redaction_ids = set() + for event_id in events_to_fetch: + row = row_map.get(event_id) + fetched_events[event_id] = row + if row: + redaction_ids.update(row["redactions"]) - # filter out absent rows - rows = filter(operator.truth, rows) + events_to_fetch = redaction_ids.difference(fetched_events.keys()) + if events_to_fetch: + logger.debug("Also fetching redaction events %s", events_to_fetch) - if not allow_rejected: - rows = (r for r in rows if r["rejected_reason"] is None) + result_map = {} + for event_id, row in fetched_events.items(): + if not row: + continue + assert row["event_id"] == event_id - res = yield make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self._get_event_from_row, - row["internal_metadata"], - row["json"], - row["redactions"], - rejected_reason=row["rejected_reason"], - format_version=row["format_version"], - ) - for row in rows - ], - consumeErrors=True, + rejected_reason = row["rejected_reason"] + + if not allow_rejected and rejected_reason: + continue + + cache_entry = yield self._get_event_from_row( + row["internal_metadata"], + row["json"], + row["redactions"], + rejected_reason=row["rejected_reason"], + format_version=row["format_version"], ) - ) - return {e.event.event_id: e for e in res if e} + result_map[event_id] = cache_entry + + return result_map @defer.inlineCallbacks def _enqueue_events(self, events): From 4e97eb89e5ed4517e5967a49acf6db987bb96d51 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 24 Jul 2019 22:45:35 +0100 Subject: [PATCH 073/136] Handle loops in redaction events --- synapse/storage/events_worker.py | 96 ++++++++++++-------------------- tests/storage/test_redaction.py | 70 +++++++++++++++++++++++ 2 files changed, 106 insertions(+), 60 deletions(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index e15e7d86fecd..c6fa7f82fdd3 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -483,7 +483,8 @@ def _get_events_from_db(self, event_ids, allow_rejected=False): if events_to_fetch: logger.debug("Also fetching redaction events %s", events_to_fetch) - result_map = {} + # build a map from event_id to EventBase + event_map = {} for event_id, row in fetched_events.items(): if not row: continue @@ -494,14 +495,37 @@ def _get_events_from_db(self, event_ids, allow_rejected=False): if not allow_rejected and rejected_reason: continue - cache_entry = yield self._get_event_from_row( - row["internal_metadata"], - row["json"], - row["redactions"], - rejected_reason=row["rejected_reason"], - format_version=row["format_version"], + d = json.loads(row["json"]) + internal_metadata = json.loads(row["internal_metadata"]) + + format_version = row["format_version"] + if format_version is None: + # This means that we stored the event before we had the concept + # of a event format version, so it must be a V1 event. 
+                format_version = EventFormatVersions.V1
+
+            original_ev = event_type_from_format_version(format_version)(
+                event_dict=d,
+                internal_metadata_dict=internal_metadata,
+                rejected_reason=rejected_reason,
             )
+
+            event_map[event_id] = original_ev
+
+        # finally, we can decide whether each one needs redacting, and build
+        # the cache entries.
+        result_map = {}
+        for event_id, original_ev in event_map.items():
+            redactions = fetched_events[event_id]["redactions"]
+            redacted_event = self._maybe_redact_event_row(
+                original_ev, redactions, event_map
+            )
+
+            cache_entry = _EventCacheEntry(
+                event=original_ev, redacted_event=redacted_event
+            )
+
+            self._get_event_cache.prefill((event_id,), cache_entry)
             result_map[event_id] = cache_entry
 
         return result_map
@@ -615,50 +639,7 @@ def _fetch_event_rows(self, txn, event_ids):
 
         return event_dict
 
-    @defer.inlineCallbacks
-    def _get_event_from_row(
-        self, internal_metadata, js, redactions, format_version, rejected_reason=None
-    ):
-        """Parse an event row which has been read from the database
-
-        Args:
-            internal_metadata (str): json-encoded internal_metadata column
-            js (str): json-encoded event body from event_json
-            redactions (list[str]): a list of the events which claim to have redacted
-                this event, from the redactions table
-            format_version: (str): the 'format_version' column
-            rejected_reason (str|None): the reason this event was rejected, if any
-
-        Returns:
-            _EventCacheEntry
-        """
-        with Measure(self._clock, "_get_event_from_row"):
-            d = json.loads(js)
-            internal_metadata = json.loads(internal_metadata)
-
-            if format_version is None:
-                # This means that we stored the event before we had the concept
-                # of a event format version, so it must be a V1 event.
-                format_version = EventFormatVersions.V1
-
-            original_ev = event_type_from_format_version(format_version)(
-                event_dict=d,
-                internal_metadata_dict=internal_metadata,
-                rejected_reason=rejected_reason,
-            )
-
-            redacted_event = yield self._maybe_redact_event_row(original_ev, redactions)
-
-            cache_entry = _EventCacheEntry(
-                event=original_ev, redacted_event=redacted_event
-            )
-
-            self._get_event_cache.prefill((original_ev.event_id,), cache_entry)
-
-            return cache_entry
-
-    @defer.inlineCallbacks
-    def _maybe_redact_event_row(self, original_ev, redactions):
+    def _maybe_redact_event_row(self, original_ev, redactions, event_map):
         """Given an event object and a list of possible redacting event ids,
         determine whether to honour any of those redactions and if so return a
         redacted event.
 
         Args:
             original_ev (EventBase):
             redactions (iterable[str]): list of event ids of potential redaction events
+            event_map (dict[str, EventBase]): other events which have been fetched, in
+                which we can look up the redaction events. Map from event id to event.
 
         Returns:
             Deferred[EventBase|None]: if the event should be redacted, a pruned
@@ -675,15 +658,9 @@ def _maybe_redact_event_row(self, original_ev, redactions):
             # we choose to ignore redactions of m.room.create events.
             return None
 
-        if original_ev.type == "m.room.redaction":
-            # ... and redaction events
-            return None
-
-        redaction_map = yield self._get_events_from_cache_or_db(redactions)
-
         for redaction_id in redactions:
-            redaction_entry = redaction_map.get(redaction_id)
-            if not redaction_entry:
+            redaction_event = event_map.get(redaction_id)
+            if not redaction_event or redaction_event.rejected_reason:
                 # we don't have the redaction event, or the redaction event was not
                 # authorized.
logger.debug( @@ -693,7 +670,6 @@ def _maybe_redact_event_row(self, original_ev, redactions): ) continue - redaction_event = redaction_entry.event if redaction_event.room_id != original_ev.room_id: logger.debug( "%s was redacted by %s but redaction was in a different room!", diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 8488b6edc8ad..d961b81d487f 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -17,6 +17,8 @@ from mock import Mock +from twisted.internet import defer + from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.types import RoomID, UserID @@ -216,3 +218,71 @@ def test_redact_join(self): }, event.unsigned["redacted_because"], ) + + def test_circular_redaction(self): + redaction_event_id1 = "$redaction1_id:test" + redaction_event_id2 = "$redaction2_id:test" + + class EventIdManglingBuilder: + def __init__(self, base_builder, event_id): + self._base_builder = base_builder + self._event_id = event_id + + @defer.inlineCallbacks + def build(self, prev_event_ids): + built_event = yield self._base_builder.build(prev_event_ids) + built_event.event_id = self._event_id + built_event._event_dict["event_id"] = self._event_id + return built_event + + @property + def room_id(self): + return self._base_builder.room_id + + event_1, context_1 = self.get_success( + self.event_creation_handler.create_new_client_event( + EventIdManglingBuilder( + self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Redaction, + "sender": self.u_alice.to_string(), + "room_id": self.room1.to_string(), + "content": {"reason": "test"}, + "redacts": redaction_event_id2, + }, + ), + redaction_event_id1, + ) + ) + ) + + self.get_success(self.store.persist_event(event_1, context_1)) + + event_2, context_2 = self.get_success( + self.event_creation_handler.create_new_client_event( + EventIdManglingBuilder( + self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Redaction, + "sender": self.u_alice.to_string(), + "room_id": self.room1.to_string(), + "content": {"reason": "test"}, + "redacts": redaction_event_id1, + }, + ), + redaction_event_id2, + ) + ) + ) + self.get_success(self.store.persist_event(event_2, context_2)) + + # fetch one of the redactions + fetched = self.get_success(self.store.get_event(redaction_event_id1)) + + # it should have been redacted + self.assertEqual(fetched.unsigned["redacted_by"], redaction_event_id2) + self.assertEqual( + fetched.unsigned["redacted_because"].event_id, redaction_event_id2 + ) From 5c3eecc70f3777561253afd89adf9fb974a27e69 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Jul 2019 16:08:57 +0100 Subject: [PATCH 074/136] changelog --- changelog.d/5788.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5788.bugfix diff --git a/changelog.d/5788.bugfix b/changelog.d/5788.bugfix new file mode 100644 index 000000000000..5632f3cb99ba --- /dev/null +++ b/changelog.d/5788.bugfix @@ -0,0 +1 @@ +Correctly handle redactions of redactions. From 8c97f6414cf322fc5b42a92ed0df2fb70bfab3fc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 30 Jul 2019 08:25:02 +0100 Subject: [PATCH 075/136] Remove non-functional 'expire_access_token' setting (#5782) The `expire_access_token` didn't do what it sounded like it should do. 
What it actually did was make Synapse enforce the 'time' caveat on macaroons used as access tokens, but since our access token macaroons never contained such a caveat, it was always a no-op. (The code to add 'time' caveats was removed back in v0.18.5, in #1656) --- changelog.d/5782.removal | 1 + docs/sample_config.yaml | 4 --- synapse/api/auth.py | 28 ++++--------------- synapse/config/key.py | 6 ---- synapse/handlers/auth.py | 2 +- tests/handlers/test_register.py | 2 +- .../test_resource_limits_server_notices.py | 2 +- tests/utils.py | 1 - 8 files changed, 9 insertions(+), 37 deletions(-) create mode 100644 changelog.d/5782.removal diff --git a/changelog.d/5782.removal b/changelog.d/5782.removal new file mode 100644 index 000000000000..658bf923ab57 --- /dev/null +++ b/changelog.d/5782.removal @@ -0,0 +1 @@ +Remove non-functional 'expire_access_token' setting. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b92959692dfb..08316597fa1e 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -942,10 +942,6 @@ uploads_path: "DATADIR/uploads" # # macaroon_secret_key: -# Used to enable access token expiration. -# -#expire_access_token: False - # a secret which is used to calculate HMACs for form values, to stop # falsification of values. Must be specified for the User Consent # forms to work. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 351790cca495..179644852a2c 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -410,21 +410,16 @@ def _parse_and_validate_macaroon(self, token, rights="access"): try: user_id = self.get_user_id_from_macaroon(macaroon) - has_expiry = False guest = False for caveat in macaroon.caveats: - if caveat.caveat_id.startswith("time "): - has_expiry = True - elif caveat.caveat_id == "guest = true": + if caveat.caveat_id == "guest = true": guest = True - self.validate_macaroon( - macaroon, rights, self.hs.config.expire_access_token, user_id=user_id - ) + self.validate_macaroon(macaroon, rights, user_id=user_id) except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError): raise InvalidClientTokenError("Invalid macaroon passed.") - if not has_expiry and rights == "access": + if rights == "access": self.token_cache[token] = (user_id, guest) return user_id, guest @@ -450,7 +445,7 @@ def get_user_id_from_macaroon(self, macaroon): return caveat.caveat_id[len(user_prefix) :] raise InvalidClientTokenError("No user caveat in macaroon") - def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): + def validate_macaroon(self, macaroon, type_string, user_id): """ validate that a Macaroon is understood by and was signed by this server. @@ -458,7 +453,6 @@ def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): macaroon(pymacaroons.Macaroon): The macaroon to validate type_string(str): The kind of token required (e.g. "access", "delete_pusher") - verify_expiry(bool): Whether to verify whether the macaroon has expired. user_id (str): The user_id required """ v = pymacaroons.Verifier() @@ -471,19 +465,7 @@ def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): v.satisfy_exact("type = " + type_string) v.satisfy_exact("user_id = %s" % user_id) v.satisfy_exact("guest = true") - - # verify_expiry should really always be True, but there exist access - # tokens in the wild which expire when they should not, so we can't - # enforce expiry yet (so we have to allow any caveat starting with - # 'time < ' in access tokens). 
- # - # On the other hand, short-term login tokens (as used by CAS login, for - # example) have an expiry time which we do want to enforce. - - if verify_expiry: - v.satisfy_general(self._verify_expiry) - else: - v.satisfy_general(lambda c: c.startswith("time < ")) + v.satisfy_general(self._verify_expiry) # access_tokens include a nonce for uniqueness: any value is acceptable v.satisfy_general(lambda c: c.startswith("nonce = ")) diff --git a/synapse/config/key.py b/synapse/config/key.py index 8fc74f9cdf45..fe8386985cbc 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -116,8 +116,6 @@ def read_config(self, config, config_dir_path, **kwargs): seed = bytes(self.signing_key[0]) self.macaroon_secret_key = hashlib.sha256(seed).digest() - self.expire_access_token = config.get("expire_access_token", False) - # a secret which is used to calculate HMACs for form values, to stop # falsification of values self.form_secret = config.get("form_secret", None) @@ -144,10 +142,6 @@ def generate_config_section( # %(macaroon_secret_key)s - # Used to enable access token expiration. - # - #expire_access_token: False - # a secret which is used to calculate HMACs for form values, to stop # falsification of values. Must be specified for the User Consent # forms to work. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 05be5b7c4820..0f3ebf7ef887 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -860,7 +860,7 @@ def validate_short_term_login_token_and_get_user_id(self, login_token): try: macaroon = pymacaroons.Macaroon.deserialize(login_token) user_id = auth_api.get_user_id_from_macaroon(macaroon) - auth_api.validate_macaroon(macaroon, "login", True, user_id) + auth_api.validate_macaroon(macaroon, "login", user_id) except Exception: raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) self.ratelimit_login_per_account(user_id) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 99dce45cfee5..0ad0a8816505 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -44,7 +44,7 @@ def make_homeserver(self, reactor, clock): hs_config["max_mau_value"] = 50 hs_config["limit_usage_by_mau"] = True - hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) + hs = self.setup_test_homeserver(config=hs_config) return hs def prepare(self, reactor, clock, hs): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 984feb623f43..cdf89e338317 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -36,7 +36,7 @@ def make_homeserver(self, reactor, clock): "room_name": "Server Notices", } - hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) + hs = self.setup_test_homeserver(config=hs_config) return hs def prepare(self, reactor, clock, hs): diff --git a/tests/utils.py b/tests/utils.py index 635064626312..f1eb9a545cb8 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -126,7 +126,6 @@ def default_config(name, parse=False): "enable_registration": True, "enable_registration_captcha": False, "macaroon_secret_key": "not even a little secret", - "expire_access_token": False, "trusted_third_party_id_servers": [], "room_invite_state_types": [], "password_providers": [], From 458e51df7aabe6fc2736c1aeb6a3556374309879 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2019 13:07:02 +0100 
Subject: [PATCH 076/136] Fix error handling when fetching remote device keys

---
 synapse/handlers/e2e_keys.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 366a0bc68b60..848cd3a0d504 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -161,9 +161,7 @@ def do_remote_query(destination):
                 results[user_id] = {device["device_id"]: device["keys"]}
                 user_ids_updated.append(user_id)
             except Exception as e:
-                failures[destination] = failures.get(destination, []).append(
-                    _exception_to_failure(e)
-                )
+                failures[destination] = _exception_to_failure(e)
 
             if len(destination_query) == len(user_ids_updated):
                 # We've updated all the users in the query and we do not need to

From 1ec7d656dd57bce3c43994cc53727639ea05593e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:09:02 +0100
Subject: [PATCH 077/136] Unwrap error

---
 synapse/handlers/e2e_keys.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 848cd3a0d504..1f90b0d27864 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -25,6 +25,7 @@
 from synapse.api.errors import CodeMessageException, SynapseError
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import UserID, get_domain_from_id
+from synapse.util import unwrapFirstError
 from synapse.util.retryutils import NotRetryingDestination
 
 logger = logging.getLogger(__name__)
@@ -192,7 +193,7 @@ def do_remote_query(destination):
                     for destination in remote_queries_not_in_cache
                 ],
                 consumeErrors=True,
-            )
+            ).addErrback(unwrapFirstError)
         )
 
         return {"device_keys": results, "failures": failures}

From e23ab7f41a2ba0e3e45a70c3a1915f9fd78c15ba Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:10:00 +0100
Subject: [PATCH 078/136] Newsfile

---
 changelog.d/5789.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5789.bugfix

diff --git a/changelog.d/5789.bugfix b/changelog.d/5789.bugfix
new file mode 100644
index 000000000000..d6f4e590aef9
--- /dev/null
+++ b/changelog.d/5789.bugfix
@@ -0,0 +1 @@
+Fix UISIs during homeserver outage.

From b4d5ff0af73d40f8daad631b33e38e8d7c472bc1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:19:22 +0100
Subject: [PATCH 079/136] Don't log as exception when failing during backfill

---
 synapse/handlers/federation.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 89b37dbc1c50..c70f12092a56 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -978,6 +978,9 @@ def try_backfill(domains):
             except NotRetryingDestination as e:
                 logger.info(str(e))
                 continue
+            except RequestSendFailed as e:
+                logger.info("Failed to get backfill from %s because %s", dom, e)
+                continue
             except FederationDeniedError as e:
                 logger.info(e)
                 continue

From f92d05e25487fdca22961847611e598006d17252 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 13:42:54 +0100
Subject: [PATCH 080/136] Newsfile

---
 changelog.d/5790.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5790.misc

diff --git a/changelog.d/5790.misc b/changelog.d/5790.misc
new file mode 100644
index 000000000000..3e9e435d7aa7
--- /dev/null
+++ b/changelog.d/5790.misc
@@ -0,0 +1 @@
+Remove some spurious exceptions from the logs where we failed to talk to a remote server.
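
A note on the bug fixed in [PATCH 076/136] above: it is a classic Python
pitfall. `list.append` mutates the list in place and returns `None`, so
chaining it onto `dict.get` stores `None` rather than the list. A minimal
standalone sketch of the failure mode (illustrative, not Synapse code):

    failures = {}

    # Buggy: append() returns None, so this stores None under "remote".
    failures["remote"] = failures.get("remote", []).append("error")
    assert failures["remote"] is None

    # Correct alternatives: assign the value directly (what the patch does,
    # keeping one failure per destination), or accumulate a list with
    # setdefault, which returns the stored list itself.
    failures = {}
    failures.setdefault("remote", []).append("error")
    assert failures["remote"] == ["error"]

[PATCH 077/136] is the other half of the fix: `defer.gatherResults` with
`consumeErrors=True` wraps the first exception in a Twisted `FirstError`,
so `addErrback(unwrapFirstError)` is needed to surface the original
exception to the caller instead of the wrapper.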
From 15056ca2086f3165a74cad65d35b2b742caf4fee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2019 14:51:41 +0100 Subject: [PATCH 081/136] Fix current_state_events membership background update. Turns out not all rooms are in `rooms`, so lets fetch the room list from `current_state_events`. We move the delta file to force it to be run again. --- synapse/storage/roommember.py | 2 +- ...s_membership.sql => current_state_events_membership_mk2.sql} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename synapse/storage/schema/delta/56/{current_state_events_membership.sql => current_state_events_membership_mk2.sql} (100%) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index e60409ed73d8..eecb276465ef 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -935,7 +935,7 @@ def _background_current_state_membership_txn(txn, last_processed_room): while processed < batch_size: txn.execute( """ - SELECT MIN(room_id) FROM rooms WHERE room_id > ? + SELECT MIN(room_id) FROM current_state_events WHERE room_id > ? """, (last_processed_room,), ) diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql similarity index 100% rename from synapse/storage/schema/delta/56/current_state_events_membership.sql rename to synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql From 958d69f30066994fbd22f404c4260c63318b8c15 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2019 14:53:52 +0100 Subject: [PATCH 082/136] Newsfile --- changelog.d/5792.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5792.misc diff --git a/changelog.d/5792.misc b/changelog.d/5792.misc new file mode 100644 index 000000000000..5e15dfd5faa7 --- /dev/null +++ b/changelog.d/5792.misc @@ -0,0 +1 @@ +Reduce database IO usage by optimising queries for current membership. From 123c04daa7e729ce22c8771d1aa3d79a1a880e29 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 30 Jul 2019 15:29:26 +0100 Subject: [PATCH 083/136] Don't recreate column --- .../56/current_state_events_membership.sql | 22 +++++++++++++++++++ .../current_state_events_membership_mk2.sql | 1 - 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 synapse/storage/schema/delta/56/current_state_events_membership.sql diff --git a/synapse/storage/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/delta/56/current_state_events_membership.sql new file mode 100644 index 000000000000..473018676f59 --- /dev/null +++ b/synapse/storage/schema/delta/56/current_state_events_membership.sql @@ -0,0 +1,22 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- We add membership to current state so that we don't need to join against +-- room_memberships, which can be surprisingly costly (we do such queries +-- very frequently). 
+-- This will be null for non-membership events and the content.membership key
+-- for membership events. (Will also be null for membership events until the
+-- background update job has finished).
+ALTER TABLE current_state_events ADD membership TEXT;
diff --git a/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql
index b2e08cd85dcb..3133d42d4a2f 100644
--- a/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql
+++ b/synapse/storage/schema/delta/56/current_state_events_membership_mk2.sql
@@ -19,7 +19,6 @@
 -- This will be null for non-membership events and the content.membership key
 -- for membership events. (Will also be null for membership events until the
 -- background update job has finished).
-ALTER TABLE current_state_events ADD membership TEXT;
 
 INSERT INTO background_updates (update_name, progress_json) VALUES
   ('current_state_events_membership', '{}');

From 4037d3220aa265d0888527f05329084eaa4dbe71 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 16:43:59 +0100
Subject: [PATCH 084/136] Newsfile

---
 changelog.d/5793.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5793.misc

diff --git a/changelog.d/5793.misc b/changelog.d/5793.misc
new file mode 100644
index 000000000000..5e15dfd5faa7
--- /dev/null
+++ b/changelog.d/5793.misc
@@ -0,0 +1 @@
+Reduce database IO usage by optimising queries for current membership.

From a9bcae9f50a14dc020634c532732c1a86b696d92 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jul 2019 17:42:56 +0100
Subject: [PATCH 085/136] Share SSL options for well-known requests

---
 synapse/crypto/context_factory.py              | 8 ++++++++
 .../http/federation/matrix_federation_agent.py | 16 +++++-----------
 .../federation/test_matrix_federation_agent.py | 12 ++++++------
 3 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py
index 4f48e8e88d51..06e63a96b50c 100644
--- a/synapse/crypto/context_factory.py
+++ b/synapse/crypto/context_factory.py
@@ -31,6 +31,7 @@
     platformTrust,
 )
 from twisted.python.failure import Failure
+from twisted.web.iweb import IPolicyForHTTPS
 
 logger = logging.getLogger(__name__)
 
@@ -74,6 +75,7 @@ def getContext(self):
         return self._context
 
 
+@implementer(IPolicyForHTTPS)
 class ClientTLSOptionsFactory(object):
     """Factory for Twisted SSLClientConnectionCreators that are used to make connections
     to remote servers for federation.
@@ -146,6 +148,12 @@ def _context_info_cb(ssl_connection, where, ret):
             f = Failure()
             tls_protocol.failVerification(f)
 
+    def creatorForNetloc(self, hostname, port):
+        """Implements the IPolicyForHTTPS interface so that this can be passed
+        directly to agents.
+        """
+        return self.get_options(hostname)
+
 
 @implementer(IOpenSSLClientConnectionCreator)
 class SSLClientConnectionCreator(object):
diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index c03ddb724ff5..a0d513983914 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -64,10 +64,6 @@ class MatrixFederationAgent(object):
         tls_client_options_factory (ClientTLSOptionsFactory|None):
             factory to use for fetching client tls options, or none to disable TLS.
 
-        _well_known_tls_policy (IPolicyForHTTPS|None):
-            TLS policy to use for fetching .well-known files. None to use a default
-            (browser-like) implementation.
- _srv_resolver (SrvResolver|None): SRVResolver impl to use for looking up SRV records. None to use a default implementation. @@ -81,7 +77,6 @@ def __init__( self, reactor, tls_client_options_factory, - _well_known_tls_policy=None, _srv_resolver=None, _well_known_cache=well_known_cache, ): @@ -98,13 +93,12 @@ def __init__( self._pool.maxPersistentPerHost = 5 self._pool.cachedConnectionTimeout = 2 * 60 - agent_args = {} - if _well_known_tls_policy is not None: - # the param is called 'contextFactory', but actually passing a - # contextfactory is deprecated, and it expects an IPolicyForHTTPS. - agent_args["contextFactory"] = _well_known_tls_policy _well_known_agent = RedirectAgent( - Agent(self._reactor, pool=self._pool, **agent_args) + Agent( + self._reactor, + pool=self._pool, + contextFactory=tls_client_options_factory, + ) ) self._well_known_agent = _well_known_agent diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index b906686b495c..4255add09727 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -75,7 +75,6 @@ def setUp(self): config_dict = default_config("test", parse=False) config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()] - # config_dict["trusted_key_servers"] = [] self._config = config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") @@ -83,7 +82,6 @@ def setUp(self): self.agent = MatrixFederationAgent( reactor=self.reactor, tls_client_options_factory=ClientTLSOptionsFactory(config), - _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, ) @@ -691,16 +689,18 @@ def test_get_well_known_unsigned_cert(self): not signed by a CA """ - # we use the same test server as the other tests, but use an agent - # with _well_known_tls_policy left to the default, which will not - # trust it (since the presented cert is signed by a test CA) + # we use the same test server as the other tests, but use an agent with + # the config left to the default, which will not trust it (since the + # presented cert is signed by a test CA) self.mock_resolver.resolve_service.side_effect = lambda _: [] self.reactor.lookups["testserv"] = "1.2.3.4" + config = default_config("test", parse=True) + agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(self._config), + tls_client_options_factory=ClientTLSOptionsFactory(config), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, ) From 3b7a35a59a29c2700171546cd052dd8d506f3330 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 10:31:00 +0100 Subject: [PATCH 086/136] Newsfile --- changelog.d/5794.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5794.misc diff --git a/changelog.d/5794.misc b/changelog.d/5794.misc new file mode 100644 index 000000000000..720e0ddcfb89 --- /dev/null +++ b/changelog.d/5794.misc @@ -0,0 +1 @@ +Improve performance when making `.well-known` requests by sharing the SSL options between requests. From 6be336c0d8b1203744ae745add847f01f21e1246 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 10:56:02 +0100 Subject: [PATCH 087/136] Disable codecov reports to GH comments. The double posting is really annoying, and I don't think anyone is actually reading them. The commit statuses should give a good summary and will link to a full report. 
--- .codecov.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.codecov.yml b/.codecov.yml index a05698a39ce6..ef2e1eabfb4b 100644 --- a/.codecov.yml +++ b/.codecov.yml @@ -1,5 +1,4 @@ -comment: - layout: "diff" +comment: off coverage: status: From fe2f2fc530e3ea832d30a6964322a2b8d3691e55 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 10:59:39 +0100 Subject: [PATCH 088/136] Newsfile --- changelog.d/5796.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5796.misc diff --git a/changelog.d/5796.misc b/changelog.d/5796.misc new file mode 100644 index 000000000000..be520946c7e0 --- /dev/null +++ b/changelog.d/5796.misc @@ -0,0 +1 @@ +Disable codecov GitHub comments on PRs. From 8f15832950148280ed5a9cf28b74971abfd09e4f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 31 Jul 2019 20:39:22 +1000 Subject: [PATCH 089/136] Remove DelayedCall debugging from test runs (#5787) --- changelog.d/5787.misc | 1 + tests/unittest.py | 6 ------ 2 files changed, 1 insertion(+), 6 deletions(-) create mode 100644 changelog.d/5787.misc diff --git a/changelog.d/5787.misc b/changelog.d/5787.misc new file mode 100644 index 000000000000..ead0b04b6210 --- /dev/null +++ b/changelog.d/5787.misc @@ -0,0 +1 @@ +Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests. diff --git a/tests/unittest.py b/tests/unittest.py index f5fae2131771..561cebc223a4 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -23,8 +23,6 @@ from canonicaljson import json -import twisted -import twisted.logger from twisted.internet.defer import Deferred, succeed from twisted.python.threadpool import ThreadPool from twisted.trial import unittest @@ -80,10 +78,6 @@ def __init__(self, methodName, *args, **kwargs): @around(self) def setUp(orig): - # enable debugging of delayed calls - this means that we get a - # traceback when a unit test exits leaving things on the reactor. - twisted.internet.base.DelayedCall.debug = True - # if we're not starting in the sentinel logcontext, then to be honest # all future bets are off. if LoggingContext.current_context() is not LoggingContext.sentinel: From 58a755cdc36d94fa25c086576748bf44d978cf5e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 31 Jul 2019 13:24:51 +0100 Subject: [PATCH 090/136] Remove duplicate return statement --- synapse/handlers/directory.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 0fd423197c93..526379c6f773 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -278,7 +278,6 @@ def get_association(self, room_alias): servers = list(servers) return {"room_id": room_id, "servers": servers} - return @defer.inlineCallbacks def on_directory_query(self, args): From 72167fb39474517da836170b79d03726f78e35e2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 31 Jul 2019 15:19:06 +0100 Subject: [PATCH 091/136] Change user deactivated errcode to USER_DEACTIVATED and use it (#5686) This is intended as an amendment to #5674 as using M_UNKNOWN as the errcode makes it hard for clients to differentiate between an invalid password and a deactivated user (the problem we were trying to solve in the first place). 
M_UNKNOWN was originally chosen as it was presumed that an MSC would have to
be carried out to add a new code, but as Synapse is often the testing bed for
new MSC implementations, it makes sense to try it out first in the wild and
then add it into the spec if it is successful.

Thus this PR returns a new M_USER_DEACTIVATED code when a deactivated user
attempts to log in.
---
 changelog.d/5686.feature | 1 +
 synapse/api/errors.py    | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5686.feature

diff --git a/changelog.d/5686.feature b/changelog.d/5686.feature
new file mode 100644
index 000000000000..367aa1eca251
--- /dev/null
+++ b/changelog.d/5686.feature
@@ -0,0 +1 @@
+Use `M_USER_DEACTIVATED` instead of `M_UNKNOWN` for errcode when a deactivated user attempts to login.
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index ad3e262041df..cf1ebf1af231 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -61,6 +61,7 @@ class Codes(object):
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
     WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
     EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT"
+    USER_DEACTIVATED = "M_USER_DEACTIVATED"
 
 
 class CodeMessageException(RuntimeError):
@@ -151,7 +152,7 @@ def __init__(self, msg):
             msg (str): The human-readable error message
         """
         super(UserDeactivatedError, self).__init__(
-            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN
+            code=http_client.FORBIDDEN, msg=msg, errcode=Codes.USER_DEACTIVATED
         )
 

From f31d4cb7a2e90b337f60ef06a3d31c0be9ad667c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 31 Jul 2019 15:52:27 +0100
Subject: [PATCH 092/136] Don't allow clients to send tombstones that
 reference the same room

---
 synapse/events/validator.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index f7ffd1d561da..29f99361c082 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -106,6 +106,13 @@ def validate_builder(self, event):
             if event.content["membership"] not in Membership.LIST:
                 raise SynapseError(400, "Invalid membership key")
 
+        elif event.type == EventTypes.Tombstone:
+            if "replacement_room" not in event.content:
+                raise SynapseError(400, "Content has no replacement_room key")
+
+            if event.content["replacement_room"] == event.room_id:
+                raise SynapseError(400, "Tombstone cannot reference itself")
+
     def _ensure_strings(self, d, keys):
         for s in keys:
             if s not in d:

From 02735e140f4b1e36ae29be15511a7c08cd74364e Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 31 Jul 2019 15:53:52 +0100
Subject: [PATCH 093/136] Newsfile

---
 changelog.d/5801.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5801.misc

diff --git a/changelog.d/5801.misc b/changelog.d/5801.misc
new file mode 100644
index 000000000000..e6ecb475d9c6
--- /dev/null
+++ b/changelog.d/5801.misc
@@ -0,0 +1 @@
+Don't allow clients to send tombstone events that reference the room its sent in.

From cf89266b980b62a6d8547f8e1ae9394359a05fc8 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 31 Jul 2019 16:03:14 +0100
Subject: [PATCH 094/136] Deny redaction of events in a different room.

We already correctly filter out such redactions, but we should also
deny them over the CS API.
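
Previously the event lookup passed `check_room_id=event.room_id`, so a
cross-room redaction simply came back as `None` and skipped the extra
checks; fetching without the filter and comparing room IDs explicitly lets
us reject the request up front. Roughly, a client attempt now fails fast
(illustrative request, hypothetical room/event IDs, URL-encoding elided):

    PUT /_matrix/client/r0/rooms/!roomA:example.org/redact/$event_in_room_b/txn1
    {"reason": "spam"}

    HTTP/1.1 400 Bad Request
    {"errcode": "M_UNKNOWN", "error": "Cannot redact event from a different room"}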
--- synapse/handlers/message.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e951c39fa7e9..a5e23c4caf90 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -795,7 +795,6 @@ def is_inviter_member_event(e): get_prev_content=False, allow_rejected=False, allow_none=True, - check_room_id=event.room_id, ) # we can make some additional checks now if we have the original event. @@ -803,6 +802,9 @@ def is_inviter_member_event(e): if original_event.type == EventTypes.Create: raise AuthError(403, "Redacting create events is not permitted") + if original_event.room_id != event.room_id: + raise SynapseError(400, "Cannot redact event from a different room") + prev_state_ids = yield context.get_prev_state_ids(self.store) auth_events_ids = yield self.auth.compute_auth_events( event, prev_state_ids, for_verification=True From 0eefb76fa1a4348a50843097a92ead108cec398c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 16:13:57 +0100 Subject: [PATCH 095/136] Newsfile --- changelog.d/5802.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5802.misc diff --git a/changelog.d/5802.misc b/changelog.d/5802.misc new file mode 100644 index 000000000000..de31192652da --- /dev/null +++ b/changelog.d/5802.misc @@ -0,0 +1 @@ +Deny redactions of events sent in a different room. From 2e697d30134c2d1b8a8d98775aa72657186deb76 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 16:22:38 +0100 Subject: [PATCH 096/136] Explicitly check that tombstone is a state event before notifying. --- synapse/push/baserules.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 134bf805eb8d..286374d0b537 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -245,7 +245,13 @@ def make_base_prepend_rules(kind, modified_base_rules): "key": "type", "pattern": "m.room.tombstone", "_id": "_tombstone", - } + }, + { + "kind": "event_match", + "key": "state_key", + "pattern": "", + "_id": "_tombstone_statekey", + }, ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], }, From c5288e9984d87c1d1257094f0493da15a9e08962 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 16:28:40 +0100 Subject: [PATCH 097/136] Newsfile --- changelog.d/5804.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5804.bugfix diff --git a/changelog.d/5804.bugfix b/changelog.d/5804.bugfix new file mode 100644 index 000000000000..75c17b460dbb --- /dev/null +++ b/changelog.d/5804.bugfix @@ -0,0 +1 @@ +Fix check that tombstone is a state event in push rules. From dc4d74e44adbd8fc79bbaa7ac44b430a11454173 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 16:36:20 +0100 Subject: [PATCH 098/136] Validate well-known state events are state events. Lets disallow sending things like memberships, topics etc as non-state events. 
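
The check rests on `event.is_state()`: for validation purposes an event is
a state event exactly when it carries a `state_key` (possibly the empty
string). A toy sketch of the distinction, using plain dicts rather than the
real event builder API:

    # Hypothetical event dicts, not Synapse's EventBuilder objects.
    def is_state(ev):
        return "state_key" in ev

    assert is_state({"type": "m.room.topic", "state_key": "", "content": {"topic": "hi"}})
    assert not is_state({"type": "m.room.topic", "content": {"topic": "hi"}})

Without this, a client could send e.g. an `m.room.member` event as a plain
timeline event; it would never become part of the room state, but clients
keying off the event type could be confused by it.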
--- synapse/events/validator.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 29f99361c082..0cf2b9ba42b8 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -95,10 +95,10 @@ def validate_builder(self, event): elif event.type == EventTypes.Topic: self._ensure_strings(event.content, ["topic"]) - + self._ensure_state_event(event) elif event.type == EventTypes.Name: self._ensure_strings(event.content, ["name"]) - + self._ensure_state_event(event) elif event.type == EventTypes.Member: if "membership" not in event.content: raise SynapseError(400, "Content has not membership key") @@ -106,6 +106,7 @@ def validate_builder(self, event): if event.content["membership"] not in Membership.LIST: raise SynapseError(400, "Invalid membership key") + self._ensure_state_event(event) elif event.type == EventTypes.Tombstone: if "replacement_room" not in event.content: raise SynapseError(400, "Content has no replacement_room key") @@ -113,9 +114,15 @@ def validate_builder(self, event): if event.content["replacement_room"] == event.room_id: raise SynapseError(400, "Tombstone cannot reference itself") + self._ensure_state_event(event) + def _ensure_strings(self, d, keys): for s in keys: if s not in d: raise SynapseError(400, "'%s' not in content" % (s,)) if not isinstance(d[s], string_types): raise SynapseError(400, "'%s' not a string type" % (s,)) + + def _ensure_state_event(self, event): + if not event.is_state(): + raise SynapseError(400, "'%s' must be state events" % (event.type,)) From e5a0224837544142a2d78cae1c68c9c8023e1c32 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 31 Jul 2019 16:39:42 +0100 Subject: [PATCH 099/136] Newsfile --- changelog.d/5805.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5805.misc diff --git a/changelog.d/5805.misc b/changelog.d/5805.misc new file mode 100644 index 000000000000..352cb3db04d1 --- /dev/null +++ b/changelog.d/5805.misc @@ -0,0 +1 @@ +Deny sending well known state types as non-state events. From a4a9ded4d002d7edc6d6f46cc5ddcf279a0d7e9b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 31 Jul 2019 18:12:04 +0200 Subject: [PATCH 100/136] Allow defining HTML templates to serve the user on account renewal --- synapse/config/registration.py | 50 ++++++++++++++++++- synapse/handlers/account_validity.py | 10 +++- synapse/res/templates/account_renewed.html | 1 + synapse/res/templates/invalid_token.html | 1 + .../rest/client/v2_alpha/account_validity.py | 23 ++++++--- 5 files changed, 76 insertions(+), 9 deletions(-) create mode 100644 synapse/res/templates/account_renewed.html create mode 100644 synapse/res/templates/invalid_token.html diff --git a/synapse/config/registration.py b/synapse/config/registration.py index c3de7a4e32ae..624fd546dd71 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os +import pkg_resources + from distutils.util import strtobool from synapse.config._base import Config, ConfigError @@ -41,8 +44,41 @@ def __init__(self, config, synapse_config): self.startup_job_max_delta = self.period * 10.0 / 100.0 - if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: - raise ConfigError("Can't send renewal emails without 'public_baseurl'") + if self.renew_by_email_enabled: + if "public_baseurl" not in synapse_config: + raise ConfigError("Can't send renewal emails without 'public_baseurl'") + + template_dir = config.get("template_dir") + + if not template_dir: + template_dir = pkg_resources.resource_filename("synapse", "res/templates") + + if "account_renewed_html_path" in config: + file_path = os.path.join( + template_dir, config["account_renewed_html_path"], + ) + + self.account_renewed_html_content = self.read_file( + file_path, + "account_validity.account_renewed_html_path", + ) + else: + self.account_renewed_html_content = ( + "Your account has been successfully renewed." + ) + + if "invalid_token_html_path" in config: + file_path = os.path.join( + template_dir, config["invalid_token_html_path"], + ) + + self.invalid_token_html_content = self.read_file( + file_path, "account_validity.invalid_token_html_path", + ) + else: + self.invalid_token_html_content = ( + "Invalid renewal token." + ) class RegistrationConfig(Config): @@ -145,6 +181,16 @@ def generate_config_section(self, generate_secrets=False, **kwargs): # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" + # # Directory in which Synapse will try to find the HTML files to serve to the + # # user when trying to renew an account. Optional, defaults to + # # synapse/res/templates. + # template_dir: "res/templates" + # # HTML to be displayed to the user after they successfully renewed their + # # account. Optional. + # account_renewed_html_path: "account_renewed.html" + # # HTML to be displayed when the user tries to renew an account with an invalid + # # renewal token. Optional. + # invalid_token_html_path: "invalid_token.html" # Time that a user's session remains valid for, after they log in. # diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 930204e2d034..34574f1a12a6 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -226,11 +226,19 @@ def renew_account(self, renewal_token): Args: renewal_token (str): Token sent with the renewal request. + Returns: + bool: Whether the provided token is valid. """ - user_id = yield self.store.get_user_from_renewal_token(renewal_token) + try: + user_id = yield self.store.get_user_from_renewal_token(renewal_token) + except StoreError: + defer.returnValue(False) + logger.debug("Renewing an account for user %s", user_id) yield self.renew_account_for_user(user_id) + defer.returnValue(True) + @defer.inlineCallbacks def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False): """Renews the account attached to a given user by pushing back the diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html new file mode 100644 index 000000000000..894da030afb7 --- /dev/null +++ b/synapse/res/templates/account_renewed.html @@ -0,0 +1 @@ +Your account has been successfully renewed. 
diff --git a/synapse/res/templates/invalid_token.html b/synapse/res/templates/invalid_token.html new file mode 100644 index 000000000000..6bd2b9836437 --- /dev/null +++ b/synapse/res/templates/invalid_token.html @@ -0,0 +1 @@ +Invalid renewal token. diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index 133c61900a5d..347bde839a04 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -42,6 +42,8 @@ def __init__(self, hs): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() + self.success_html = hs.config.account_validity.account_renewed_html_content + self.failure_html = hs.config.account_validity.invalid_token_html_content @defer.inlineCallbacks def on_GET(self, request): @@ -49,16 +51,25 @@ def on_GET(self, request): raise SynapseError(400, "Missing renewal token") renewal_token = request.args[b"token"][0] - yield self.account_activity_handler.renew_account(renewal_token.decode("utf8")) + token_valid = yield self.account_activity_handler.renew_account( + renewal_token.decode("utf8"), + ) + + if token_valid: + status_code = 200 + response = self.success_html + else: + status_code = 404 + response = self.failure_html - request.setResponseCode(200) + request.setResponseCode(status_code) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") request.setHeader( - b"Content-Length", b"%d" % (len(AccountValidityRenewServlet.SUCCESS_HTML),) + b"Content-Length", b"%d" % (len(response),) ) - request.write(AccountValidityRenewServlet.SUCCESS_HTML) + request.write(response.encode("utf8")) finish_request(request) - return None + defer.returnValue(None) class AccountValiditySendMailServlet(RestServlet): @@ -87,7 +98,7 @@ def on_POST(self, request): user_id = requester.user.to_string() yield self.account_activity_handler.send_renewal_email_to_user(user_id) - return (200, {}) + defer.returnValue((200, {})) def register_servlets(hs, http_server): From bc3550352830bef2c484dd5d3dd5ff1fce29a6fc Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 1 Aug 2019 12:00:08 +0200 Subject: [PATCH 101/136] Add tests --- tests/rest/client/v2_alpha/test_register.py | 37 +++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 89a3f95c0a8b..bb867150f44b 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -323,6 +323,8 @@ def make_homeserver(self, reactor, clock): "renew_at": 172800000, # Time in ms for 2 days "renew_by_email_enabled": True, "renew_email_subject": "Renew your account", + "account_renewed_html_path": "account_renewed.html", + "invalid_token_html_path": "invalid_token.html", } # Email config. @@ -373,6 +375,19 @@ def test_renewal_email(self): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + # Check that we're getting HTML back. + content_type = None + for header in channel.result.get("headers", []): + if header[0] == b"Content-Type": + content_type = header[1] + self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + + # Check that the HTML we're getting is the one we expect on a successful renewal. 
+ expected_html = self.hs.config.account_validity.account_renewed_html_content + self.assertEqual( + channel.result["body"], expected_html.encode("utf8"), channel.result + ) + # Move 3 days forward. If the renewal failed, every authed request with # our access token should be denied from now, otherwise they should # succeed. @@ -381,6 +396,28 @@ def test_renewal_email(self): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + def test_renewal_invalid_token(self): + # Hit the renewal endpoint with an invalid token and check that it behaves as + # expected, i.e. that it responds with 404 Not Found and the correct HTML. + url = "/_matrix/client/unstable/account_validity/renew?token=123" + request, channel = self.make_request(b"GET", url) + self.render(request) + self.assertEquals(channel.result["code"], b"404", channel.result) + + # Check that we're getting HTML back. + content_type = None + for header in channel.result.get("headers", []): + if header[0] == b"Content-Type": + content_type = header[1] + self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + + # Check that the HTML we're getting is the one we expect when using an + # invalid/unknown token. + expected_html = self.hs.config.account_validity.invalid_token_html_content + self.assertEqual( + channel.result["body"], expected_html.encode("utf8"), channel.result + ) + def test_manual_email_send(self): self.email_attempts = [] From f4a30d286f5788c16fed10bdd45a464e1fb11316 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 1 Aug 2019 12:08:06 +0200 Subject: [PATCH 102/136] Changelog --- changelog.d/5807.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5807.feature diff --git a/changelog.d/5807.feature b/changelog.d/5807.feature new file mode 100644 index 000000000000..8b7d29a23cb4 --- /dev/null +++ b/changelog.d/5807.feature @@ -0,0 +1 @@ +Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature. From 3ff3dfe5a3da7f7c2d13006b7da125c220cb5836 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 1 Aug 2019 12:08:25 +0200 Subject: [PATCH 103/136] Sample config --- docs/sample_config.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 08316597fa1e..8a324457fe59 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -802,6 +802,16 @@ uploads_path: "DATADIR/uploads" # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" +# # Directory in which Synapse will try to find the HTML files to serve to the +# # user when trying to renew an account. Optional, defaults to +# # synapse/res/templates. +# template_dir: "res/templates" +# # HTML to be displayed to the user after they successfully renewed their +# # account. Optional. +# account_renewed_html_path: "account_renewed.html" +# # HTML to be displayed when the user tries to renew an account with an invalid +# # renewal token. Optional. +# invalid_token_html_path: "invalid_token.html" # Time that a user's session remains valid for, after they log in. 
# From f25f638c35b1d16eb0aa3f746c10313b3c3b040d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 1 Aug 2019 12:11:27 +0200 Subject: [PATCH 104/136] Lint --- docs/sample_config.yaml | 2 +- synapse/config/registration.py | 19 +++++++------------ .../rest/client/v2_alpha/account_validity.py | 6 ++---- 3 files changed, 10 insertions(+), 17 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8a324457fe59..1b206fe6bf40 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -805,7 +805,7 @@ uploads_path: "DATADIR/uploads" # # Directory in which Synapse will try to find the HTML files to serve to the # # user when trying to renew an account. Optional, defaults to # # synapse/res/templates. -# template_dir: "res/templates" +# template_dir: "res/templates" # # HTML to be displayed to the user after they successfully renewed their # # account. Optional. # account_renewed_html_path: "account_renewed.html" diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 624fd546dd71..e2bee3c116b4 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -14,10 +14,10 @@ # limitations under the License. import os -import pkg_resources - from distutils.util import strtobool +import pkg_resources + from synapse.config._base import Config, ConfigError from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols @@ -54,13 +54,10 @@ def __init__(self, config, synapse_config): template_dir = pkg_resources.resource_filename("synapse", "res/templates") if "account_renewed_html_path" in config: - file_path = os.path.join( - template_dir, config["account_renewed_html_path"], - ) + file_path = os.path.join(template_dir, config["account_renewed_html_path"]) self.account_renewed_html_content = self.read_file( - file_path, - "account_validity.account_renewed_html_path", + file_path, "account_validity.account_renewed_html_path" ) else: self.account_renewed_html_content = ( @@ -68,12 +65,10 @@ def __init__(self, config, synapse_config): ) if "invalid_token_html_path" in config: - file_path = os.path.join( - template_dir, config["invalid_token_html_path"], - ) + file_path = os.path.join(template_dir, config["invalid_token_html_path"]) self.invalid_token_html_content = self.read_file( - file_path, "account_validity.invalid_token_html_path", + file_path, "account_validity.invalid_token_html_path" ) else: self.invalid_token_html_content = ( @@ -184,7 +179,7 @@ def generate_config_section(self, generate_secrets=False, **kwargs): # # Directory in which Synapse will try to find the HTML files to serve to the # # user when trying to renew an account. Optional, defaults to # # synapse/res/templates. - # template_dir: "res/templates" + # template_dir: "res/templates" # # HTML to be displayed to the user after they successfully renewed their # # account. Optional. 
# account_renewed_html_path: "account_renewed.html" diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index 347bde839a04..33f6a23028d2 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -52,7 +52,7 @@ def on_GET(self, request): renewal_token = request.args[b"token"][0] token_valid = yield self.account_activity_handler.renew_account( - renewal_token.decode("utf8"), + renewal_token.decode("utf8") ) if token_valid: @@ -64,9 +64,7 @@ def on_GET(self, request): request.setResponseCode(status_code) request.setHeader(b"Content-Type", b"text/html; charset=utf-8") - request.setHeader( - b"Content-Length", b"%d" % (len(response),) - ) + request.setHeader(b"Content-Length", b"%d" % (len(response),)) request.write(response.encode("utf8")) finish_request(request) defer.returnValue(None) From 76a58fdcced5d152efee48f69b6ab658e0e6cbc5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:14:25 +0100 Subject: [PATCH 105/136] Fix spelling. Co-Authored-By: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/5801.misc | 2 +- synapse/events/validator.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/changelog.d/5801.misc b/changelog.d/5801.misc index e6ecb475d9c6..e19854de822f 100644 --- a/changelog.d/5801.misc +++ b/changelog.d/5801.misc @@ -1 +1 @@ -Don't allow clients to send tombstone events that reference the room its sent in. +Don't allow clients to send tombstone events that reference the room it's sent in. diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 29f99361c082..6374dd067d02 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -111,7 +111,9 @@ def validate_builder(self, event): raise SynapseError(400, "Content has no replacement_room key") if event.content["replacement_room"] == event.room_id: - raise SynapseError(400, "Tombstone cannot reference itself") + raise SynapseError( + 400, "Tombstone cannot reference the room it was sent in" + ) def _ensure_strings(self, d, keys): for s in keys: From d2e3d5b9db346c88b31ff5eef2793c5cf82f698e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:23:00 +0100 Subject: [PATCH 106/136] Handle incorrectly encoded query params correctly --- synapse/http/servlet.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index f0ca7d9aba44..fd07bf7b8e55 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -166,7 +166,12 @@ def parse_string_from_args( value = args[name][0] if encoding: - value = value.decode(encoding) + try: + value = value.decode(encoding) + except ValueError: + raise SynapseError( + 400, "Query parameter %r must be %s" % (name, encoding) + ) if allowed_values is not None and value not in allowed_values: message = "Query parameter %r must be one of [%s]" % ( From da378af44517d96d461431c21400e44b00edb285 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:24:00 +0100 Subject: [PATCH 107/136] Newsfile --- changelog.d/5808.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5808.misc diff --git a/changelog.d/5808.misc b/changelog.d/5808.misc new file mode 100644 index 000000000000..cac3fd34d12e --- /dev/null +++ b/changelog.d/5808.misc @@ -0,0 +1 @@ +Handle incorrectly encoded query params correctly by returning a 400. 
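
A note on why the `except ValueError` in [PATCH 106/136] above is
sufficient: `UnicodeDecodeError` is defined as a subclass of `ValueError`,
so a malformed byte sequence in a query parameter is caught and turned into
a 400 instead of bubbling up as an unhandled 500. A quick standalone
illustration:

    # b"\xff" is not valid UTF-8, so decode() raises UnicodeDecodeError,
    # which Python makes a subclass of ValueError.
    assert issubclass(UnicodeDecodeError, ValueError)

    try:
        b"\xff".decode("utf8")
    except ValueError as e:
        print(type(e).__name__)  # prints: UnicodeDecodeError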
From d02e41dcb299c7588bc9fa26bd0b5321fd7c5751 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:41:27 +0100 Subject: [PATCH 108/136] Handle pusher being deleted during processing. Instead of throwing a StoreError lets break out of processing loop and mark the pusher as stopped. --- synapse/push/emailpusher.py | 19 +++++++++++++------ synapse/push/httppusher.py | 27 ++++++++++++++++++++------- synapse/storage/pusher.py | 30 ++++++++++++++++++++++-------- 3 files changed, 55 insertions(+), 21 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 424ffa8b682c..f688d4152d7c 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -234,13 +234,20 @@ def save_last_stream_ordering_and_success(self, last_stream_ordering): return self.last_stream_ordering = last_stream_ordering - yield self.store.update_pusher_last_stream_ordering_and_success( - self.app_id, - self.email, - self.user_id, - last_stream_ordering, - self.clock.time_msec(), + pusher_still_exists = ( + yield self.store.update_pusher_last_stream_ordering_and_success( + self.app_id, + self.email, + self.user_id, + last_stream_ordering, + self.clock.time_msec(), + ) ) + if not pusher_still_exists: + # The pusher has been deleted while we were processing, so + # lets just stop and return. + self.on_stop() + return def seconds_until(self, ts_msec): secs = (ts_msec - self.clock.time_msec()) / 1000 diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 5b15b0dbe7c7..bd5d53af91cf 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -199,13 +199,21 @@ def _unsafe_process(self): http_push_processed_counter.inc() self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.last_stream_ordering = push_action["stream_ordering"] - yield self.store.update_pusher_last_stream_ordering_and_success( - self.app_id, - self.pushkey, - self.user_id, - self.last_stream_ordering, - self.clock.time_msec(), + pusher_still_exists = ( + yield self.store.update_pusher_last_stream_ordering_and_success( + self.app_id, + self.pushkey, + self.user_id, + self.last_stream_ordering, + self.clock.time_msec(), + ) ) + if not pusher_still_exists: + # The pusher has been deleted while we were processing, so + # lets just stop and return. + self.on_stop() + return + if self.failing_since: self.failing_since = None yield self.store.update_pusher_failing_since( @@ -234,12 +242,17 @@ def _unsafe_process(self): ) self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC self.last_stream_ordering = push_action["stream_ordering"] - yield self.store.update_pusher_last_stream_ordering( + pusher_still_exists = yield self.store.update_pusher_last_stream_ordering( self.app_id, self.pushkey, self.user_id, self.last_stream_ordering, ) + if not pusher_still_exists: + # The pusher has been deleted while we were processing, so + # lets just stop and return. 
+ self.on_stop() + return self.failing_since = None yield self.store.update_pusher_failing_since( diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index be3d4d9ded1f..888035fe8697 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -308,22 +308,36 @@ def update_pusher_last_stream_ordering( def update_pusher_last_stream_ordering_and_success( self, app_id, pushkey, user_id, last_stream_ordering, last_success ): - yield self._simple_update_one( - "pushers", - {"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, - { + """Update the last stream ordering position we've processed up to for + the given pusher. + + Args: + app_id (str) + pushkey (str) + last_stream_ordering (int) + last_success (int) + + Returns: + Deferred[bool]: Whether the pusher stil exists or not. + """ + updated = yield self._simple_update( + table="pushers", + keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, + updatevalues={ "last_stream_ordering": last_stream_ordering, "last_success": last_success, }, desc="update_pusher_last_stream_ordering_and_success", ) + return bool(updated) + @defer.inlineCallbacks def update_pusher_failing_since(self, app_id, pushkey, user_id, failing_since): - yield self._simple_update_one( - "pushers", - {"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, - {"failing_since": failing_since}, + yield self._simple_update( + table="pushers", + keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, + updatevalues={"failing_since": failing_since}, desc="update_pusher_failing_since", ) From 312cc48e2ba8bfb703ed9f55be76714179723f67 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:45:09 +0100 Subject: [PATCH 109/136] Newsfile --- changelog.d/5809.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5809.misc diff --git a/changelog.d/5809.misc b/changelog.d/5809.misc new file mode 100644 index 000000000000..82a812480e0d --- /dev/null +++ b/changelog.d/5809.misc @@ -0,0 +1 @@ +Handle pusher being deleted during processing rather than logging an exception. From a8f40a8302fb1b9c95287d87a56440bb9b201435 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:47:31 +0100 Subject: [PATCH 110/136] Return 502 not 500 when failing to reach any remote server. --- synapse/federation/federation_client.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 6e03ce21af57..bec3080895d6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -511,9 +511,8 @@ def _try_destination_list(self, description, destinations, callback): The [Deferred] result of callback, if it succeeds Raises: - SynapseError if the chosen remote server returns a 300/400 code. - - RuntimeError if no servers were reachable. + SynapseError if the chosen remote server returns a 300/400 code, or + no servers were reachable. 
""" for destination in destinations: if destination == self.server_name: @@ -538,7 +537,7 @@ def _try_destination_list(self, description, destinations, callback): except Exception: logger.warn("Failed to %s via %s", description, destination, exc_info=1) - raise RuntimeError("Failed to %s via any server" % (description,)) + raise SynapseError(502, "Failed to %s via any server" % (description,)) def make_membership_event( self, destinations, room_id, user_id, membership, content, params From 93fd3cbc7a130c31c320fb6c4b4db1477ea827d1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 1 Aug 2019 13:48:52 +0100 Subject: [PATCH 111/136] Newsfile --- changelog.d/5810.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5810.misc diff --git a/changelog.d/5810.misc b/changelog.d/5810.misc new file mode 100644 index 000000000000..0a5ccbbb3f3d --- /dev/null +++ b/changelog.d/5810.misc @@ -0,0 +1 @@ +Return 502 not 500 when failing to reach any remote server. From 5d018d23f01afe995c435d1d2951a0e7f5b5feb1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 1 Aug 2019 13:54:56 +0100 Subject: [PATCH 112/136] Have ClientReaderSlavedStore inherit RegistrationStore (#5806) Fixes #5803 --- changelog.d/5806.bugfix | 1 + synapse/storage/registration.py | 42 ++++++++++++++++----------------- 2 files changed, 22 insertions(+), 21 deletions(-) create mode 100644 changelog.d/5806.bugfix diff --git a/changelog.d/5806.bugfix b/changelog.d/5806.bugfix new file mode 100644 index 000000000000..c5ca0f562917 --- /dev/null +++ b/changelog.d/5806.bugfix @@ -0,0 +1 @@ +Fix error when trying to login as a deactivated user when using a worker to handle login. diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 999c10a3086d..55e4e84d71bd 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -569,6 +569,27 @@ def get_id_servers_user_bound(self, user_id, medium, address): desc="get_id_servers_user_bound", ) + @cachedInlineCallbacks() + def get_user_deactivated_status(self, user_id): + """Retrieve the value for the `deactivated` property for the provided user. + + Args: + user_id (str): The ID of the user to retrieve the status for. + + Returns: + defer.Deferred(bool): The requested value. + """ + + res = yield self._simple_select_one_onecol( + table="users", + keyvalues={"name": user_id}, + retcol="deactivated", + desc="get_user_deactivated_status", + ) + + # Convert the integer into a boolean. + return res == 1 + class RegistrationStore( RegistrationWorkerStore, background_updates.BackgroundUpdateStore @@ -1317,24 +1338,3 @@ def set_user_deactivated_status(self, user_id, deactivated): user_id, deactivated, ) - - @cachedInlineCallbacks() - def get_user_deactivated_status(self, user_id): - """Retrieve the value for the `deactivated` property for the provided user. - - Args: - user_id (str): The ID of the user to retrieve the status for. - - Returns: - defer.Deferred(bool): The requested value. - """ - - res = yield self._simple_select_one_onecol( - table="users", - keyvalues={"name": user_id}, - retcol="deactivated", - desc="get_user_deactivated_status", - ) - - # Convert the integer into a boolean. - return res == 1 From 6881f21f3e249b5ed666a1e695c21d8695e3d8be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Aug 2019 12:55:36 +0100 Subject: [PATCH 113/136] Handle TimelineBatch being limited and empty. 
This hopefully addresses #5407 by gracefully handling an empty but limited TimelineBatch. We also add some logging to figure out how this is happening. --- synapse/handlers/sync.py | 43 ++++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4007284e5b3d..98da2318a0e4 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -781,9 +781,17 @@ def compute_state_delta( lazy_load_members=lazy_load_members, ) elif batch.limited: - state_at_timeline_start = yield self.store.get_state_ids_for_event( - batch.events[0].event_id, state_filter=state_filter - ) + if batch: + state_at_timeline_start = yield self.store.get_state_ids_for_event( + batch.events[0].event_id, state_filter=state_filter + ) + else: + # Its not clear how we get here, but empirically we do + # (#5407). Logging has been added elsewhere to try and + # figure out where this state comes from. + state_at_timeline_start = yield self.get_state_at( + room_id, stream_position=now_token, state_filter=state_filter + ) # for now, we disable LL for gappy syncs - see # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 @@ -803,9 +811,17 @@ def compute_state_delta( room_id, stream_position=since_token, state_filter=state_filter ) - current_state_ids = yield self.store.get_state_ids_for_event( - batch.events[-1].event_id, state_filter=state_filter - ) + if batch: + current_state_ids = yield self.store.get_state_ids_for_event( + batch.events[-1].event_id, state_filter=state_filter + ) + else: + # Its not clear how we get here, but empirically we do + # (#5407). Logging has been added elsewhere to try and + # figure out where this state comes from. + current_state_ids = yield self.get_state_at( + room_id, stream_position=now_token, state_filter=state_filter + ) state_ids = _calculate_state( timeline_contains=timeline_state, @@ -1755,6 +1771,21 @@ def _generate_room_entry( newly_joined_room=newly_joined, ) + if not batch and batch.limited: + # This resulted in #5407, which is weird, so lets log! We do it + # here as we have the maximum amount of information. + user_id = sync_result_builder.sync_config.user.to_string() + logger.info( + "Issue #5407: Found limited batch with no events. user %s, room %s," + " sync_config %s, newly_joined %s, events %s, batch %s.", + user_id, + room_id, + sync_config, + newly_joined, + events, + batch, + ) + if newly_joined: # debug for https://github.com/matrix-org/synapse/issues/4422 issue4422_logger.debug( From 977fa4a7170fdc79f9a7e477ad6c6804681f38cd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Aug 2019 13:00:45 +0100 Subject: [PATCH 114/136] Newsfile --- changelog.d/5825.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5825.bugfix diff --git a/changelog.d/5825.bugfix b/changelog.d/5825.bugfix new file mode 100644 index 000000000000..fb2c6f821d9f --- /dev/null +++ b/changelog.d/5825.bugfix @@ -0,0 +1 @@ +Fix bug where user `/sync` stream could get wedged in rare circumstances. From bf4db429209aa8ed6b8926bc8a17cbd1489d97f8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 6 Aug 2019 13:27:22 +0100 Subject: [PATCH 115/136] Don't unnecessarily block notifying of new events. When persisting events we calculate new stream orderings up front. Before we notify about an event all events with lower stream orderings must have finished being persisted. 
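
To illustrate the invariant (a toy sketch, not Synapse code): an event at
stream position N may only be announced once every lower position has been
persisted, so one slow room holding a low ordering stalls notifications for
every other room's later orderings.

    # Hypothetical model of the constraint described above.
    def can_notify(n, persisted):
        return all(p in persisted for p in range(1, n))

    assert can_notify(3, {1, 2})
    assert not can_notify(3, {2})  # position 1 (the slow room) is still pending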
This PR moves the assignment of stream orderings until *after* we have
calculated the new current state and split the batch of events into
separate chunks for persistence. This means that if it takes a long time
to calculate new current state then it will not block events in other
rooms being notified about.

This should help reduce some global pauses in the events stream which
can last for tens of seconds (if not longer), caused by some
particularly expensive state resolutions.
---
 synapse/storage/events.py | 270 ++++++++++++++++++++------------------
 1 file changed, 142 insertions(+), 128 deletions(-)

diff --git a/synapse/storage/events.py b/synapse/storage/events.py
index 88c01801164f..ac876287fc10 100644
--- a/synapse/storage/events.py
+++ b/synapse/storage/events.py
@@ -364,147 +364,161 @@ def _persist_events(
         if not events_and_contexts:
             return

-        if backfilled:
-            stream_ordering_manager = self._backfill_id_gen.get_next_mult(
-                len(events_and_contexts)
-            )
-        else:
-            stream_ordering_manager = self._stream_id_gen.get_next_mult(
-                len(events_and_contexts)
-            )
-
-        with stream_ordering_manager as stream_orderings:
-            for (event, context), stream in zip(events_and_contexts, stream_orderings):
-                event.internal_metadata.stream_ordering = stream
-
-            chunks = [
-                events_and_contexts[x : x + 100]
-                for x in range(0, len(events_and_contexts), 100)
-            ]
-
-            for chunk in chunks:
-                # We can't easily parallelize these since different chunks
-                # might contain the same event. :(
+        chunks = [
+            events_and_contexts[x : x + 100]
+            for x in range(0, len(events_and_contexts), 100)
+        ]

-                # NB: Assumes that we are only persisting events for one room
-                # at a time.
+        for chunk in chunks:
+            # We can't easily parallelize these since different chunks
+            # might contain the same event. :(

-                # map room_id->list[event_ids] giving the new forward
-                # extremities in each room
-                new_forward_extremeties = {}
+            # NB: Assumes that we are only persisting events for one room
+            # at a time.

-                # map room_id->(type,state_key)->event_id tracking the full
-                # state in each room after adding these events.
-                # This is simply used to prefill the get_current_state_ids
-                # cache
-                current_state_for_room = {}
+            # map room_id->list[event_ids] giving the new forward
+            # extremities in each room
+            new_forward_extremeties = {}

-                # map room_id->(to_delete, to_insert) where to_delete is a list
-                # of type/state keys to remove from current state, and to_insert
-                # is a map (type,key)->event_id giving the state delta in each
-                # room
-                state_delta_for_room = {}
+            # map room_id->(type,state_key)->event_id tracking the full
+            # state in each room after adding these events.
+            # This is simply used to prefill the get_current_state_ids
+            # cache
+            current_state_for_room = {}

-                if not backfilled:
-                    with Measure(self._clock, "_calculate_state_and_extrem"):
-                        # Work out the new "current state" for each room.
-                        # We do this by working out what the new extremities are and then
-                        # calculating the state from that.
- events_by_room = {} - for event, context in chunk: - events_by_room.setdefault(event.room_id, []).append( - (event, context) - ) + # map room_id->(to_delete, to_insert) where to_delete is a list + # of type/state keys to remove from current state, and to_insert + # is a map (type,key)->event_id giving the state delta in each + # room + state_delta_for_room = {} - for room_id, ev_ctx_rm in iteritems(events_by_room): - latest_event_ids = yield self.get_latest_event_ids_in_room( - room_id - ) - new_latest_event_ids = yield self._calculate_new_extremities( - room_id, ev_ctx_rm, latest_event_ids + if not backfilled: + with Measure(self._clock, "_calculate_state_and_extrem"): + # Work out the new "current state" for each room. + # We do this by working out what the new extremities are and then + # calculating the state from that. + events_by_room = {} + for event, context in chunk: + events_by_room.setdefault(event.room_id, []).append( + (event, context) + ) + + for room_id, ev_ctx_rm in iteritems(events_by_room): + latest_event_ids = yield self.get_latest_event_ids_in_room( + room_id + ) + new_latest_event_ids = yield self._calculate_new_extremities( + room_id, ev_ctx_rm, latest_event_ids + ) + + latest_event_ids = set(latest_event_ids) + if new_latest_event_ids == latest_event_ids: + # No change in extremities, so no change in state + continue + + # there should always be at least one forward extremity. + # (except during the initial persistence of the send_join + # results, in which case there will be no existing + # extremities, so we'll `continue` above and skip this bit.) + assert new_latest_event_ids, "No forward extremities left!" + + new_forward_extremeties[room_id] = new_latest_event_ids + + len_1 = ( + len(latest_event_ids) == 1 + and len(new_latest_event_ids) == 1 + ) + if len_1: + all_single_prev_not_state = all( + len(event.prev_event_ids()) == 1 + and not event.is_state() + for event, ctx in ev_ctx_rm ) - - latest_event_ids = set(latest_event_ids) - if new_latest_event_ids == latest_event_ids: - # No change in extremities, so no change in state + # Don't bother calculating state if they're just + # a long chain of single ancestor non-state events. + if all_single_prev_not_state: continue - # there should always be at least one forward extremity. - # (except during the initial persistence of the send_join - # results, in which case there will be no existing - # extremities, so we'll `continue` above and skip this bit.) - assert new_latest_event_ids, "No forward extremities left!" - - new_forward_extremeties[room_id] = new_latest_event_ids - - len_1 = ( - len(latest_event_ids) == 1 - and len(new_latest_event_ids) == 1 + state_delta_counter.inc() + if len(new_latest_event_ids) == 1: + state_delta_single_event_counter.inc() + + # This is a fairly handwavey check to see if we could + # have guessed what the delta would have been when + # processing one of these events. + # What we're interested in is if the latest extremities + # were the same when we created the event as they are + # now. When this server creates a new event (as opposed + # to receiving it over federation) it will use the + # forward extremities as the prev_events, so we can + # guess this by looking at the prev_events and checking + # if they match the current forward extremities. 
+                        for ev, _ in ev_ctx_rm:
+                            prev_event_ids = set(ev.prev_event_ids())
+                            if latest_event_ids == prev_event_ids:
+                                state_delta_reuse_delta_counter.inc()
+                                break
+
+                        logger.info("Calculating state delta for room %s", room_id)
+                        with Measure(
+                            self._clock, "persist_events.get_new_state_after_events"
+                        ):
+                            res = yield self._get_new_state_after_events(
+                                room_id,
+                                ev_ctx_rm,
+                                latest_event_ids,
+                                new_latest_event_ids,
                             )
-                            if len_1:
-                                all_single_prev_not_state = all(
-                                    len(event.prev_event_ids()) == 1
-                                    and not event.is_state()
-                                    for event, ctx in ev_ctx_rm
                                 )
-                                # Don't bother calculating state if they're just
-                                # a long chain of single ancestor non-state events.
-                                if all_single_prev_not_state:
                                     continue
-
-                            state_delta_counter.inc()
-                            if len(new_latest_event_ids) == 1:
-                                state_delta_single_event_counter.inc()
-
-                            # This is a fairly handwavey check to see if we could
-                            # have guessed what the delta would have been when
-                            # processing one of these events.
-                            # What we're interested in is if the latest extremities
-                            # were the same when we created the event as they are
-                            # now. When this server creates a new event (as opposed
-                            # to receiving it over federation) it will use the
-                            # forward extremities as the prev_events, so we can
-                            # guess this by looking at the prev_events and checking
-                            # if they match the current forward extremities.
-                            for ev, _ in ev_ctx_rm:
-                                prev_event_ids = set(ev.prev_event_ids())
-                                if latest_event_ids == prev_event_ids:
-                                    state_delta_reuse_delta_counter.inc()
-                                    break
-
-                            logger.info("Calculating state delta for room %s", room_id)
+                        current_state, delta_ids = res
+
+                        # If either is not None then there has been a change,
+                        # and we need to work out the delta (or use that
+                        # given)
+                        if delta_ids is not None:
+                            # If there is a delta we know that we've
+                            # only added or replaced state, never
+                            # removed keys entirely.
+                            state_delta_for_room[room_id] = ([], delta_ids)
+                        elif current_state is not None:
                             with Measure(
-                                self._clock, "persist_events.get_new_state_after_events"
+                                self._clock, "persist_events.calculate_state_delta"
                             ):
-                                res = yield self._get_new_state_after_events(
-                                    room_id,
-                                    ev_ctx_rm,
-                                    latest_event_ids,
-                                    new_latest_event_ids,
+                                delta = yield self._calculate_state_delta(
+                                    room_id, current_state
                                 )
-                                current_state, delta_ids = res
-
-                            # If either are not None then there has been a change,
-                            # and we need to work out the delta (or use that
-                            # given)
-                            if delta_ids is not None:
-                                # If there is a delta we know that we've
-                                # only added or replaced state, never
-                                # removed keys entirely.
-                                state_delta_for_room[room_id] = ([], delta_ids)
-                            elif current_state is not None:
-                                with Measure(
-                                    self._clock, "persist_events.calculate_state_delta"
-                                ):
-                                    delta = yield self._calculate_state_delta(
-                                        room_id, current_state
-                                    )
-                                    state_delta_for_room[room_id] = delta
-
-                            # If we have the current_state then lets prefill
-                            # the cache with it.
-                            if current_state is not None:
-                                current_state_for_room[room_id] = current_state
+                                state_delta_for_room[room_id] = delta
+
+                        # If we have the current_state then let's prefill
+                        # the cache with it.
+                        if current_state is not None:
+                            current_state_for_room[room_id] = current_state
+
+            # We want to calculate the stream orderings as late as possible, as
+            # we only notify after all events with a lesser stream ordering have
+            # been persisted. I.e. if we spend 10s inside the with block then
+            # that will delay all subsequent events from being notified about.
+            # Hence we do it down here rather than wrapping the entire
+            # function.
+            #
+            # It's safe to do this after calculating the state deltas etc. as we
+            # only need to protect the *persistence* of the events. This is to
+            # ensure that queries of the form "fetch events since X" don't
+            # return events and stream positions after events that are still in
+            # flight, as otherwise subsequent requests "fetch event since Y"
+            # will not return those events.
+            #
+            # Note: Multiple instances of this function cannot be in flight at
+            # the same time for the same room.
+            if backfilled:
+                stream_ordering_manager = self._backfill_id_gen.get_next_mult(
+                    len(chunk)
+                )
+            else:
+                stream_ordering_manager = self._stream_id_gen.get_next_mult(len(chunk))
+
+            with stream_ordering_manager as stream_orderings:
+                for (event, context), stream in zip(chunk, stream_orderings):
+                    event.internal_metadata.stream_ordering = stream

             yield self.runInteraction(
                 "persist_events",

From c32d3590943542b2d433bb50a7b7fe1529c05acd Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 6 Aug 2019 13:33:42 +0100
Subject: [PATCH 116/136] Newsfile

---
 changelog.d/5826.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5826.misc

diff --git a/changelog.d/5826.misc b/changelog.d/5826.misc
new file mode 100644
index 000000000000..9abed11bbe2a
--- /dev/null
+++ b/changelog.d/5826.misc
@@ -0,0 +1 @@
+Reduce global pauses in the events stream caused by expensive state resolution during persistence.

From edeae53221f35a8308c3946369c2b433759091c5 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 6 Aug 2019 13:33:55 +0100
Subject: [PATCH 117/136] Return 404 instead of 403 when retrieving an event
 without perms (#5798)

Part of fixing matrix-org/sytest#652

Sytest PR: matrix-org/sytest#667
---
 changelog.d/5798.bugfix        |  1 +
 synapse/rest/client/v1/room.py | 14 +++++++++++---
 2 files changed, 12 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/5798.bugfix

diff --git a/changelog.d/5798.bugfix b/changelog.d/5798.bugfix
new file mode 100644
index 000000000000..7db2c37af5d9
--- /dev/null
+++ b/changelog.d/5798.bugfix
@@ -0,0 +1 @@
+Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions.
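A note on the stream-ordering change in patch 115 above: it relies on a single
invariant, namely that readers of the events stream may only be shown orderings
below the lowest ordering that is still being persisted, so every moment an
allocated ordering stays in flight delays notifications for everyone. The
following is a minimal, self-contained sketch of that allocator contract; the
class and its internals are hypothetical stand-ins rather than Synapse's actual
generator.

from contextlib import contextmanager
import itertools
import threading


class SketchStreamIdGenerator(object):
    """Hypothetical stand-in for Synapse's stream ID generator."""

    def __init__(self):
        self._lock = threading.Lock()
        self._counter = itertools.count(1)
        self._in_flight = set()
        self._highest_allocated = 0
        self._current = 0  # highest ordering readers may be told about

    @contextmanager
    def get_next_mult(self, n):
        with self._lock:
            ids = [next(self._counter) for _ in range(n)]
            self._in_flight.update(ids)
            self._highest_allocated = ids[-1]
        try:
            # The caller persists events with these orderings inside the block.
            yield ids
        finally:
            with self._lock:
                self._in_flight.difference_update(ids)
                # Readers may only advance to just below the lowest ordering
                # that is still being persisted.
                if self._in_flight:
                    self._current = min(self._in_flight) - 1
                else:
                    self._current = self._highest_allocated

Under this contract, taking the orderings only after the expensive state-delta
work is finished, as the patch does, keeps the in-flight window short, which is
what shrinks the global pauses described in the commit message.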
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 6fe1eddcce4a..4b2344e69644 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -568,14 +568,22 @@ def __init__(self, hs):
     @defer.inlineCallbacks
     def on_GET(self, request, room_id, event_id):
         requester = yield self.auth.get_user_by_req(request, allow_guest=True)
-        event = yield self.event_handler.get_event(requester.user, room_id, event_id)
+        try:
+            event = yield self.event_handler.get_event(
+                requester.user, room_id, event_id
+            )
+        except AuthError:
+            # This endpoint is supposed to return a 404 when the requester does
+            # not have permission to access the event
+            # https://matrix.org/docs/spec/client_server/r0.5.0#get-matrix-client-r0-rooms-roomid-event-eventid
+            raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)

         time_now = self.clock.time_msec()
         if event:
             event = yield self._event_serializer.serialize_event(event, time_now)
             return (200, event)
-        else:
-            return (404, "Event not found.")
+
+        raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)


 class RoomEventContextServlet(RestServlet):

From af9f1c07646031bf267aa20f2629d5b96c6603b6 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 6 Aug 2019 16:27:46 +0100
Subject: [PATCH 118/136] Add a lower bound for the TTL on .well-known results.

Fetching the .well-known for every single request we send costs both us
and the remote server, so we add a minimum cache period. This is set to
5m so that we still honour the basic premise of "refetch frequently".
---
 synapse/http/federation/matrix_federation_agent.py    | 4 ++++
 tests/http/federation/test_matrix_federation_agent.py | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index a0d513983914..79e488c94254 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -47,6 +47,9 @@
 # cap for .well-known cache period
 WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600

+# lower bound for .well-known cache period
+WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60
+
 logger = logging.getLogger(__name__)

 well_known_cache = TTLCache("well-known")
@@ -356,6 +359,7 @@ def _do_get_well_known(self, server_name):
             cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER)
         else:
             cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
+            cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)

         return (result, cache_period)

diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index 4255add09727..5e709c0c1739 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -953,7 +953,7 @@ def test_well_known_cache(self):
         well_known_server = self._handle_well_known_connection(
             client_factory,
             expected_sni=b"testserv",
-            response_headers={b"Cache-Control": b"max-age=10"},
+            response_headers={b"Cache-Control": b"max-age=1000"},
             content=b'{ "m.server": "target-server" }',
         )

@@ -969,7 +969,7 @@ def test_well_known_cache(self):
         self.assertEqual(r, b"target-server")

         # expire the cache
-        self.reactor.pump((10.0,))
+        self.reactor.pump((1000.0,))

         # now it should connect again
         fetch_d = self.do_get_well_known(b"testserv")

From 107ad133fcf5b5a87e2aa1d53ab415aa39a01266 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 7 Aug 2019 15:36:38 +0100
Subject:
 [PATCH 119/136] Move well known lookup into a separate class

---
 .../federation/matrix_federation_agent.py     | 166 ++----------------
 .../http/federation/well_known_resolver.py    | 184 ++++++++++++++++++
 .../test_matrix_federation_agent.py           |  39 ++--
 3 files changed, 216 insertions(+), 173 deletions(-)
 create mode 100644 synapse/http/federation/well_known_resolver.py

diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py
index 79e488c94254..71a15f434d6d 100644
--- a/synapse/http/federation/matrix_federation_agent.py
+++ b/synapse/http/federation/matrix_federation_agent.py
@@ -12,10 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import json
+
 import logging
-import random
-import time

 import attr
 from netaddr import IPAddress
@@ -24,34 +22,16 @@
 from twisted.internet import defer
 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
 from twisted.internet.interfaces import IStreamClientEndpoint
-from twisted.web.client import URI, Agent, HTTPConnectionPool, RedirectAgent, readBody
-from twisted.web.http import stringToDatetime
+from twisted.web.client import URI, Agent, HTTPConnectionPool
 from twisted.web.http_headers import Headers
 from twisted.web.iweb import IAgent

 from synapse.http.federation.srv_resolver import SrvResolver, pick_server_from_list
+from synapse.http.federation.well_known_resolver import WellKnownResolver
 from synapse.logging.context import make_deferred_yieldable
 from synapse.util import Clock
-from synapse.util.caches.ttlcache import TTLCache
-from synapse.util.metrics import Measure
-
-# period to cache .well-known results for by default
-WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
-
-# jitter to add to the .well-known default cache ttl
-WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60
-
-# period to cache failure to fetch .well-known for
-WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
-
-# cap for .well-known cache period
-WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
-
-# lower bound for .well-known cache period
-WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60

 logger = logging.getLogger(__name__)
-well_known_cache = TTLCache("well-known")


 @implementer(IAgent)
@@ -81,7 +61,7 @@ def __init__(
         reactor,
         tls_client_options_factory,
         _srv_resolver=None,
-        _well_known_cache=well_known_cache,
+        _well_known_cache=None,
     ):
         self._reactor = reactor
         self._clock = Clock(reactor)
@@ -96,20 +76,15 @@ def __init__(
         self._pool.maxPersistentPerHost = 5
         self._pool.cachedConnectionTimeout = 2 * 60

-        _well_known_agent = RedirectAgent(
-            Agent(
+        self._well_known_resolver = WellKnownResolver(
+            self._reactor,
+            agent=Agent(
                 self._reactor,
                 pool=self._pool,
                 contextFactory=tls_client_options_factory,
-            )
+            ),
+            well_known_cache=_well_known_cache,
         )
-        self._well_known_agent = _well_known_agent
-
-        # our cache of .well-known lookup results, mapping from server name
-        # to delegated name. The values can be:
-        #     `bytes`: a valid server-name
-        #     `None`: there is no (valid) .well-known here
-        self._well_known_cache = _well_known_cache

     @defer.inlineCallbacks
     def request(self, method, uri, headers=None, bodyProducer=None):
@@ -220,7 +195,10 @@ def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):

         if lookup_well_known:
             # try a .well-known lookup
-            well_known_server = yield self._get_well_known(parsed_uri.host)
+            well_known_result = yield self._well_known_resolver.get_well_known(
+                parsed_uri.host
+            )
+            well_known_server = well_known_result.delegated_server

             if well_known_server:
                 # if we found a .well-known, start again, but don't do another
@@ -283,86 +261,6 @@ def _route_matrix_uri(self, parsed_uri, lookup_well_known=True):
             target_port=port,
         )

-    @defer.inlineCallbacks
-    def _get_well_known(self, server_name):
-        """Attempt to fetch and parse a .well-known file for the given server
-
-        Args:
-            server_name (bytes): name of the server, from the requested url
-
-        Returns:
-            Deferred[bytes|None]: either the new server name, from the .well-known, or
-                None if there was no .well-known file.
-        """
-        try:
-            result = self._well_known_cache[server_name]
-        except KeyError:
-            # TODO: should we linearise so that we don't end up doing two .well-known
-            # requests for the same server in parallel?
-            with Measure(self._clock, "get_well_known"):
-                result, cache_period = yield self._do_get_well_known(server_name)
-
-            if cache_period > 0:
-                self._well_known_cache.set(server_name, result, cache_period)
-
-        return result
-
-    @defer.inlineCallbacks
-    def _do_get_well_known(self, server_name):
-        """Actually fetch and parse a .well-known, without checking the cache
-
-        Args:
-            server_name (bytes): name of the server, from the requested url
-
-        Returns:
-            Deferred[Tuple[bytes|None|object],int]:
-                result, cache period, where result is one of:
-                - the new server name from the .well-known (as a `bytes`)
-                - None if there was no .well-known file.
- - INVALID_WELL_KNOWN if the .well-known was invalid - """ - uri = b"https://%s/.well-known/matrix/server" % (server_name,) - uri_str = uri.decode("ascii") - logger.info("Fetching %s", uri_str) - try: - response = yield make_deferred_yieldable( - self._well_known_agent.request(b"GET", uri) - ) - body = yield make_deferred_yieldable(readBody(response)) - if response.code != 200: - raise Exception("Non-200 response %s" % (response.code,)) - - parsed_body = json.loads(body.decode("utf-8")) - logger.info("Response from .well-known: %s", parsed_body) - if not isinstance(parsed_body, dict): - raise Exception("not a dict") - if "m.server" not in parsed_body: - raise Exception("Missing key 'm.server'") - except Exception as e: - logger.info("Error fetching %s: %s", uri_str, e) - - # add some randomness to the TTL to avoid a stampeding herd every hour - # after startup - cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - return (None, cache_period) - - result = parsed_body["m.server"].encode("ascii") - - cache_period = _cache_period_from_headers( - response.headers, time_now=self._reactor.seconds - ) - if cache_period is None: - cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD - # add some randomness to the TTL to avoid a stampeding herd every 24 hours - # after startup - cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) - else: - cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) - cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) - - return (result, cache_period) - @implementer(IStreamClientEndpoint) class LoggingHostnameEndpoint(object): @@ -378,44 +276,6 @@ def connect(self, protocol_factory): return self.ep.connect(protocol_factory) -def _cache_period_from_headers(headers, time_now=time.time): - cache_controls = _parse_cache_control(headers) - - if b"no-store" in cache_controls: - return 0 - - if b"max-age" in cache_controls: - try: - max_age = int(cache_controls[b"max-age"]) - return max_age - except ValueError: - pass - - expires = headers.getRawHeaders(b"expires") - if expires is not None: - try: - expires_date = stringToDatetime(expires[-1]) - return expires_date - time_now() - except ValueError: - # RFC7234 says 'A cache recipient MUST interpret invalid date formats, - # especially the value "0", as representing a time in the past (i.e., - # "already expired"). - return 0 - - return None - - -def _parse_cache_control(headers): - cache_controls = {} - for hdr in headers.getRawHeaders(b"cache-control", []): - for directive in hdr.split(b","): - splits = [x.strip() for x in directive.split(b"=", 1)] - k = splits[0].lower() - v = splits[1] if len(splits) > 1 else None - cache_controls[k] = v - return cache_controls - - @attr.s class _RoutingResult(object): """The result returned by `_route_matrix_uri`. diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py new file mode 100644 index 000000000000..bab4ab015e07 --- /dev/null +++ b/synapse/http/federation/well_known_resolver.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +import random +import time + +import attr + +from twisted.internet import defer +from twisted.web.client import RedirectAgent, readBody +from twisted.web.http import stringToDatetime + +from synapse.logging.context import make_deferred_yieldable +from synapse.util import Clock +from synapse.util.caches.ttlcache import TTLCache +from synapse.util.metrics import Measure + +# period to cache .well-known results for by default +WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600 + +# jitter to add to the .well-known default cache ttl +WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 10 * 60 + +# period to cache failure to fetch .well-known for +WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600 + +# cap for .well-known cache period +WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600 + +# lower bound for .well-known cache period +WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60 + +logger = logging.getLogger(__name__) + + +@attr.s(slots=True, frozen=True) +class WellKnownLookupResult(object): + delegated_server = attr.ib() + + +class WellKnownResolver(object): + """Handles well-known lookups for matrix servers. + """ + + def __init__(self, reactor, agent, well_known_cache=None): + self._reactor = reactor + self._clock = Clock(reactor) + + if well_known_cache is None: + well_known_cache = TTLCache("well-known") + + self._well_known_cache = well_known_cache + self._well_known_agent = RedirectAgent(agent) + + @defer.inlineCallbacks + def get_well_known(self, server_name): + """Attempt to fetch and parse a .well-known file for the given server + + Args: + server_name (bytes): name of the server, from the requested url + + Returns: + Deferred[WellKnownLookupResult]: The result of the lookup + """ + try: + result = self._well_known_cache[server_name] + except KeyError: + # TODO: should we linearise so that we don't end up doing two .well-known + # requests for the same server in parallel? + with Measure(self._clock, "get_well_known"): + result, cache_period = yield self._do_get_well_known(server_name) + + if cache_period > 0: + self._well_known_cache.set(server_name, result, cache_period) + + return WellKnownLookupResult(delegated_server=result) + + @defer.inlineCallbacks + def _do_get_well_known(self, server_name): + """Actually fetch and parse a .well-known, without checking the cache + + Args: + server_name (bytes): name of the server, from the requested url + + Returns: + Deferred[Tuple[bytes|None|object],int]: + result, cache period, where result is one of: + - the new server name from the .well-known (as a `bytes`) + - None if there was no .well-known file. 
+ - INVALID_WELL_KNOWN if the .well-known was invalid + """ + uri = b"https://%s/.well-known/matrix/server" % (server_name,) + uri_str = uri.decode("ascii") + logger.info("Fetching %s", uri_str) + try: + response = yield make_deferred_yieldable( + self._well_known_agent.request(b"GET", uri) + ) + body = yield make_deferred_yieldable(readBody(response)) + if response.code != 200: + raise Exception("Non-200 response %s" % (response.code,)) + + parsed_body = json.loads(body.decode("utf-8")) + logger.info("Response from .well-known: %s", parsed_body) + if not isinstance(parsed_body, dict): + raise Exception("not a dict") + if "m.server" not in parsed_body: + raise Exception("Missing key 'm.server'") + except Exception as e: + logger.info("Error fetching %s: %s", uri_str, e) + + # add some randomness to the TTL to avoid a stampeding herd every hour + # after startup + cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD + cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + return (None, cache_period) + + result = parsed_body["m.server"].encode("ascii") + + cache_period = _cache_period_from_headers( + response.headers, time_now=self._reactor.seconds + ) + if cache_period is None: + cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD + # add some randomness to the TTL to avoid a stampeding herd every 24 hours + # after startup + cache_period += random.uniform(0, WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER) + else: + cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD) + cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD) + + return (result, cache_period) + + +def _cache_period_from_headers(headers, time_now=time.time): + cache_controls = _parse_cache_control(headers) + + if b"no-store" in cache_controls: + return 0 + + if b"max-age" in cache_controls: + try: + max_age = int(cache_controls[b"max-age"]) + return max_age + except ValueError: + pass + + expires = headers.getRawHeaders(b"expires") + if expires is not None: + try: + expires_date = stringToDatetime(expires[-1]) + return expires_date - time_now() + except ValueError: + # RFC7234 says 'A cache recipient MUST interpret invalid date formats, + # especially the value "0", as representing a time in the past (i.e., + # "already expired"). 
+ return 0 + + return None + + +def _parse_cache_control(headers): + cache_controls = {} + for hdr in headers.getRawHeaders(b"cache-control", []): + for directive in hdr.split(b","): + splits = [x.strip() for x in directive.split(b"=", 1)] + k = splits[0].lower() + v = splits[1] if len(splits) > 1 else None + cache_controls[k] = v + return cache_controls diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 5e709c0c1739..1435baede26a 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -25,17 +25,19 @@ from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory from twisted.web._newclient import ResponseNeverReceived +from twisted.web.client import Agent from twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS from synapse.config.homeserver import HomeServerConfig from synapse.crypto.context_factory import ClientTLSOptionsFactory -from synapse.http.federation.matrix_federation_agent import ( - MatrixFederationAgent, +from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent +from synapse.http.federation.srv_resolver import Server +from synapse.http.federation.well_known_resolver import ( + WellKnownResolver, _cache_period_from_headers, ) -from synapse.http.federation.srv_resolver import Server from synapse.logging.context import LoggingContext from synapse.util.caches.ttlcache import TTLCache @@ -79,9 +81,10 @@ def setUp(self): self._config = config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") + self.tls_factory = ClientTLSOptionsFactory(config) self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(config), + tls_client_options_factory=self.tls_factory, _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, ) @@ -928,20 +931,16 @@ def test_idna_srv_target(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - @defer.inlineCallbacks - def do_get_well_known(self, serv): - try: - result = yield self.agent._get_well_known(serv) - logger.info("Result from well-known fetch: %s", result) - except Exception as e: - logger.warning("Error fetching well-known: %s", e) - raise - return result - def test_well_known_cache(self): + well_known_resolver = WellKnownResolver( + self.reactor, + Agent(self.reactor, contextFactory=self.tls_factory), + well_known_cache=self.well_known_cache, + ) + self.reactor.lookups["testserv"] = "1.2.3.4" - fetch_d = self.do_get_well_known(b"testserv") + fetch_d = well_known_resolver.get_well_known(b"testserv") # there should be an attempt to connect on port 443 for the .well-known clients = self.reactor.tcpClients @@ -958,21 +957,21 @@ def test_well_known_cache(self): ) r = self.successResultOf(fetch_d) - self.assertEqual(r, b"target-server") + self.assertEqual(r.delegated_server, b"target-server") # close the tcp connection well_known_server.loseConnection() # repeat the request: it should hit the cache - fetch_d = self.do_get_well_known(b"testserv") + fetch_d = well_known_resolver.get_well_known(b"testserv") r = self.successResultOf(fetch_d) - self.assertEqual(r, b"target-server") + self.assertEqual(r.delegated_server, b"target-server") # expire the cache self.reactor.pump((1000.0,)) # now it should connect again - fetch_d = self.do_get_well_known(b"testserv") + fetch_d = 
well_known_resolver.get_well_known(b"testserv")

         self.assertEqual(len(clients), 1)
         (host, port, client_factory, _timeout, _bindAddress) = clients.pop(0)
@@ -986,7 +985,7 @@ def test_well_known_cache(self):
         )

         r = self.successResultOf(fetch_d)
-        self.assertEqual(r, b"other-server")
+        self.assertEqual(r.delegated_server, b"other-server")


 class TestCachePeriodFromHeaders(TestCase):

From a7f0161276c06e0d46a86ced9cc9dc5aa1e36486 Mon Sep 17 00:00:00 2001
From: Thomas Citharel
Date: Fri, 9 Aug 2019 18:33:15 +0200
Subject: [PATCH 120/136] Fix curl command typo in purge_remote_media.sh

The command used curl's verbose option (-v) where -X was intended, so it
did not work.

Signed-off-by: Thomas Citharel
---
 changelog.d/5839.bugfix                 | 1 +
 contrib/purge_api/purge_remote_media.sh | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5839.bugfix

diff --git a/changelog.d/5839.bugfix b/changelog.d/5839.bugfix
new file mode 100644
index 000000000000..829aea46874a
--- /dev/null
+++ b/changelog.d/5839.bugfix
@@ -0,0 +1 @@
+The purge_remote_media.sh script was fixed

diff --git a/contrib/purge_api/purge_remote_media.sh b/contrib/purge_api/purge_remote_media.sh
index 99c07c663d8c..77220d3bd5bc 100644
--- a/contrib/purge_api/purge_remote_media.sh
+++ b/contrib/purge_api/purge_remote_media.sh
@@ -51,4 +51,4 @@ TOKEN=$(sql "SELECT token FROM access_tokens WHERE user_id='$ADMIN' ORDER BY id
 # finally start pruning media:
 ###############################################################################
 set -x # for debugging the generated string
-curl --header "Authorization: Bearer $TOKEN" -v POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"
+curl --header "Authorization: Bearer $TOKEN" -X POST "$API_URL/admin/purge_media_cache/?before_ts=$UNIX_TIMESTAMP"

From 41546f946e896ba6ca0ce2b98cf13d04ef516f13 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 12 Aug 2019 09:56:58 +0100
Subject: [PATCH 121/136] Newsfile

---
 changelog.d/5836.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5836.misc

diff --git a/changelog.d/5836.misc b/changelog.d/5836.misc
new file mode 100644
index 000000000000..18f248820151
--- /dev/null
+++ b/changelog.d/5836.misc
@@ -0,0 +1 @@
+Add a lower bound to well-known lookup cache time to avoid repeated lookups.

From c9456193d38ef83cef3bb76b30ef94ea28433e6d Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 12 Aug 2019 13:56:26 +0100
Subject: [PATCH 122/136] Whitelist history visibility sytests for worker mode

---
 .buildkite/worker-blacklist | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist
index 8ed8eef1a356..cda5c84e94be 100644
--- a/.buildkite/worker-blacklist
+++ b/.buildkite/worker-blacklist
@@ -3,10 +3,6 @@

 Message history can be paginated

-m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users
-
-m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users
-
 Can re-join room if re-invited

 /upgrade creates a new room

From 156a461cbd9e965fbc1ce386bb68f3c5237cc772 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 12 Aug 2019 13:57:52 +0100
Subject: [PATCH 123/136] Newsfile

---
 changelog.d/5843.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5843.misc

diff --git a/changelog.d/5843.misc b/changelog.d/5843.misc
new file mode 100644
index 000000000000..e7e7d572b7de
--- /dev/null
+++ b/changelog.d/5843.misc
@@ -0,0 +1 @@
+Whitelist history visibility sytests in worker mode tests.
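Taken together, patches 118 and 119 leave .well-known handling behind one small
API. The following is a hedged usage sketch: WellKnownResolver, get_well_known
and delegated_server are taken from the diffs above, while building the agent
without the federation TLS context factory and connection pool is a
simplification for illustration only.

from twisted.internet import defer, reactor
from twisted.web.client import Agent

from synapse.http.federation.well_known_resolver import WellKnownResolver


@defer.inlineCallbacks
def delegated_name_for(server_name):
    # server_name is bytes, e.g. b"example.com"
    resolver = WellKnownResolver(reactor, agent=Agent(reactor))
    result = yield resolver.get_well_known(server_name)
    # delegated_server is the b"m.server" value from the .well-known, or
    # None if there was no (valid) file; either answer is cached.
    return result.delegated_server

With the TTL bounds in place, a busy server refetches a remote's .well-known at
most once every five minutes and at least once every 48 hours, whatever
Cache-Control headers the remote sends.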
From 3de6cc245fb2eb950cd7bc5e6c50f462a06937fe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 12 Aug 2019 14:16:42 +0100 Subject: [PATCH 124/136] Changelogs should end in '.' or '!' --- changelog.d/5839.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5839.bugfix b/changelog.d/5839.bugfix index 829aea46874a..5775bfa653d1 100644 --- a/changelog.d/5839.bugfix +++ b/changelog.d/5839.bugfix @@ -1 +1 @@ -The purge_remote_media.sh script was fixed +The purge_remote_media.sh script was fixed. From f218705d2a9ce60b0b996ab29b7c85efb9236109 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2019 10:06:51 +0100 Subject: [PATCH 125/136] Make default well known cache global again. --- synapse/http/federation/well_known_resolver.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index bab4ab015e07..d2866ff67da9 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -47,6 +47,9 @@ logger = logging.getLogger(__name__) +_well_known_cache = TTLCache("well-known") + + @attr.s(slots=True, frozen=True) class WellKnownLookupResult(object): delegated_server = attr.ib() @@ -61,7 +64,7 @@ def __init__(self, reactor, agent, well_known_cache=None): self._clock = Clock(reactor) if well_known_cache is None: - well_known_cache = TTLCache("well-known") + well_known_cache = _well_known_cache self._well_known_cache = well_known_cache self._well_known_agent = RedirectAgent(agent) From fb3469f53ac86c4771caa9fdfc946eaa298977b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2019 10:17:23 +0100 Subject: [PATCH 126/136] Clarify docstring Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/pusher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/pusher.py b/synapse/storage/pusher.py index 888035fe8697..b431d24b8a93 100644 --- a/synapse/storage/pusher.py +++ b/synapse/storage/pusher.py @@ -318,7 +318,7 @@ def update_pusher_last_stream_ordering_and_success( last_success (int) Returns: - Deferred[bool]: Whether the pusher stil exists or not. + Deferred[bool]: True if the pusher still exists; False if it has been deleted. 
""" updated = yield self._simple_update( table="pushers", From 0b6fbb28a858f56766c77eedede7d1dade9e9b1c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 13 Aug 2019 21:49:28 +1000 Subject: [PATCH 127/136] Don't load the media repo when configured to use an external media repo (#5754) --- .gitignore | 1 + changelog.d/5754.feature | 1 + docs/sample_config.yaml | 7 ++ docs/workers.rst | 7 ++ synapse/app/media_repository.py | 9 ++ synapse/config/repository.py | 20 +++++ synapse/rest/admin/__init__.py | 102 +++------------------- synapse/rest/admin/_base.py | 25 ++++++ synapse/rest/admin/media.py | 101 +++++++++++++++++++++ synapse/rest/media/v1/media_repository.py | 6 +- 10 files changed, 188 insertions(+), 91 deletions(-) create mode 100644 changelog.d/5754.feature create mode 100644 synapse/rest/admin/media.py diff --git a/.gitignore b/.gitignore index a84c41b0c95a..f6168a8819a3 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ _trial_temp*/ /*.log /*.log.config /*.pid +/.python-version /*.signing.key /env/ /homeserver*.yaml diff --git a/changelog.d/5754.feature b/changelog.d/5754.feature new file mode 100644 index 000000000000..c1a09a4dcee5 --- /dev/null +++ b/changelog.d/5754.feature @@ -0,0 +1 @@ +Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 1b206fe6bf40..0c6be30e513d 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -565,6 +565,13 @@ log_config: "CONFDIR/SERVERNAME.log.config" +## Media Store ## + +# Enable the media store service in the Synapse master. Uncomment the +# following if you are using a separate media store worker. +# +#enable_media_repo: false + # Directory where uploaded images and attachments are stored. # media_store_path: "DATADIR/media_store" diff --git a/docs/workers.rst b/docs/workers.rst index 7b2d2db53327..e11e11741820 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -206,6 +206,13 @@ Handles the media repository. It can handle all endpoints starting with:: /_matrix/media/ +And the following regular expressions matching media-specific administration +APIs:: + + ^/_synapse/admin/v1/purge_media_cache$ + ^/_synapse/admin/v1/room/.*/media$ + ^/_synapse/admin/v1/quarantine_media/.*$ + You should also set ``enable_media_repo: False`` in the shared configuration file to stop the main synapse running background jobs related to managing the media repository. 
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index ea26f29acb88..3a168577c737 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -26,6 +26,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging +from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy @@ -35,6 +36,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.server import HomeServer from synapse.storage.engines import create_engine @@ -71,6 +73,12 @@ def _listen_http(self, listener_config): resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) elif name == "media": media_repo = self.get_media_repository_resource() + + # We need to serve the admin servlets for media on the + # worker. + admin_resource = JsonResource(self, canonical_json=False) + register_servlets_for_media_repo(self, admin_resource) + resources.update( { MEDIA_PREFIX: media_repo, @@ -78,6 +86,7 @@ def _listen_http(self, listener_config): CONTENT_REPO_PREFIX: ContentRepoResource( self, self.config.uploads_path ), + "/_synapse/admin": admin_resource, } ) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 80a628d9b0cb..db39697e45b1 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + import os from collections import namedtuple @@ -87,6 +88,18 @@ def parse_thumbnail_requirements(thumbnail_sizes): class ContentRepositoryConfig(Config): def read_config(self, config, **kwargs): + + # Only enable the media repo if either the media repo is enabled or the + # current worker app is the media repo. + if ( + self.enable_media_repo is False + and config.worker_app != "synapse.app.media_repository" + ): + self.can_load_media_repo = False + return + else: + self.can_load_media_repo = True + self.max_upload_size = self.parse_size(config.get("max_upload_size", "10M")) self.max_image_pixels = self.parse_size(config.get("max_image_pixels", "32M")) self.max_spider_size = self.parse_size(config.get("max_spider_size", "10M")) @@ -202,6 +215,13 @@ def generate_config_section(self, data_dir_path, **kwargs): return ( r""" + ## Media Store ## + + # Enable the media store service in the Synapse master. Uncomment the + # following if you are using a separate media store worker. + # + #enable_media_repo: false + # Directory where uploaded images and attachments are stored. 
# media_store_path: "%(media_store)s" diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 0a7d9b81b27b..5720cab42588 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -27,7 +27,7 @@ import synapse from synapse.api.constants import Membership, UserTypes -from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import JsonResource from synapse.http.servlet import ( RestServlet, @@ -36,7 +36,12 @@ parse_json_object_from_request, parse_string, ) -from synapse.rest.admin._base import assert_requester_is_admin, assert_user_is_admin +from synapse.rest.admin._base import ( + assert_requester_is_admin, + assert_user_is_admin, + historical_admin_path_patterns, +) +from synapse.rest.admin.media import register_servlets_for_media_repo from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string @@ -44,28 +49,6 @@ logger = logging.getLogger(__name__) -def historical_admin_path_patterns(path_regex): - """Returns the list of patterns for an admin endpoint, including historical ones - - This is a backwards-compatibility hack. Previously, the Admin API was exposed at - various paths under /_matrix/client. This function returns a list of patterns - matching those paths (as well as the new one), so that existing scripts which rely - on the endpoints being available there are not broken. - - Note that this should only be used for existing endpoints: new ones should just - register for the /_synapse/admin path. - """ - return list( - re.compile(prefix + path_regex) - for prefix in ( - "^/_synapse/admin/v1", - "^/_matrix/client/api/v1/admin", - "^/_matrix/client/unstable/admin", - "^/_matrix/client/r0/admin", - ) - ) - - class UsersRestServlet(RestServlet): PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)") @@ -255,25 +238,6 @@ def on_GET(self, request, user_id): return (200, ret) -class PurgeMediaCacheRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/purge_media_cache") - - def __init__(self, hs): - self.media_repository = hs.get_media_repository() - self.auth = hs.get_auth() - - @defer.inlineCallbacks - def on_POST(self, request): - yield assert_requester_is_admin(self.auth, request) - - before_ts = parse_integer(request, "before_ts", required=True) - logger.info("before_ts: %r", before_ts) - - ret = yield self.media_repository.delete_old_remote_media(before_ts) - - return (200, ret) - - class PurgeHistoryRestServlet(RestServlet): PATTERNS = historical_admin_path_patterns( "/purge_history/(?P[^/]*)(/(?P[^/]+))?" @@ -542,50 +506,6 @@ def on_POST(self, request, room_id): ) -class QuarantineMediaInRoom(RestServlet): - """Quarantines all media in a room so that no one can download it via - this server. 
- """ - - PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P[^/]+)") - - def __init__(self, hs): - self.store = hs.get_datastore() - self.auth = hs.get_auth() - - @defer.inlineCallbacks - def on_POST(self, request, room_id): - requester = yield self.auth.get_user_by_req(request) - yield assert_user_is_admin(self.auth, requester.user) - - num_quarantined = yield self.store.quarantine_media_ids_in_room( - room_id, requester.user.to_string() - ) - - return (200, {"num_quarantined": num_quarantined}) - - -class ListMediaInRoom(RestServlet): - """Lists all of the media in a given room. - """ - - PATTERNS = historical_admin_path_patterns("/room/(?P[^/]+)/media") - - def __init__(self, hs): - self.store = hs.get_datastore() - - @defer.inlineCallbacks - def on_GET(self, request, room_id): - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - if not is_admin: - raise AuthError(403, "You are not a server admin") - - local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) - - return (200, {"local": local_mxcs, "remote": remote_mxcs}) - - class ResetPasswordRestServlet(RestServlet): """Post request to allow an administrator reset password for a user. This needs user to have administrator access in Synapse. @@ -825,7 +745,6 @@ def register_servlets(hs, http_server): def register_servlets_for_client_rest_resource(hs, http_server): """Register only the servlets which need to be exposed on /_matrix/client/xxx""" WhoisRestServlet(hs).register(http_server) - PurgeMediaCacheRestServlet(hs).register(http_server) PurgeHistoryStatusRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) PurgeHistoryRestServlet(hs).register(http_server) @@ -834,10 +753,13 @@ def register_servlets_for_client_rest_resource(hs, http_server): GetUsersPaginatedRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) ShutdownRoomRestServlet(hs).register(http_server) - QuarantineMediaInRoom(hs).register(http_server) - ListMediaInRoom(hs).register(http_server) UserRegisterServlet(hs).register(http_server) DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) + + # Load the media repo ones if we're using them. + if hs.config.can_load_media_repo: + register_servlets_for_media_repo(hs, http_server) + # don't add more things here: new servlets should only be exposed on # /_synapse/admin so should not go here. Instead register them in AdminRestResource. diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index 881d67b89cc4..5a9b08d3ef9c 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -12,11 +12,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import re + from twisted.internet import defer from synapse.api.errors import AuthError +def historical_admin_path_patterns(path_regex): + """Returns the list of patterns for an admin endpoint, including historical ones + + This is a backwards-compatibility hack. Previously, the Admin API was exposed at + various paths under /_matrix/client. This function returns a list of patterns + matching those paths (as well as the new one), so that existing scripts which rely + on the endpoints being available there are not broken. 
+ + Note that this should only be used for existing endpoints: new ones should just + register for the /_synapse/admin path. + """ + return list( + re.compile(prefix + path_regex) + for prefix in ( + "^/_synapse/admin/v1", + "^/_matrix/client/api/v1/admin", + "^/_matrix/client/unstable/admin", + "^/_matrix/client/r0/admin", + ) + ) + + @defer.inlineCallbacks def assert_requester_is_admin(auth, request): """Verify that the requester is an admin user diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py new file mode 100644 index 000000000000..824df919f269 --- /dev/null +++ b/synapse/rest/admin/media.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018-2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from twisted.internet import defer + +from synapse.api.errors import AuthError +from synapse.http.servlet import RestServlet, parse_integer +from synapse.rest.admin._base import ( + assert_requester_is_admin, + assert_user_is_admin, + historical_admin_path_patterns, +) + +logger = logging.getLogger(__name__) + + +class QuarantineMediaInRoom(RestServlet): + """Quarantines all media in a room so that no one can download it via + this server. + """ + + PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P[^/]+)") + + def __init__(self, hs): + self.store = hs.get_datastore() + self.auth = hs.get_auth() + + @defer.inlineCallbacks + def on_POST(self, request, room_id): + requester = yield self.auth.get_user_by_req(request) + yield assert_user_is_admin(self.auth, requester.user) + + num_quarantined = yield self.store.quarantine_media_ids_in_room( + room_id, requester.user.to_string() + ) + + return (200, {"num_quarantined": num_quarantined}) + + +class ListMediaInRoom(RestServlet): + """Lists all of the media in a given room. + """ + + PATTERNS = historical_admin_path_patterns("/room/(?P[^/]+)/media") + + def __init__(self, hs): + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def on_GET(self, request, room_id): + requester = yield self.auth.get_user_by_req(request) + is_admin = yield self.auth.is_server_admin(requester.user) + if not is_admin: + raise AuthError(403, "You are not a server admin") + + local_mxcs, remote_mxcs = yield self.store.get_media_mxcs_in_room(room_id) + + return (200, {"local": local_mxcs, "remote": remote_mxcs}) + + +class PurgeMediaCacheRestServlet(RestServlet): + PATTERNS = historical_admin_path_patterns("/purge_media_cache") + + def __init__(self, hs): + self.media_repository = hs.get_media_repository() + self.auth = hs.get_auth() + + @defer.inlineCallbacks + def on_POST(self, request): + yield assert_requester_is_admin(self.auth, request) + + before_ts = parse_integer(request, "before_ts", required=True) + logger.info("before_ts: %r", before_ts) + + ret = yield self.media_repository.delete_old_remote_media(before_ts) + + return (200, ret) + + +def register_servlets_for_media_repo(hs, http_server): + """ + Media repo specific APIs. 
+ """ + PurgeMediaCacheRestServlet(hs).register(http_server) + QuarantineMediaInRoom(hs).register(http_server) + ListMediaInRoom(hs).register(http_server) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 92beefa1766c..cf5759e9a693 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -33,6 +33,7 @@ RequestSendFailed, SynapseError, ) +from synapse.config._base import ConfigError from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.async_helpers import Linearizer @@ -753,8 +754,11 @@ class MediaRepositoryResource(Resource): """ def __init__(self, hs): - Resource.__init__(self) + # If we're not configured to use it, raise if we somehow got here. + if not hs.config.can_load_media_repo: + raise ConfigError("Synapse is not configured to use a media repo.") + super().__init__() media_repo = hs.get_media_repository() self.putChild(b"upload", UploadResource(hs, media_repo)) From 96bdd661b88f373826ea25baa94199cf2b8d25f9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Aug 2019 12:50:36 +0100 Subject: [PATCH 128/136] Remove redundant return --- synapse/push/emailpusher.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index f688d4152d7c..42e5b0c0a563 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -247,7 +247,6 @@ def save_last_stream_ordering_and_success(self, last_stream_ordering): # The pusher has been deleted while we were processing, so # lets just stop and return. self.on_stop() - return def seconds_until(self, ts_msec): secs = (ts_msec - self.clock.time_msec()) / 1000 From 18bdac8ee4813005813a7021b1056ae83b44d6a2 Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Wed, 14 Aug 2019 02:05:11 +1000 Subject: [PATCH 129/136] fix config being a dict, actually --- synapse/config/repository.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/repository.py b/synapse/config/repository.py index db39697e45b1..fdb1f246d086 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -93,7 +93,7 @@ def read_config(self, config, **kwargs): # current worker app is the media repo. if ( self.enable_media_repo is False - and config.worker_app != "synapse.app.media_repository" + and config.get("worker_app") != "synapse.app.media_repository" ): self.can_load_media_repo = False return From 28bce1ac7c900d7f8bd8c12bb0558e28c337fddc Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Wed, 14 Aug 2019 02:08:24 +1000 Subject: [PATCH 130/136] changelog --- changelog.d/5848.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5848.feature diff --git a/changelog.d/5848.feature b/changelog.d/5848.feature new file mode 100644 index 000000000000..c1a09a4dcee5 --- /dev/null +++ b/changelog.d/5848.feature @@ -0,0 +1 @@ +Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead. 
\ No newline at end of file From f70d0a1dd998cf131ff3e6fc9f4319c51903b9f3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 13 Aug 2019 18:17:47 +0100 Subject: [PATCH 131/136] 1.3.0rc1 --- CHANGES.md | 74 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5678.removal | 1 - changelog.d/5686.feature | 1 - changelog.d/5693.bugfix | 1 - changelog.d/5694.misc | 1 - changelog.d/5695.misc | 1 - changelog.d/5706.misc | 1 - changelog.d/5713.misc | 1 - changelog.d/5715.misc | 1 - changelog.d/5717.misc | 1 - changelog.d/5719.misc | 1 - changelog.d/5720.misc | 1 - changelog.d/5722.misc | 1 - changelog.d/5724.bugfix | 1 - changelog.d/5725.bugfix | 1 - changelog.d/5729.removal | 1 - changelog.d/5730.misc | 1 - changelog.d/5731.misc | 1 - changelog.d/5732.feature | 1 - changelog.d/5733.misc | 1 - changelog.d/5736.misc | 1 - changelog.d/5738.misc | 1 - changelog.d/5740.misc | 1 - changelog.d/5743.bugfix | 1 - changelog.d/5746.misc | 1 - changelog.d/5749.misc | 1 - changelog.d/5750.misc | 1 - changelog.d/5752.misc | 1 - changelog.d/5753.misc | 1 - changelog.d/5754.feature | 1 - changelog.d/5768.misc | 1 - changelog.d/5770.misc | 1 - changelog.d/5774.misc | 1 - changelog.d/5775.bugfix | 1 - changelog.d/5780.misc | 1 - changelog.d/5782.removal | 1 - changelog.d/5783.feature | 1 - changelog.d/5785.misc | 1 - changelog.d/5787.misc | 1 - changelog.d/5788.bugfix | 1 - changelog.d/5789.bugfix | 1 - changelog.d/5790.misc | 1 - changelog.d/5792.misc | 1 - changelog.d/5793.misc | 1 - changelog.d/5794.misc | 1 - changelog.d/5796.misc | 1 - changelog.d/5798.bugfix | 1 - changelog.d/5801.misc | 1 - changelog.d/5802.misc | 1 - changelog.d/5804.bugfix | 1 - changelog.d/5805.misc | 1 - changelog.d/5806.bugfix | 1 - changelog.d/5807.feature | 1 - changelog.d/5808.misc | 1 - changelog.d/5809.misc | 1 - changelog.d/5810.misc | 1 - changelog.d/5825.bugfix | 1 - changelog.d/5826.misc | 1 - changelog.d/5836.misc | 1 - changelog.d/5839.bugfix | 1 - changelog.d/5843.misc | 1 - changelog.d/5848.feature | 1 - synapse/__init__.py | 2 +- 63 files changed, 75 insertions(+), 62 deletions(-) delete mode 100644 changelog.d/5678.removal delete mode 100644 changelog.d/5686.feature delete mode 100644 changelog.d/5693.bugfix delete mode 100644 changelog.d/5694.misc delete mode 100644 changelog.d/5695.misc delete mode 100644 changelog.d/5706.misc delete mode 100644 changelog.d/5713.misc delete mode 100644 changelog.d/5715.misc delete mode 100644 changelog.d/5717.misc delete mode 100644 changelog.d/5719.misc delete mode 100644 changelog.d/5720.misc delete mode 100644 changelog.d/5722.misc delete mode 100644 changelog.d/5724.bugfix delete mode 100644 changelog.d/5725.bugfix delete mode 100644 changelog.d/5729.removal delete mode 100644 changelog.d/5730.misc delete mode 100644 changelog.d/5731.misc delete mode 100644 changelog.d/5732.feature delete mode 100644 changelog.d/5733.misc delete mode 100644 changelog.d/5736.misc delete mode 100644 changelog.d/5738.misc delete mode 100644 changelog.d/5740.misc delete mode 100644 changelog.d/5743.bugfix delete mode 100644 changelog.d/5746.misc delete mode 100644 changelog.d/5749.misc delete mode 100644 changelog.d/5750.misc delete mode 100644 changelog.d/5752.misc delete mode 100644 changelog.d/5753.misc delete mode 100644 changelog.d/5754.feature delete mode 100644 changelog.d/5768.misc delete mode 100644 changelog.d/5770.misc delete mode 100644 changelog.d/5774.misc delete mode 100644 changelog.d/5775.bugfix delete mode 100644 changelog.d/5780.misc delete mode 100644 
changelog.d/5782.removal delete mode 100644 changelog.d/5783.feature delete mode 100644 changelog.d/5785.misc delete mode 100644 changelog.d/5787.misc delete mode 100644 changelog.d/5788.bugfix delete mode 100644 changelog.d/5789.bugfix delete mode 100644 changelog.d/5790.misc delete mode 100644 changelog.d/5792.misc delete mode 100644 changelog.d/5793.misc delete mode 100644 changelog.d/5794.misc delete mode 100644 changelog.d/5796.misc delete mode 100644 changelog.d/5798.bugfix delete mode 100644 changelog.d/5801.misc delete mode 100644 changelog.d/5802.misc delete mode 100644 changelog.d/5804.bugfix delete mode 100644 changelog.d/5805.misc delete mode 100644 changelog.d/5806.bugfix delete mode 100644 changelog.d/5807.feature delete mode 100644 changelog.d/5808.misc delete mode 100644 changelog.d/5809.misc delete mode 100644 changelog.d/5810.misc delete mode 100644 changelog.d/5825.bugfix delete mode 100644 changelog.d/5826.misc delete mode 100644 changelog.d/5836.misc delete mode 100644 changelog.d/5839.bugfix delete mode 100644 changelog.d/5843.misc delete mode 100644 changelog.d/5848.feature diff --git a/CHANGES.md b/CHANGES.md index 7bdc7ae6cc31..eca9c82f55b8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,77 @@ +Synapse 1.3.0rc1 (2019-08-13) +========================== + +Features +-------- + +- Use `M_USER_DEACTIVATED` instead of `M_UNKNOWN` for errcode when a deactivated user attempts to login. ([\#5686](https://github.com/matrix-org/synapse/issues/5686)) +- Add sd_notify hooks to ease systemd integration and allows usage of Type=Notify. ([\#5732](https://github.com/matrix-org/synapse/issues/5732)) +- Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead. ([\#5754](https://github.com/matrix-org/synapse/issues/5754), [\#5848](https://github.com/matrix-org/synapse/issues/5848)) +- Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers. ([\#5783](https://github.com/matrix-org/synapse/issues/5783)) +- Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature. ([\#5807](https://github.com/matrix-org/synapse/issues/5807)) + + +Bugfixes +-------- + +- Fix UISIs during homeserver outage. ([\#5693](https://github.com/matrix-org/synapse/issues/5693), [\#5789](https://github.com/matrix-org/synapse/issues/5789)) +- Fix stack overflow in server key lookup code. ([\#5724](https://github.com/matrix-org/synapse/issues/5724)) +- start.sh no longer uses deprecated cli option. ([\#5725](https://github.com/matrix-org/synapse/issues/5725)) +- Log when we receive an event receipt from an unexpected origin. ([\#5743](https://github.com/matrix-org/synapse/issues/5743)) +- Fix debian packaging scripts to correctly build sid packages. ([\#5775](https://github.com/matrix-org/synapse/issues/5775)) +- Correctly handle redactions of redactions. ([\#5788](https://github.com/matrix-org/synapse/issues/5788)) +- Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions. 
([\#5798](https://github.com/matrix-org/synapse/issues/5798)) +- Fix check that tombstone is a state event in push rules. ([\#5804](https://github.com/matrix-org/synapse/issues/5804)) +- Fix error when trying to login as a deactivated user when using a worker to handle login. ([\#5806](https://github.com/matrix-org/synapse/issues/5806)) +- Fix bug where user `/sync` stream could get wedged in rare circumstances. ([\#5825](https://github.com/matrix-org/synapse/issues/5825)) +- The purge_remote_media.sh script was fixed. ([\#5839](https://github.com/matrix-org/synapse/issues/5839)) + + +Deprecations and Removals +------------------------- + +- Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration. ([\#5678](https://github.com/matrix-org/synapse/issues/5678), [\#5729](https://github.com/matrix-org/synapse/issues/5729)) +- Remove non-functional 'expire_access_token' setting. ([\#5782](https://github.com/matrix-org/synapse/issues/5782)) + + +Internal Changes +---------------- + +- Make Jaeger fully configurable. ([\#5694](https://github.com/matrix-org/synapse/issues/5694)) +- Add precautionary measures to prevent future abuse of `window.opener` in default welcome page. ([\#5695](https://github.com/matrix-org/synapse/issues/5695)) +- Reduce database IO usage by optimising queries for current membership. ([\#5706](https://github.com/matrix-org/synapse/issues/5706), [\#5738](https://github.com/matrix-org/synapse/issues/5738), [\#5746](https://github.com/matrix-org/synapse/issues/5746), [\#5752](https://github.com/matrix-org/synapse/issues/5752), [\#5770](https://github.com/matrix-org/synapse/issues/5770), [\#5774](https://github.com/matrix-org/synapse/issues/5774), [\#5792](https://github.com/matrix-org/synapse/issues/5792), [\#5793](https://github.com/matrix-org/synapse/issues/5793)) +- Improve caching when fetching `get_filtered_current_state_ids`. ([\#5713](https://github.com/matrix-org/synapse/issues/5713)) +- Don't accept opentracing data from clients. ([\#5715](https://github.com/matrix-org/synapse/issues/5715)) +- Speed up PostgreSQL unit tests in CI. ([\#5717](https://github.com/matrix-org/synapse/issues/5717)) +- Update the coding style document. ([\#5719](https://github.com/matrix-org/synapse/issues/5719)) +- Improve database query performance when recording retry intervals for remote hosts. ([\#5720](https://github.com/matrix-org/synapse/issues/5720)) +- Add a set of opentracing utils. ([\#5722](https://github.com/matrix-org/synapse/issues/5722)) +- Cache result of get_version_string to reduce overhead of `/version` federation requests. ([\#5730](https://github.com/matrix-org/synapse/issues/5730)) +- Return 'user_type' in admin API user endpoints results. ([\#5731](https://github.com/matrix-org/synapse/issues/5731)) +- Don't package the sytest test blacklist file. ([\#5733](https://github.com/matrix-org/synapse/issues/5733)) +- Replace uses of returnValue with plain return, as returnValue is not needed on Python 3. 
([\#5736](https://github.com/matrix-org/synapse/issues/5736)) +- Blacklist some flakey tests in worker mode. ([\#5740](https://github.com/matrix-org/synapse/issues/5740)) +- Fix some error cases in the caching layer. ([\#5749](https://github.com/matrix-org/synapse/issues/5749)) +- Add a prometheus metric for pending cache lookups. ([\#5750](https://github.com/matrix-org/synapse/issues/5750)) +- Stop trying to fetch events with event_id=None. ([\#5753](https://github.com/matrix-org/synapse/issues/5753)) +- Convert RedactionTestCase to modern test style. ([\#5768](https://github.com/matrix-org/synapse/issues/5768)) +- Allow looping calls to be given arguments. ([\#5780](https://github.com/matrix-org/synapse/issues/5780)) +- Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO. ([\#5785](https://github.com/matrix-org/synapse/issues/5785)) +- Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests. ([\#5787](https://github.com/matrix-org/synapse/issues/5787)) +- Remove some spurious exceptions from the logs where we failed to talk to a remote server. ([\#5790](https://github.com/matrix-org/synapse/issues/5790)) +- Improve performance when making `.well-known` requests by sharing the SSL options between requests. ([\#5794](https://github.com/matrix-org/synapse/issues/5794)) +- Disable codecov GitHub comments on PRs. ([\#5796](https://github.com/matrix-org/synapse/issues/5796)) +- Don't allow clients to send tombstone events that reference the room it's sent in. ([\#5801](https://github.com/matrix-org/synapse/issues/5801)) +- Deny redactions of events sent in a different room. ([\#5802](https://github.com/matrix-org/synapse/issues/5802)) +- Deny sending well known state types as non-state events. ([\#5805](https://github.com/matrix-org/synapse/issues/5805)) +- Handle incorrectly encoded query params correctly by returning a 400. ([\#5808](https://github.com/matrix-org/synapse/issues/5808)) +- Handle pusher being deleted during processing rather than logging an exception. ([\#5809](https://github.com/matrix-org/synapse/issues/5809)) +- Return 502 not 500 when failing to reach any remote server. ([\#5810](https://github.com/matrix-org/synapse/issues/5810)) +- Reduce global pauses in the events stream caused by expensive state resolution during persistence. ([\#5826](https://github.com/matrix-org/synapse/issues/5826)) +- Add a lower bound to well-known lookup cache time to avoid repeated lookups. ([\#5836](https://github.com/matrix-org/synapse/issues/5836)) +- Whitelist history visibility sytests in worker mode tests. ([\#5843](https://github.com/matrix-org/synapse/issues/5843)) + + Synapse 1.2.1 (2019-07-26) ========================== diff --git a/changelog.d/5678.removal b/changelog.d/5678.removal deleted file mode 100644 index 085b84fda69d..000000000000 --- a/changelog.d/5678.removal +++ /dev/null @@ -1 +0,0 @@ -Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration.
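The `#5678` removal just above ends by telling users to migrate their options into the dedicated log configuration. As a rough sketch of what consuming that dedicated configuration amounts to on the Python side — assuming a Synapse-style YAML file named `homeserver.log.config`; the logger name is purely illustrative, not Synapse's actual wiring:

```python
import logging.config

import yaml  # PyYAML

# Load the dedicated log configuration that replaces the removed
# -v / -f / --log-config command line flags.
with open("homeserver.log.config") as f:
    log_config = yaml.safe_load(f)

# The file follows the stdlib dictConfig schema, so applying it is one call.
logging.config.dictConfig(log_config)

logging.getLogger("synapse.storage").info("logging configured from file")
```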
diff --git a/changelog.d/5686.feature b/changelog.d/5686.feature deleted file mode 100644 index 367aa1eca251..000000000000 --- a/changelog.d/5686.feature +++ /dev/null @@ -1 +0,0 @@ -Use `M_USER_DEACTIVATED` instead of `M_UNKNOWN` for errcode when a deactivated user attempts to login. diff --git a/changelog.d/5693.bugfix b/changelog.d/5693.bugfix deleted file mode 100644 index d6f4e590aef9..000000000000 --- a/changelog.d/5693.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix UISIs during homeserver outage. diff --git a/changelog.d/5694.misc b/changelog.d/5694.misc deleted file mode 100644 index 3b12dcc849df..000000000000 --- a/changelog.d/5694.misc +++ /dev/null @@ -1 +0,0 @@ -Make Jaeger fully configurable. diff --git a/changelog.d/5695.misc b/changelog.d/5695.misc deleted file mode 100644 index 4741d32e255b..000000000000 --- a/changelog.d/5695.misc +++ /dev/null @@ -1 +0,0 @@ -Add precautionary measures to prevent future abuse of `window.opener` in default welcome page. diff --git a/changelog.d/5706.misc b/changelog.d/5706.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5706.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5713.misc b/changelog.d/5713.misc deleted file mode 100644 index 01ea1cf8d741..000000000000 --- a/changelog.d/5713.misc +++ /dev/null @@ -1 +0,0 @@ -Improve caching when fetching `get_filtered_current_state_ids`. diff --git a/changelog.d/5715.misc b/changelog.d/5715.misc deleted file mode 100644 index a77366e0c0ed..000000000000 --- a/changelog.d/5715.misc +++ /dev/null @@ -1 +0,0 @@ -Don't accept opentracing data from clients. diff --git a/changelog.d/5717.misc b/changelog.d/5717.misc deleted file mode 100644 index 07dc3bca946b..000000000000 --- a/changelog.d/5717.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up PostgreSQL unit tests in CI. diff --git a/changelog.d/5719.misc b/changelog.d/5719.misc deleted file mode 100644 index 6d5294724c7f..000000000000 --- a/changelog.d/5719.misc +++ /dev/null @@ -1 +0,0 @@ -Update the coding style document. diff --git a/changelog.d/5720.misc b/changelog.d/5720.misc deleted file mode 100644 index 590f64f19d0c..000000000000 --- a/changelog.d/5720.misc +++ /dev/null @@ -1 +0,0 @@ -Improve database query performance when recording retry intervals for remote hosts. diff --git a/changelog.d/5722.misc b/changelog.d/5722.misc deleted file mode 100644 index f2d236188dee..000000000000 --- a/changelog.d/5722.misc +++ /dev/null @@ -1 +0,0 @@ -Add a set of opentracing utils. diff --git a/changelog.d/5724.bugfix b/changelog.d/5724.bugfix deleted file mode 100644 index 1b3683daf6b2..000000000000 --- a/changelog.d/5724.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix stack overflow in server key lookup code. \ No newline at end of file diff --git a/changelog.d/5725.bugfix b/changelog.d/5725.bugfix deleted file mode 100644 index 73ef419727d9..000000000000 --- a/changelog.d/5725.bugfix +++ /dev/null @@ -1 +0,0 @@ -start.sh no longer uses deprecated cli option. diff --git a/changelog.d/5729.removal b/changelog.d/5729.removal deleted file mode 100644 index 3af5198e6bb4..000000000000 --- a/changelog.d/5729.removal +++ /dev/null @@ -1 +0,0 @@ - Synapse now no longer accepts the `-v`/`--verbose`, `-f`/`--log-file`, or `--log-config` command line flags, and removes the deprecated `verbose` and `log_file` configuration file options. Users of these options should migrate their options into the dedicated log configuration. 
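Entry `#5736` in the release notes above ("Replace uses of returnValue with plain return") is the kind of mechanical change that benefits from a side-by-side. A minimal sketch with an invented `store.fetch_events()` helper — on Python 3 a generator may return a value directly, so Twisted's `defer.returnValue` escape hatch is no longer needed inside `@inlineCallbacks`:

```python
from twisted.internet import defer

@defer.inlineCallbacks
def get_event_count_old(store):
    rows = yield store.fetch_events()  # hypothetical Deferred-returning call
    # Python 2 generators could not return values, hence this helper.
    defer.returnValue(len(rows))

@defer.inlineCallbacks
def get_event_count_new(store):
    rows = yield store.fetch_events()
    # Python 3 generators can return directly; Twisted handles both the same.
    return len(rows)
```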
diff --git a/changelog.d/5730.misc b/changelog.d/5730.misc deleted file mode 100644 index a99677f5e7a8..000000000000 --- a/changelog.d/5730.misc +++ /dev/null @@ -1 +0,0 @@ -Cache result of get_version_string to reduce overhead of `/version` federation requests. diff --git a/changelog.d/5731.misc b/changelog.d/5731.misc deleted file mode 100644 index dffae5d874e7..000000000000 --- a/changelog.d/5731.misc +++ /dev/null @@ -1 +0,0 @@ -Return 'user_type' in admin API user endpoints results. diff --git a/changelog.d/5732.feature b/changelog.d/5732.feature deleted file mode 100644 index 9021864350cf..000000000000 --- a/changelog.d/5732.feature +++ /dev/null @@ -1 +0,0 @@ -Add sd_notify hooks to ease systemd integration and allows usage of Type=Notify. diff --git a/changelog.d/5733.misc b/changelog.d/5733.misc deleted file mode 100644 index a2a8c26383f6..000000000000 --- a/changelog.d/5733.misc +++ /dev/null @@ -1 +0,0 @@ -Don't package the sytest test blacklist file. diff --git a/changelog.d/5736.misc b/changelog.d/5736.misc deleted file mode 100644 index 5713b8b32d77..000000000000 --- a/changelog.d/5736.misc +++ /dev/null @@ -1 +0,0 @@ -Replace uses of returnValue with plain return, as returnValue is not needed on Python 3. diff --git a/changelog.d/5738.misc b/changelog.d/5738.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5738.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5740.misc b/changelog.d/5740.misc deleted file mode 100644 index 97a476bef557..000000000000 --- a/changelog.d/5740.misc +++ /dev/null @@ -1 +0,0 @@ -Blacklist some flakey tests in worker mode. diff --git a/changelog.d/5743.bugfix b/changelog.d/5743.bugfix deleted file mode 100644 index 65728ff079fb..000000000000 --- a/changelog.d/5743.bugfix +++ /dev/null @@ -1 +0,0 @@ -Log when we receive an event receipt from an unexpected origin. diff --git a/changelog.d/5746.misc b/changelog.d/5746.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5746.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5749.misc b/changelog.d/5749.misc deleted file mode 100644 index 48dd61f4617d..000000000000 --- a/changelog.d/5749.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some error cases in the caching layer. diff --git a/changelog.d/5750.misc b/changelog.d/5750.misc deleted file mode 100644 index 6beaa460a5a7..000000000000 --- a/changelog.d/5750.misc +++ /dev/null @@ -1 +0,0 @@ -Add a prometheus metric for pending cache lookups. \ No newline at end of file diff --git a/changelog.d/5752.misc b/changelog.d/5752.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5752.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5753.misc b/changelog.d/5753.misc deleted file mode 100644 index 22bba9ce3c05..000000000000 --- a/changelog.d/5753.misc +++ /dev/null @@ -1 +0,0 @@ -Stop trying to fetch events with event_id=None. diff --git a/changelog.d/5754.feature b/changelog.d/5754.feature deleted file mode 100644 index c1a09a4dcee5..000000000000 --- a/changelog.d/5754.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead. 
\ No newline at end of file diff --git a/changelog.d/5768.misc b/changelog.d/5768.misc deleted file mode 100644 index 7a9c88b4c258..000000000000 --- a/changelog.d/5768.misc +++ /dev/null @@ -1 +0,0 @@ -Convert RedactionTestCase to modern test style. diff --git a/changelog.d/5770.misc b/changelog.d/5770.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5770.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5774.misc b/changelog.d/5774.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5774.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5775.bugfix b/changelog.d/5775.bugfix deleted file mode 100644 index b124897d802c..000000000000 --- a/changelog.d/5775.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix debian packaging scripts to correctly build sid packages. diff --git a/changelog.d/5780.misc b/changelog.d/5780.misc deleted file mode 100644 index b7eb56e625b2..000000000000 --- a/changelog.d/5780.misc +++ /dev/null @@ -1 +0,0 @@ -Allow looping calls to be given arguments. diff --git a/changelog.d/5782.removal b/changelog.d/5782.removal deleted file mode 100644 index 658bf923ab57..000000000000 --- a/changelog.d/5782.removal +++ /dev/null @@ -1 +0,0 @@ -Remove non-functional 'expire_access_token' setting. diff --git a/changelog.d/5783.feature b/changelog.d/5783.feature deleted file mode 100644 index 18f5a3cb288c..000000000000 --- a/changelog.d/5783.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse can now be configured to not join remote rooms of a given "complexity" (currently, state events) over federation. This option can be used to prevent adverse performance on resource-constrained homeservers. diff --git a/changelog.d/5785.misc b/changelog.d/5785.misc deleted file mode 100644 index 0691222c421c..000000000000 --- a/changelog.d/5785.misc +++ /dev/null @@ -1 +0,0 @@ -Set the logs emitted when checking typing and presence timeouts to DEBUG level, not INFO. diff --git a/changelog.d/5787.misc b/changelog.d/5787.misc deleted file mode 100644 index ead0b04b6210..000000000000 --- a/changelog.d/5787.misc +++ /dev/null @@ -1 +0,0 @@ -Remove DelayedCall debugging from the test suite, as it is no longer required in the vast majority of Synapse's tests. diff --git a/changelog.d/5788.bugfix b/changelog.d/5788.bugfix deleted file mode 100644 index 5632f3cb99ba..000000000000 --- a/changelog.d/5788.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correctly handle redactions of redactions. diff --git a/changelog.d/5789.bugfix b/changelog.d/5789.bugfix deleted file mode 100644 index d6f4e590aef9..000000000000 --- a/changelog.d/5789.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix UISIs during homeserver outage. diff --git a/changelog.d/5790.misc b/changelog.d/5790.misc deleted file mode 100644 index 3e9e435d7aa7..000000000000 --- a/changelog.d/5790.misc +++ /dev/null @@ -1 +0,0 @@ -Remove some spurious exceptions from the logs where we failed to talk to a remote server. diff --git a/changelog.d/5792.misc b/changelog.d/5792.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5792.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. 
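Entry `#5780` above ("Allow looping calls to be given arguments") maps onto a Twisted primitive that already supports this. A minimal sketch — the task name and interval are invented, and Synapse itself goes through its own `looping_call` wrapper rather than constructing `LoopingCall` directly:

```python
from twisted.internet import reactor, task

def prune_cache(cache_name):
    print("pruning", cache_name)

# LoopingCall forwards extra constructor arguments to the callable on
# every tick; #5780 exposed the same ability through Synapse's wrapper.
lc = task.LoopingCall(prune_cache, "state_cache")
lc.start(30.0)  # run immediately, then every 30 seconds

reactor.callLater(95, lc.stop)       # stop the loop after a few runs
reactor.callLater(100, reactor.stop)
reactor.run()
```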
diff --git a/changelog.d/5793.misc b/changelog.d/5793.misc deleted file mode 100644 index 5e15dfd5faa7..000000000000 --- a/changelog.d/5793.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce database IO usage by optimising queries for current membership. diff --git a/changelog.d/5794.misc b/changelog.d/5794.misc deleted file mode 100644 index 720e0ddcfb89..000000000000 --- a/changelog.d/5794.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance when making `.well-known` requests by sharing the SSL options between requests. diff --git a/changelog.d/5796.misc b/changelog.d/5796.misc deleted file mode 100644 index be520946c7e0..000000000000 --- a/changelog.d/5796.misc +++ /dev/null @@ -1 +0,0 @@ -Disable codecov GitHub comments on PRs. diff --git a/changelog.d/5798.bugfix b/changelog.d/5798.bugfix deleted file mode 100644 index 7db2c37af5d9..000000000000 --- a/changelog.d/5798.bugfix +++ /dev/null @@ -1 +0,0 @@ -Return 404 instead of 403 when accessing /rooms/{roomId}/event/{eventId} for an event without the appropriate permissions. diff --git a/changelog.d/5801.misc b/changelog.d/5801.misc deleted file mode 100644 index e19854de822f..000000000000 --- a/changelog.d/5801.misc +++ /dev/null @@ -1 +0,0 @@ -Don't allow clients to send tombstone events that reference the room it's sent in. diff --git a/changelog.d/5802.misc b/changelog.d/5802.misc deleted file mode 100644 index de31192652da..000000000000 --- a/changelog.d/5802.misc +++ /dev/null @@ -1 +0,0 @@ -Deny redactions of events sent in a different room. diff --git a/changelog.d/5804.bugfix b/changelog.d/5804.bugfix deleted file mode 100644 index 75c17b460dbb..000000000000 --- a/changelog.d/5804.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix check that tombstone is a state event in push rules. diff --git a/changelog.d/5805.misc b/changelog.d/5805.misc deleted file mode 100644 index 352cb3db04d1..000000000000 --- a/changelog.d/5805.misc +++ /dev/null @@ -1 +0,0 @@ -Deny sending well known state types as non-state events. diff --git a/changelog.d/5806.bugfix b/changelog.d/5806.bugfix deleted file mode 100644 index c5ca0f562917..000000000000 --- a/changelog.d/5806.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error when trying to login as a deactivated user when using a worker to handle login. diff --git a/changelog.d/5807.feature b/changelog.d/5807.feature deleted file mode 100644 index 8b7d29a23cb4..000000000000 --- a/changelog.d/5807.feature +++ /dev/null @@ -1 +0,0 @@ -Allow defining HTML templates to serve the user on account renewal attempt when using the account validity feature. diff --git a/changelog.d/5808.misc b/changelog.d/5808.misc deleted file mode 100644 index cac3fd34d12e..000000000000 --- a/changelog.d/5808.misc +++ /dev/null @@ -1 +0,0 @@ -Handle incorrectly encoded query params correctly by returning a 400. diff --git a/changelog.d/5809.misc b/changelog.d/5809.misc deleted file mode 100644 index 82a812480e0d..000000000000 --- a/changelog.d/5809.misc +++ /dev/null @@ -1 +0,0 @@ -Handle pusher being deleted during processing rather than logging an exception. diff --git a/changelog.d/5810.misc b/changelog.d/5810.misc deleted file mode 100644 index 0a5ccbbb3f3d..000000000000 --- a/changelog.d/5810.misc +++ /dev/null @@ -1 +0,0 @@ -Return 502 not 500 when failing to reach any remote server. diff --git a/changelog.d/5825.bugfix b/changelog.d/5825.bugfix deleted file mode 100644 index fb2c6f821d9f..000000000000 --- a/changelog.d/5825.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where user `/sync` stream could get wedged in rare circumstances. 
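Entry `#5808` above ("Handle incorrectly encoded query params correctly by returning a 400") comes down to catching decode failures at the parsing boundary instead of letting them bubble up as a 500. A rough sketch, not Synapse's actual servlet helper — `BadRequestError` stands in for a 400-carrying error type:

```python
from urllib.parse import unquote_to_bytes

class BadRequestError(Exception):
    """Stand-in for an error type that maps to HTTP 400."""

def parse_string(raw_value):
    # Percent-decode to bytes first, then insist on valid UTF-8; a bad
    # byte sequence becomes a client error rather than a server crash.
    try:
        return unquote_to_bytes(raw_value).decode("utf-8")
    except UnicodeDecodeError:
        raise BadRequestError("Query parameter is not valid UTF-8")

print(parse_string("caf%C3%A9"))  # -> café
# parse_string("%FF") raises BadRequestError
```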
diff --git a/changelog.d/5826.misc b/changelog.d/5826.misc deleted file mode 100644 index 9abed11bbe2a..000000000000 --- a/changelog.d/5826.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce global pauses in the events stream caused by expensive state resolution during persistence. diff --git a/changelog.d/5836.misc b/changelog.d/5836.misc deleted file mode 100644 index 18f248820151..000000000000 --- a/changelog.d/5836.misc +++ /dev/null @@ -1 +0,0 @@ -Add a lower bound to well-known lookup cache time to avoid repeated lookups. diff --git a/changelog.d/5839.bugfix b/changelog.d/5839.bugfix deleted file mode 100644 index 5775bfa653d1..000000000000 --- a/changelog.d/5839.bugfix +++ /dev/null @@ -1 +0,0 @@ -The purge_remote_media.sh script was fixed. diff --git a/changelog.d/5843.misc b/changelog.d/5843.misc deleted file mode 100644 index e7e7d572b7de..000000000000 --- a/changelog.d/5843.misc +++ /dev/null @@ -1 +0,0 @@ -Whitelist history visbility sytests in worker mode tests. diff --git a/changelog.d/5848.feature b/changelog.d/5848.feature deleted file mode 100644 index c1a09a4dcee5..000000000000 --- a/changelog.d/5848.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse will no longer serve any media repo admin endpoints when `enable_media_repo` is set to False in the configuration. If a media repo worker is used, the admin APIs relating to the media repo will be served from it instead. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 8301a13d8f92..d2316c7df932 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ except ImportError: pass -__version__ = "1.2.1" +__version__ = "1.3.0rc1" From 1b63ccd8483e99aa7ea72b91f99f4cd94ded2e36 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 14 Aug 2019 14:05:50 +0100 Subject: [PATCH 132/136] Wrap `get_local_public_room_list` call in `maybeDeferred` because it is cached and so does not always return a `Deferred`. `await` does not silently pass-through non-Deferreds like `yield` used to. 
Signed-off-by: Olivier Wilkinson (reivilibre) --- synapse/federation/transport/server.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index ea4e1b6d0f28..9a86bd02638f 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -19,6 +19,8 @@ import logging import re +from twisted.internet.defer import maybeDeferred + import synapse import synapse.logging.opentracing as opentracing from synapse.api.errors import Codes, FederationDeniedError, SynapseError @@ -745,8 +747,12 @@ async def on_GET(self, origin, content, query): else: network_tuple = ThirdPartyInstanceID(None, None) - data = await self.handler.get_local_public_room_list( - limit, since_token, network_tuple=network_tuple, from_federation=True + data = await maybeDeferred( + self.handler.get_local_public_room_list, + limit, + since_token, + network_tuple=network_tuple, + from_federation=True, ) return 200, data From 3ad24ab3865ab0e52bab7cbb7bb50f10c3cab7d8 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Wed, 14 Aug 2019 14:09:31 +0100 Subject: [PATCH 133/136] Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/5851.bugfix | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 changelog.d/5851.bugfix diff --git a/changelog.d/5851.bugfix b/changelog.d/5851.bugfix new file mode 100644 index 000000000000..6da40e6e3824 --- /dev/null +++ b/changelog.d/5851.bugfix @@ -0,0 +1,2 @@ +Fixes 500 Internal Server Error on `publicRooms` when the public room list was +cached. \ No newline at end of file From d6de55bce967e89c7f8ffdbbe04ba655f969845c Mon Sep 17 00:00:00 2001 From: reivilibre <38398653+reivilibre@users.noreply.github.com> Date: Wed, 14 Aug 2019 14:48:18 +0100 Subject: [PATCH 134/136] Update changelog.d/5851.bugfix Use imperative Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5851.bugfix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog.d/5851.bugfix b/changelog.d/5851.bugfix index 6da40e6e3824..58f7c0c1b828 100644 --- a/changelog.d/5851.bugfix +++ b/changelog.d/5851.bugfix @@ -1,2 +1,2 @@ -Fixes 500 Internal Server Error on `publicRooms` when the public room list was -cached. \ No newline at end of file +Fix 500 Internal Server Error on `publicRooms` when the public room list was +cached. From 8cf7fbbce089d03d01e69fa52f035904d929a3bf Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 15 Aug 2019 11:32:23 +0100 Subject: [PATCH 135/136] Remove libsqlite3-dev from required build dependencies. (#5766) --- changelog.d/5766.misc | 1 + debian/changelog | 7 +++---- debian/control | 1 - 3 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5766.misc diff --git a/changelog.d/5766.misc b/changelog.d/5766.misc new file mode 100644 index 000000000000..163ca2f0d454 --- /dev/null +++ b/changelog.d/5766.misc @@ -0,0 +1 @@ +Remove libsqlite3-dev from required build dependencies. diff --git a/debian/changelog b/debian/changelog index 6634c1085a6b..55c28be8537a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,7 @@ -matrix-synapse-py3 (1.2.1) stable; urgency=medium +matrix-synapse-py3 (1.3.0) UNRELEASED; urgency=medium - * New synapse release 1.2.1. 
- - -- Synapse Packaging team <packages@matrix.org> Fri, 26 Jul 2019 11:32:47 +0100 + [ Andrew Morgan ] + * Remove libsqlite3-dev from required build dependencies. matrix-synapse-py3 (1.2.0) stable; urgency=medium diff --git a/debian/control b/debian/control index 9e679c9d428f..bae14b41e4f6 100644 --- a/debian/control +++ b/debian/control @@ -15,7 +15,6 @@ Build-Depends: python3-setuptools, python3-pip, python3-venv, - libsqlite3-dev, tar, Standards-Version: 3.9.8 Homepage: https://github.com/matrix-org/synapse From fb5acd703973249063f2a78266eed65c8dfaaf84 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 15 Aug 2019 12:05:24 +0100 Subject: [PATCH 136/136] 1.3.0 --- CHANGES.md | 10 ++++++++++ changelog.d/5766.misc | 1 - changelog.d/5851.bugfix | 2 -- debian/changelog | 5 +++-- synapse/__init__.py | 2 +- 5 files changed, 14 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/5766.misc delete mode 100644 changelog.d/5851.bugfix diff --git a/CHANGES.md b/CHANGES.md index eca9c82f55b8..d13dcb717efd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,13 @@ +Synapse 1.3.0 (2019-08-15) +========================== + +Bugfixes +-------- + +- Fix 500 Internal Server Error on `publicRooms` when the public room list was + cached. ([\#5851](https://github.com/matrix-org/synapse/issues/5851)) + + Synapse 1.3.0rc1 (2019-08-13) ========================== diff --git a/changelog.d/5766.misc b/changelog.d/5766.misc deleted file mode 100644 index 163ca2f0d454..000000000000 --- a/changelog.d/5766.misc +++ /dev/null @@ -1 +0,0 @@ -Remove libsqlite3-dev from required build dependencies. diff --git a/changelog.d/5851.bugfix b/changelog.d/5851.bugfix deleted file mode 100644 index 58f7c0c1b828..000000000000 --- a/changelog.d/5851.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix 500 Internal Server Error on `publicRooms` when the public room list was -cached. diff --git a/debian/changelog b/debian/changelog index 55c28be8537a..83232a0bad7a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -matrix-synapse-py3 (1.3.0) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.3.0) stable; urgency=medium [ Andrew Morgan ] * Remove libsqlite3-dev from required build dependencies. @@ -13,8 +13,9 @@ matrix-synapse-py3 (1.2.0) stable; urgency=medium [ Synapse Packaging team ] * New synapse release 1.2.0. + * New synapse release 1.3.0. - -- Synapse Packaging team <packages@matrix.org> Thu, 25 Jul 2019 14:10:07 +0100 + -- Synapse Packaging team <packages@matrix.org> Thu, 15 Aug 2019 12:04:23 +0100 matrix-synapse-py3 (1.1.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index d2316c7df932..02ae90b0725f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ except ImportError: pass -__version__ = "1.3.0rc1" +__version__ = "1.3.0"
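A closing note on the `maybeDeferred` change in patch 132, since the commit message packs the whole rationale into two sentences: a cached function can hand back either the memoised plain value or a `Deferred`, and `await` — unlike `yield` under `@inlineCallbacks` — refuses plain values. A minimal sketch of the failure mode and the fix; the cache and handler here are invented stand-ins, not Synapse's actual `@cached` machinery:

```python
from twisted.internet.defer import maybeDeferred, succeed

_cache = {}

def get_room_list(since):
    # Cache hit: a plain list. Cache miss: a Deferred. Callers must
    # cope with both, which is exactly what bit the federation handler.
    if since in _cache:
        return _cache[since]
    d = succeed([{"room_id": "!made-up:example.org"}])  # fake DB fetch
    d.addCallback(lambda rooms: _cache.setdefault(since, rooms))
    return d

async def on_GET(since):
    # `await get_room_list(since)` would raise TypeError on a cache hit;
    # maybeDeferred normalises both cases into an awaitable Deferred.
    data = await maybeDeferred(get_room_list, since)
    return 200, data
```

Under the old `@inlineCallbacks` style the bug was invisible, because `yield` passed plain values straight through; it only surfaced once the endpoint moved to `async`/`await`.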