From 91e886d615dd24ff0258e06470bca0cc8ffe22eb Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 19 Jun 2020 13:56:35 +0100
Subject: [PATCH 01/15] Speed up state res v2 across large state differences.
 (#7725)

---
 changelog.d/7725.misc | 1 +
 synapse/state/v2.py   | 3 ++-
 2 files changed, 3 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/7725.misc

diff --git a/changelog.d/7725.misc b/changelog.d/7725.misc
new file mode 100644
index 000000000000..f295a455211f
--- /dev/null
+++ b/changelog.d/7725.misc
@@ -0,0 +1 @@
+Speed up state res v2 across large state differences.
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index e25bc5d26486..57eadce4e64a 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -133,8 +133,9 @@ def resolve_events_with_store(
 
     # OK, so we've now resolved the power events. Now sort the remaining
     # events using the mainline of the resolved power level.
 
+    set_power_events = set(sorted_power_events)
     leftover_events = [
-        ev_id for ev_id in full_conflicted_set if ev_id not in sorted_power_events
+        ev_id for ev_id in full_conflicted_set if ev_id not in set_power_events
     ]
 
     logger.debug("sorting %d remaining events", len(leftover_events))

From e060bf44625da4aa0a61fc792c3ef9af8ad05769 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 22 Jun 2020 07:18:00 -0400
Subject: [PATCH 02/15] Convert directory handler to async/await (#7727)

---
 changelog.d/7727.misc         |  1 +
 synapse/handlers/directory.py | 68 +++++++++++++++--------------------
 synapse/handlers/message.py   |  4 ++-
 3 files changed, 33 insertions(+), 40 deletions(-)
 create mode 100644 changelog.d/7727.misc

diff --git a/changelog.d/7727.misc b/changelog.d/7727.misc
new file mode 100644
index 000000000000..4d12d10fda2c
--- /dev/null
+++ b/changelog.d/7727.misc
@@ -0,0 +1 @@
+Convert directory handler to async/await.
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index f2f16b1e435b..79a2df62015a 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -17,8 +17,6 @@ import string
 from typing import Iterable, List, Optional
 
-from twisted.internet import defer
-
 from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes
 from synapse.api.errors import (
     AuthError,
@@ -55,8 +53,7 @@ def __init__(self, hs):
 
         self.spam_checker = hs.get_spam_checker()
 
-    @defer.inlineCallbacks
-    def _create_association(
+    async def _create_association(
         self,
         room_alias: RoomAlias,
         room_id: str,
@@ -76,13 +73,13 @@ def _create_association(
         # TODO(erikj): Add transactions.
         # TODO(erikj): Check if there is a current association.
         if not servers:
-            users = yield self.state.get_current_users_in_room(room_id)
+            users = await self.state.get_current_users_in_room(room_id)
             servers = {get_domain_from_id(u) for u in users}
 
         if not servers:
             raise SynapseError(400, "Failed to get server list")
 
-        yield self.store.create_room_alias_association(
+        await self.store.create_room_alias_association(
            room_alias, room_id, servers, creator=creator
         )
 
@@ -93,7 +90,7 @@ async def create_association(
         room_id: str,
         servers: Optional[List[str]] = None,
         check_membership: bool = True,
-    ):
+    ) -> None:
         """Attempt to create a new alias
 
         Args:
             requester
             room_alias
             room_id
             servers: Iterable of servers that other servers should try
                 and join via
             check_membership: Whether to check if the user is in the room
                 before the alias can be set (if the server's config requires it).
-
-        Returns:
-            Deferred
         """
         user_id = requester.user.to_string()
 
@@ -148,7 +142,7 @@ async def create_association(
             # per alias creation rule?
             raise SynapseError(403, "Not allowed to create alias")
 
-        can_create = await self.can_modify_alias(room_alias, user_id=user_id)
+        can_create = self.can_modify_alias(room_alias, user_id=user_id)
         if not can_create:
             raise AuthError(
                 400,
@@ -158,7 +152,9 @@ async def create_association(
 
         await self._create_association(room_alias, room_id, servers, creator=user_id)
 
-    async def delete_association(self, requester: Requester, room_alias: RoomAlias):
+    async def delete_association(
+        self, requester: Requester, room_alias: RoomAlias
+    ) -> str:
         """Remove an alias from the directory
 
         (this is only meant for human users; AS users should call
@@ -169,7 +165,7 @@ async def delete_association(self, requester: Requester, room_alias: RoomAlias):
             room_alias
 
         Returns:
-            Deferred[unicode]: room id that the alias used to point to
+            room id that the alias used to point to
 
         Raises:
             NotFoundError: if the alias doesn't exist
@@ -191,7 +187,7 @@ async def delete_association(self, requester: Requester, room_alias: RoomAlias):
         if not can_delete:
             raise AuthError(403, "You don't have permission to delete the alias.")
 
-        can_delete = await self.can_modify_alias(room_alias, user_id=user_id)
+        can_delete = self.can_modify_alias(room_alias, user_id=user_id)
         if not can_delete:
             raise SynapseError(
                 400,
@@ -208,8 +204,7 @@ async def delete_association(self, requester: Requester, room_alias: RoomAlias):
 
         return room_id
 
-    @defer.inlineCallbacks
-    def delete_appservice_association(
+    async def delete_appservice_association(
         self, service: ApplicationService, room_alias: RoomAlias
     ):
         if not service.is_interested_in_alias(room_alias.to_string()):
@@ -218,29 +213,27 @@ def delete_appservice_association(
                 "This application service has not reserved this kind of alias",
                 errcode=Codes.EXCLUSIVE,
             )
-        yield self._delete_association(room_alias)
+        await self._delete_association(room_alias)
 
-    @defer.inlineCallbacks
-    def _delete_association(self, room_alias: RoomAlias):
+    async def _delete_association(self, room_alias: RoomAlias):
         if not self.hs.is_mine(room_alias):
             raise SynapseError(400, "Room alias must be local")
 
-        room_id = yield self.store.delete_room_alias(room_alias)
+        room_id = await self.store.delete_room_alias(room_alias)
 
         return room_id
 
-    @defer.inlineCallbacks
-    def get_association(self, room_alias: RoomAlias):
+    async def get_association(self, room_alias: RoomAlias):
         room_id = None
         if self.hs.is_mine(room_alias):
-            result = yield self.get_association_from_room_alias(room_alias)
+            result = await self.get_association_from_room_alias(room_alias)
 
             if result:
                 room_id = result.room_id
                 servers = result.servers
         else:
             try:
-                result = yield self.federation.make_query(
+                result = await self.federation.make_query(
                     destination=room_alias.domain,
                     query_type="directory",
                     args={"room_alias": room_alias.to_string()},
@@ -265,7 +258,7 @@ def get_association(self, room_alias: RoomAlias):
                 Codes.NOT_FOUND,
             )
 
-        users = yield self.state.get_current_users_in_room(room_id)
+        users = await self.state.get_current_users_in_room(room_id)
         extra_servers = {get_domain_from_id(u) for u in users}
         servers = set(extra_servers) | set(servers)
 
@@ -277,13 +270,12 @@ def get_association(self, room_alias: RoomAlias):
 
         return {"room_id": room_id, "servers": servers}
 
-    @defer.inlineCallbacks
-    def on_directory_query(self, args):
+    async def on_directory_query(self, args):
         room_alias = RoomAlias.from_string(args["room_alias"])
        if not self.hs.is_mine(room_alias):
             raise SynapseError(400, "Room Alias is not hosted on this homeserver")
 
-        result = yield self.get_association_from_room_alias(room_alias)
+        result = await self.get_association_from_room_alias(room_alias)
 
         if result is not None:
             return {"room_id": result.room_id, "servers": result.servers}
@@ -344,16 +336,15 @@ async def _update_canonical_alias(
             ratelimit=False,
         )
 
-    @defer.inlineCallbacks
-    def get_association_from_room_alias(self, room_alias: RoomAlias):
-        result = yield self.store.get_association_from_room_alias(room_alias)
+    async def get_association_from_room_alias(self, room_alias: RoomAlias):
+        result = await self.store.get_association_from_room_alias(room_alias)
         if not result:
             # Query AS to see if it exists
             as_handler = self.appservice_handler
-            result = yield as_handler.query_room_alias_exists(room_alias)
+            result = await as_handler.query_room_alias_exists(room_alias)
         return result
 
-    def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None):
+    def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> bool:
         # Any application service "interested" in an alias they are regexing on
         # can modify the alias.
         # Users can only modify the alias if ALL the interested services have
@@ -366,12 +357,12 @@ def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None):
         for service in interested_services:
             if user_id == service.sender:
                 # this user IS the app service so they can do whatever they like
-                return defer.succeed(True)
+                return True
             elif service.is_exclusive_alias(alias.to_string()):
                 # another service has an exclusive lock on this alias.
-                return defer.succeed(False)
+                return False
         # either no interested services, or no service with an exclusive lock
-        return defer.succeed(True)
+        return True
 
     async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str):
         """Determine whether a user can delete an alias.
@@ -459,8 +450,7 @@ async def edit_published_room_list(
 
         await self.store.set_room_is_public(room_id, making_public)
 
-    @defer.inlineCallbacks
-    def edit_published_appservice_room_list(
+    async def edit_published_appservice_room_list(
         self, appservice_id: str, network_id: str, room_id: str, visibility: str
     ):
         """Add or remove a room from the appservice/network specific public
@@ -475,7 +465,7 @@ def edit_published_appservice_room_list(
         if visibility not in ["public", "private"]:
             raise SynapseError(400, "Invalid visibility setting")
 
-        yield self.store.set_room_is_public_appservice(
+        await self.store.set_room_is_public_appservice(
             room_id, appservice_id, network_id, visibility == "public"
         )
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 200127d29124..665ad19b5d3b 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -879,7 +879,9 @@ def _validate_canonical_alias(
         """
         room_alias = RoomAlias.from_string(room_alias_str)
         try:
-            mapping = yield directory_handler.get_association(room_alias)
+            mapping = yield defer.ensureDeferred(
+                directory_handler.get_association(room_alias)
+            )
         except SynapseError as e:
             # Turn M_NOT_FOUND errors into M_BAD_ALIAS errors.
             if e.errcode == Codes.NOT_FOUND:

From 95e41f368b19996872a1661d7066670fe65f1eba Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Mon, 22 Jun 2020 08:04:14 -0400
Subject: [PATCH 03/15] Allow local media to be marked as safe from being
 quarantined. (#7718)

---
 changelog.d/7718.feature                      |   1 +
 scripts/synapse_port_db                       |   1 +
 .../data_stores/main/media_repository.py      |   9 ++
 synapse/storage/data_stores/main/room.py      |  42 +-----
 ...08_media_safe_from_quarantine.sql.postgres |  18 +++
 .../08_media_safe_from_quarantine.sql.sqlite  |  18 +++
 tests/rest/admin/test_admin.py                | 137 +++++++++---------
 7 files changed, 119 insertions(+), 107 deletions(-)
 create mode 100644 changelog.d/7718.feature
 create mode 100644 synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
 create mode 100644 synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite

diff --git a/changelog.d/7718.feature b/changelog.d/7718.feature
new file mode 100644
index 000000000000..17071b9ea9f6
--- /dev/null
+++ b/changelog.d/7718.feature
@@ -0,0 +1 @@
+Media can now be marked as safe from being quarantined.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 810e08beb57a..c2023f3e4d3c 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -89,6 +89,7 @@ BOOLEAN_COLUMNS = {
     "account_validity": ["email_sent"],
     "redactions": ["have_censored"],
     "room_stats_state": ["is_federatable"],
+    "local_media_repository": ["safe_from_quarantine"],
 }
 
 
diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py
index 8aecd414c274..15bc13cbd0ee 100644
--- a/synapse/storage/data_stores/main/media_repository.py
+++ b/synapse/storage/data_stores/main/media_repository.py
@@ -81,6 +81,15 @@ def store_local_media(
             desc="store_local_media",
         )
 
+    def mark_local_media_as_safe(self, media_id: str):
+        """Mark a local media as safe from quarantining."""
+        return self.db.simple_update_one(
+            table="local_media_repository",
+            keyvalues={"media_id": media_id},
+            updatevalues={"safe_from_quarantine": True},
+            desc="mark_local_media_as_safe",
+        )
+
     def get_url_cache(self, url, ts):
         """Get the media_id and ts for a cached URL as of the given timestamp
         Returns:
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 46f643c6b904..13e366536a77 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -626,36 +626,10 @@ def quarantine_media_ids_in_room(self, room_id, quarantined_by):
         def _quarantine_media_in_room_txn(txn):
             local_mxcs, remote_mxcs = self._get_media_mxcs_in_room_txn(txn, room_id)
-            total_media_quarantined = 0
-
-            # Now update all the tables to set the quarantined_by flag
-
-            txn.executemany(
-                """
-                UPDATE local_media_repository
-                SET quarantined_by = ?
-                WHERE media_id = ?
-                """,
-                ((quarantined_by, media_id) for media_id in local_mxcs),
-            )
-
-            txn.executemany(
-                """
-                    UPDATE remote_media_cache
-                    SET quarantined_by = ?
-                    WHERE media_origin = ? AND media_id = ?
-                """,
-                (
-                    (quarantined_by, origin, media_id)
-                    for origin, media_id in remote_mxcs
-                ),
+            return self._quarantine_media_txn(
+                txn, local_mxcs, remote_mxcs, quarantined_by
             )
 
-            total_media_quarantined += len(local_mxcs)
-            total_media_quarantined += len(remote_mxcs)
-
-            return total_media_quarantined
-
         return self.db.runInteraction(
             "quarantine_media_in_room", _quarantine_media_in_room_txn
         )
@@ -805,17 +779,17 @@ def _quarantine_media_txn(
         Returns:
             The total number of media items quarantined
         """
-        total_media_quarantined = 0
-
         # Update all the tables to set the quarantined_by flag
         txn.executemany(
             """
             UPDATE local_media_repository
             SET quarantined_by = ?
-            WHERE media_id = ?
+            WHERE media_id = ? AND safe_from_quarantine = ?
             """,
-            ((quarantined_by, media_id) for media_id in local_mxcs),
+            ((quarantined_by, media_id, False) for media_id in local_mxcs),
         )
+        # Note that a rowcount of -1 can be used to indicate no rows were affected.
+        total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0
 
         txn.executemany(
             """
             UPDATE remote_media_cache
             SET quarantined_by = ?
             WHERE media_origin = ? AND media_id = ?
             """,
             ((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs),
         )
-
-        total_media_quarantined += len(local_mxcs)
-        total_media_quarantined += len(remote_mxcs)
+        total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
 
         return total_media_quarantined
 
diff --git a/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres b/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
new file mode 100644
index 000000000000..597f2ffd3d85
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
@@ -0,0 +1,18 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The local_media_repository should have files which do not get quarantined,
+-- e.g. files from sticker packs.
+ALTER TABLE local_media_repository ADD COLUMN safe_from_quarantine BOOLEAN NOT NULL DEFAULT FALSE;
diff --git a/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite b/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
new file mode 100644
index 000000000000..69db89ac0eb6
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
@@ -0,0 +1,18 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The local_media_repository should have files which do not get quarantined,
+-- e.g. files from sticker packs.
+ALTER TABLE local_media_repository ADD COLUMN safe_from_quarantine BOOLEAN NOT NULL DEFAULT 0;
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 977615ebef7e..b1a4decced76 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -220,6 +220,24 @@ def write_to(r):
 
         return hs
 
+    def _ensure_quarantined(self, admin_user_tok, server_and_media_id):
+        """Ensure a piece of media is quarantined when trying to access it."""
+        request, channel = self.make_request(
+            "GET", server_and_media_id, shorthand=False, access_token=admin_user_tok,
+        )
+        request.render(self.download_resource)
+        self.pump(1.0)
+
+        # Should be quarantined
+        self.assertEqual(
+            404,
+            int(channel.code),
+            msg=(
+                "Expected to receive a 404 on accessing quarantined media: %s"
+                % server_and_media_id
+            ),
+        )
+
     def test_quarantine_media_requires_admin(self):
         self.register_user("nonadmin", "pass", admin=False)
         non_admin_user_tok = self.login("nonadmin", "pass")
@@ -292,24 +310,7 @@ def test_quarantine_media_by_id(self):
         self.assertEqual(200, int(channel.code), msg=channel.result["body"])
 
         # Attempt to access the media
-        request, channel = self.make_request(
-            "GET",
-            server_name_and_media_id,
-            shorthand=False,
-            access_token=admin_user_tok,
-        )
-        request.render(self.download_resource)
-        self.pump(1.0)
-
-        # Should be quarantined
-        self.assertEqual(
-            404,
-            int(channel.code),
-            msg=(
-                "Expected to receive a 404 on accessing quarantined media: %s"
-                % server_name_and_media_id
-            ),
-        )
+        self._ensure_quarantined(admin_user_tok, server_name_and_media_id)
 
     def test_quarantine_all_media_in_room(self, override_url_template=None):
         self.register_user("room_admin", "pass", admin=True)
@@ -371,45 +372,10 @@ def test_quarantine_all_media_in_room(self, override_url_template=None):
         server_and_media_id_2 = mxc_2[6:]
 
         # Test that we cannot download any of the media anymore
-        request, channel = self.make_request(
-            "GET",
-            server_and_media_id_1,
-            shorthand=False,
-            access_token=non_admin_user_tok,
-        )
-        request.render(self.download_resource)
-        self.pump(1.0)
-
-        # Should be quarantined
-        self.assertEqual(
-            404,
-            int(channel.code),
-            msg=(
-                "Expected to receive a 404 on accessing quarantined media: %s"
-                % server_and_media_id_1
-            ),
-        )
-
-        request, channel = self.make_request(
-            "GET",
-            server_and_media_id_2,
-            shorthand=False,
-            access_token=non_admin_user_tok,
-        )
-        request.render(self.download_resource)
-        self.pump(1.0)
-
-        # Should be quarantined
-        self.assertEqual(
-            404,
-            int(channel.code),
-            msg=(
-                "Expected to receive a 404 on accessing quarantined media: %s"
-                % server_and_media_id_2
-            ),
-        )
+        self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
+        self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
 
-    def test_quaraantine_all_media_in_room_deprecated_api_path(self):
+    def test_quarantine_all_media_in_room_deprecated_api_path(self):
         # Perform the above test with the deprecated API path
         self.test_quarantine_all_media_in_room("/_synapse/admin/v1/quarantine_media/%s")
 
@@ -449,25 +415,52 @@ def test_quarantine_all_media_by_user(self):
         )
 
         # Attempt to access each piece of media
+        self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
+        self._ensure_quarantined(admin_user_tok, server_and_media_id_2)
+
+    def test_cannot_quarantine_safe_media(self):
+        self.register_user("user_admin", "pass", admin=True)
+        admin_user_tok = self.login("user_admin", "pass")
+
+        non_admin_user = self.register_user("user_nonadmin", "pass", admin=False)
+        non_admin_user_tok = self.login("user_nonadmin", "pass")
+
+        # Upload some media
+        response_1 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+        response_2 = self.helper.upload_media(
+            self.upload_resource, self.image_data, tok=non_admin_user_tok
+        )
+
+        # Extract media IDs
+        server_and_media_id_1 = response_1["content_uri"][6:]
+        server_and_media_id_2 = response_2["content_uri"][6:]
+
+        # Mark the second item as safe from quarantine.
+        _, media_id_2 = server_and_media_id_2.split("/")
+        self.get_success(self.store.mark_local_media_as_safe(media_id_2))
+
+        # Quarantine all media by this user
+        url = "/_synapse/admin/v1/user/%s/media/quarantine" % urllib.parse.quote(
+            non_admin_user
+        )
         request, channel = self.make_request(
             "POST", url.encode("ascii"), access_token=admin_user_tok,
         )
         self.render(request)
         self.pump(1.0)
         self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
         self.assertEqual(
             json.loads(channel.result["body"].decode("utf-8")),
             {"num_quarantined": 1},
             "Expected 1 quarantined item",
         )
 
+        # Attempt to access each piece of media, the first should fail, the
+        # second should succeed.
+        self._ensure_quarantined(admin_user_tok, server_and_media_id_1)
+
         # Attempt to access each piece of media
         request, channel = self.make_request(
             "GET",
             server_and_media_id_2,
             shorthand=False,
             access_token=non_admin_user_tok,
         )
         request.render(self.download_resource)
         self.pump(1.0)
 
-        # Should be quarantined
+        # Shouldn't be quarantined
         self.assertEqual(
-            404,
+            200,
             int(channel.code),
             msg=(
-                "Expected to receive a 404 on accessing quarantined media: %s"
+                "Expected to receive a 200 on accessing not-quarantined media: %s"
                 % server_and_media_id_2
             ),
         )

From 24110255cd5f304bd380e8f58c30137489e7522e Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 23 Jun 2020 07:33:25 -0400
Subject: [PATCH 04/15] Sync ignored table names in synapse_port_db to current
 database schema (#7717)

---
 changelog.d/7717.bugfix |  1 +
 scripts/synapse_port_db | 16 ++++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/7717.bugfix

diff --git a/changelog.d/7717.bugfix b/changelog.d/7717.bugfix
new file mode 100644
index 000000000000..bcbf146fea42
--- /dev/null
+++ b/changelog.d/7717.bugfix
@@ -0,0 +1 @@
+Fix the tables ignored by `synapse_port_db` to be in sync with the current database schema.
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index c2023f3e4d3c..2eb795192ff8 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -129,10 +129,20 @@ APPEND_ONLY_TABLES = [
 
 
 IGNORED_TABLES = {
+    # We don't port these tables, as they're a faff and we can regenerate
+    # them anyway.
     "user_directory",
     "user_directory_search",
-    "users_who_share_rooms",
-    "users_in_pubic_room",
+    "user_directory_search_content",
+    "user_directory_search_docsize",
+    "user_directory_search_segdir",
+    "user_directory_search_segments",
+    "user_directory_search_stat",
+    "user_directory_search_pos",
+    "users_who_share_private_rooms",
+    "users_in_public_room",
+    # UI auth sessions have foreign keys so additional care needs to be taken,
+    # the sessions are transient anyway, so ignore them.
"ui_auth_sessions", "ui_auth_sessions_credentials", } @@ -301,8 +311,6 @@ class Porter(object): return if table in IGNORED_TABLES: - # We don't port these tables, as they're a faff and we can regenerate - # them anyway. self.progress.update(table, table_size) # Mark table as done return From 8bbe87f42d7736b3f11db0fcfb4557b214e0356d Mon Sep 17 00:00:00 2001 From: Christian Svensson Date: Tue, 23 Jun 2020 19:06:01 +0200 Subject: [PATCH 05/15] Set Content-Length for Metrics requests (#7730) HTTP requires the response to contain a Content-Length header unless chunked encoding is being used. Prometheus metrics endpoint did not set this, causing software such as prometheus-proxy to not be able to scrape synapse for metrics. Signed-off-by: Christian Svensson --- changelog.d/7730.bugfix | 1 + synapse/metrics/_exposition.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/7730.bugfix diff --git a/changelog.d/7730.bugfix b/changelog.d/7730.bugfix new file mode 100644 index 000000000000..9da254b56cc3 --- /dev/null +++ b/changelog.d/7730.bugfix @@ -0,0 +1 @@ +Fix missing `Content-Length` on HTTP responses from the metrics handler. diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index ab7f948ed453..4304c60d56b9 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -208,6 +208,7 @@ def do_GET(self): raise self.send_response(200) self.send_header("Content-Type", CONTENT_TYPE_LATEST) + self.send_header("Content-Length", str(len(output))) self.end_headers() self.wfile.write(output) @@ -261,4 +262,6 @@ def __init__(self, registry=REGISTRY): def render_GET(self, request): request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii")) - return generate_latest(self.registry) + response = generate_latest(self.registry) + request.setHeader(b"Content-Length", str(len(response))) + return response From 6920e58136671f086536332bdd6844dff0d4b429 Mon Sep 17 00:00:00 2001 From: Sorunome Date: Wed, 24 Jun 2020 11:23:55 +0200 Subject: [PATCH 06/15] add org.matrix.login.jwt so that m.login.jwt can be deprecated (#7675) --- changelog.d/7675.removal | 1 + synapse/rest/client/v1/login.py | 5 ++++- tests/rest/client/v1/test_login.py | 10 +++++++--- 3 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 changelog.d/7675.removal diff --git a/changelog.d/7675.removal b/changelog.d/7675.removal new file mode 100644 index 000000000000..2500e2c578e7 --- /dev/null +++ b/changelog.d/7675.removal @@ -0,0 +1 @@ +Deprecate `m.login.jwt` login method in favour of `org.matrix.login.jwt`, as `m.login.jwt` is not part of the Matrix spec. 
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py
index c2c9a9c3aa77..bf0f9bd07787 100644
--- a/synapse/rest/client/v1/login.py
+++ b/synapse/rest/client/v1/login.py
@@ -81,7 +81,8 @@ class LoginRestServlet(RestServlet):
     CAS_TYPE = "m.login.cas"
     SSO_TYPE = "m.login.sso"
     TOKEN_TYPE = "m.login.token"
-    JWT_TYPE = "m.login.jwt"
+    JWT_TYPE = "org.matrix.login.jwt"
+    JWT_TYPE_DEPRECATED = "m.login.jwt"
 
     def __init__(self, hs):
         super(LoginRestServlet, self).__init__()
@@ -116,6 +117,7 @@ def on_GET(self, request):
         flows = []
         if self.jwt_enabled:
             flows.append({"type": LoginRestServlet.JWT_TYPE})
+            flows.append({"type": LoginRestServlet.JWT_TYPE_DEPRECATED})
 
         if self.cas_enabled:
             # we advertise CAS for backwards compat, though MSC1721 renamed it
@@ -149,6 +151,7 @@ async def on_POST(self, request):
         try:
             if self.jwt_enabled and (
                 login_submission["type"] == LoginRestServlet.JWT_TYPE
+                or login_submission["type"] == LoginRestServlet.JWT_TYPE_DEPRECATED
             ):
                 result = await self.do_jwt_login(login_submission)
             elif login_submission["type"] == LoginRestServlet.TOKEN_TYPE:
diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py
index 9033f09fd2e3..fd9799995654 100644
--- a/tests/rest/client/v1/test_login.py
+++ b/tests/rest/client/v1/test_login.py
@@ -526,7 +526,9 @@ def jwt_encode(self, token, secret=jwt_secret):
         return jwt.encode(token, secret, "HS256").decode("ascii")
 
     def jwt_login(self, *args):
-        params = json.dumps({"type": "m.login.jwt", "token": self.jwt_encode(*args)})
+        params = json.dumps(
+            {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
+        )
         request, channel = self.make_request(b"POST", LOGIN_URL, params)
         self.render(request)
         return channel
@@ -568,7 +570,7 @@ def test_login_no_sub(self):
         self.assertEqual(channel.json_body["error"], "Invalid JWT")
 
     def test_login_no_token(self):
-        params = json.dumps({"type": "m.login.jwt"})
+        params = json.dumps({"type": "org.matrix.login.jwt"})
         request, channel = self.make_request(b"POST", LOGIN_URL, params)
         self.render(request)
         self.assertEqual(channel.result["code"], b"401", channel.result)
@@ -640,7 +642,9 @@ def jwt_encode(self, token, secret=jwt_privatekey):
         return jwt.encode(token, secret, "RS256").decode("ascii")
 
     def jwt_login(self, *args):
-        params = json.dumps({"type": "m.login.jwt", "token": self.jwt_encode(*args)})
+        params = json.dumps(
+            {"type": "org.matrix.login.jwt", "token": self.jwt_encode(*args)}
+        )
         request, channel = self.make_request(b"POST", LOGIN_URL, params)
         self.render(request)
         return channel

From 0e0a2817a29391fd777f7ee683dc03d63cf40302 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Wed, 24 Jun 2020 18:48:18 +0100
Subject: [PATCH 07/15] Yield during large v2 state res. (#7735)

State res v2 across large data sets can be very CPU intensive, and if
all the relevant events are in the cache the algorithm will run from
start to finish within a single reactor tick. This can result in
blocking the reactor tick for several seconds, which can have major
repercussions on other requests.

To fix this we simply add the occasional `sleep(0)` during iterations to
yield execution until the next reactor tick.
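The pattern used throughout the diff below is a counted loop that periodically hands control back to the reactor. A minimal sketch, where `items`, `process`, `clock` and `_YIELD_AFTER_ITERATIONS` stand for the names introduced in synapse/state/v2.py:

    for idx, item in enumerate(items, start=1):
        process(item)
        # sleep(0) schedules the continuation on the next reactor tick, so
        # other queued work gets a chance to run between batches.
        if idx % _YIELD_AFTER_ITERATIONS == 0:
            yield clock.sleep(0)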
The aim is to only do this for large data sets so that we don't impact
otherwise quick resolutions.
---
 changelog.d/7735.bugfix        |  1 +
 synapse/handlers/federation.py |  1 +
 synapse/state/__init__.py      |  6 +++-
 synapse/state/v2.py            | 56 ++++++++++++++++++++++++++++------
 tests/state/test_v2.py         |  9 ++++++
 5 files changed, 62 insertions(+), 11 deletions(-)
 create mode 100644 changelog.d/7735.bugfix

diff --git a/changelog.d/7735.bugfix b/changelog.d/7735.bugfix
new file mode 100644
index 000000000000..86959a5ca4d0
--- /dev/null
+++ b/changelog.d/7735.bugfix
@@ -0,0 +1 @@
+Fix large state resolutions from stalling Synapse for seconds at a time.
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 873f6bc39f69..3828ff0ef061 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -376,6 +376,7 @@ async def on_receive_pdu(self, origin, pdu, sent_to_us_directly=False) -> None:
                     room_version = await self.store.get_room_version_id(room_id)
 
                     state_map = await resolve_events_with_store(
+                        self.clock,
                         room_id,
                         room_version,
                         state_maps,
diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py
index 50fd843f664f..495d9f04c80c 100644
--- a/synapse/state/__init__.py
+++ b/synapse/state/__init__.py
@@ -32,6 +32,7 @@
 from synapse.state import v1, v2
 from synapse.storage.data_stores.main.events_worker import EventRedactBehaviour
 from synapse.types import StateMap
+from synapse.util import Clock
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.expiringcache import ExpiringCache
 from synapse.util.metrics import Measure, measure_func
@@ -414,6 +415,7 @@ def resolve_events(self, room_version, state_sets, event):
 
         with Measure(self.clock, "state._resolve_events"):
             new_state = yield resolve_events_with_store(
+                self.clock,
                 event.room_id,
                 room_version,
                 state_set_ids,
@@ -516,6 +518,7 @@ def resolve_state_groups(
             logger.info("Resolving conflicted state for %r", room_id)
             with Measure(self.clock, "state._resolve_events"):
                 new_state = yield resolve_events_with_store(
+                    self.clock,
                     room_id,
                     room_version,
                     list(state_groups_ids.values()),
@@ -589,6 +592,7 @@ def _make_state_cache_entry(new_state, state_groups_ids):
 
 
 def resolve_events_with_store(
+    clock: Clock,
     room_id: str,
     room_version: str,
     state_sets: List[StateMap[str]],
@@ -625,7 +629,7 @@ def resolve_events_with_store(
         )
     else:
         return v2.resolve_events_with_store(
-            room_id, room_version, state_sets, event_map, state_res_store
+            clock, room_id, room_version, state_sets, event_map, state_res_store
         )
 
 
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 57eadce4e64a..7181ecda9a81 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -27,12 +27,20 @@
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
 from synapse.events import EventBase
 from synapse.types import StateMap
+from synapse.util import Clock
 
 logger = logging.getLogger(__name__)
 
 
+# We want to yield to the reactor occasionally during state res when dealing
+# with large data sets, so that we don't exhaust the reactor. This is done by
+# yielding to reactor during loops every N iterations.
+_YIELD_AFTER_ITERATIONS = 100
+
+
 @defer.inlineCallbacks
 def resolve_events_with_store(
+    clock: Clock,
     room_id: str,
     room_version: str,
     state_sets: List[StateMap[str]],
@@ -42,13 +50,11 @@ def resolve_events_with_store(
     """Resolves the state using the v2 state resolution algorithm
 
     Args:
+        clock
         room_id: the room we are working in
-
         room_version: The room version
-
         state_sets: List of dicts of (type, state_key) -> event_id,
             which are the different state groups to resolve.
-
         event_map:
             a dict from event_id to event, for any events that we happen to
             have in flight (eg, those currently being persisted). This will be
@@ -113,7 +119,7 @@ def resolve_events_with_store(
     )
 
     sorted_power_events = yield _reverse_topological_power_sort(
-        room_id, power_events, event_map, state_res_store, full_conflicted_set
+        clock, room_id, power_events, event_map, state_res_store, full_conflicted_set
     )
 
     logger.debug("sorted %d power events", len(sorted_power_events))
@@ -142,7 +148,7 @@ def resolve_events_with_store(
     pl = resolved_state.get((EventTypes.PowerLevels, ""), None)
 
     leftover_events = yield _mainline_sort(
-        room_id, leftover_events, pl, event_map, state_res_store
+        clock, room_id, leftover_events, pl, event_map, state_res_store
     )
 
     logger.debug("resolving remaining events")
@@ -317,12 +323,13 @@ def _add_event_and_auth_chain_to_graph(
 
 @defer.inlineCallbacks
 def _reverse_topological_power_sort(
-    room_id, event_ids, event_map, state_res_store, auth_diff
+    clock, room_id, event_ids, event_map, state_res_store, auth_diff
 ):
     """Returns a list of the event_ids sorted by reverse topological ordering,
     and then by power level and origin_server_ts
 
     Args:
+        clock (Clock)
         room_id (str): the room we are working in
         event_ids (list[str]): The events to sort
         event_map (dict[str,FrozenEvent])
@@ -334,18 +341,28 @@ def _reverse_topological_power_sort(
     """
 
     graph = {}
-    for event_id in event_ids:
+    for idx, event_id in enumerate(event_ids, start=1):
         yield _add_event_and_auth_chain_to_graph(
             graph, room_id, event_id, event_map, state_res_store, auth_diff
         )
 
+        # We yield occasionally when we're working with large data sets to
+        # ensure that we don't block the reactor loop for too long.
+        if idx % _YIELD_AFTER_ITERATIONS == 0:
+            yield clock.sleep(0)
+
     event_to_pl = {}
-    for event_id in graph:
+    for idx, event_id in enumerate(graph, start=1):
         pl = yield _get_power_level_for_sender(
             room_id, event_id, event_map, state_res_store
         )
         event_to_pl[event_id] = pl
 
+        # We yield occasionally when we're working with large data sets to
+        # ensure that we don't block the reactor loop for too long.
+        if idx % _YIELD_AFTER_ITERATIONS == 0:
+            yield clock.sleep(0)
+
     def _get_power_order(event_id):
         ev = event_map[event_id]
         pl = event_to_pl[event_id]
@@ -423,12 +440,13 @@ def _iterative_auth_checks(
 
 @defer.inlineCallbacks
 def _mainline_sort(
-    room_id, event_ids, resolved_power_event_id, event_map, state_res_store
+    clock, room_id, event_ids, resolved_power_event_id, event_map, state_res_store
 ):
     """Returns a sorted list of event_ids sorted by mainline ordering based on
     the given event resolved_power_event_id
 
     Args:
+        clock (Clock)
         room_id (str): room we're working in
         event_ids (list[str]): Events to sort
         resolved_power_event_id (str): The final resolved power level event ID
@@ -438,8 +456,14 @@ def _mainline_sort(
     Returns:
         Deferred[list[str]]: The sorted list
     """
+    if not event_ids:
+        # It's possible for there to be no event IDs here to sort, so we can
+        # skip calculating the mainline in that case.
+        return []
+
     mainline = []
     pl = resolved_power_event_id
+    idx = 0
     while pl:
         mainline.append(pl)
         pl_ev = yield _get_event(room_id, pl, event_map, state_res_store)
@@ -453,17 +477,29 @@ def _mainline_sort(
                 pl = aid
                 break
 
+        # We yield occasionally when we're working with large data sets to
+        # ensure that we don't block the reactor loop for too long.
+        if idx != 0 and idx % _YIELD_AFTER_ITERATIONS == 0:
+            yield clock.sleep(0)
+
+        idx += 1
+
     mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(reversed(mainline))}
 
     event_ids = list(event_ids)
 
     order_map = {}
-    for ev_id in event_ids:
+    for idx, ev_id in enumerate(event_ids, start=1):
         depth = yield _get_mainline_depth_for_event(
             event_map[ev_id], mainline_map, event_map, state_res_store
         )
         order_map[ev_id] = (depth, event_map[ev_id].origin_server_ts, ev_id)
 
+        # We yield occasionally when we're working with large data sets to
+        # ensure that we don't block the reactor loop for too long.
+        if idx % _YIELD_AFTER_ITERATIONS == 0:
+            yield clock.sleep(0)
+
     event_ids.sort(key=lambda ev_id: order_map[ev_id])
 
     return event_ids
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index cdc347bc5363..38f9b423efef 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -17,6 +17,8 @@
 
 import attr
 
+from twisted.internet import defer
+
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.room_versions import RoomVersions
 from synapse.event_auth import auth_types_for_event
@@ -41,6 +43,11 @@
 ORIGIN_SERVER_TS = 0
 
 
+class FakeClock:
+    def sleep(self, msec):
+        return defer.succeed(None)
+
+
 class FakeEvent(object):
     """A fake event we use as a convenience.
 
@@ -417,6 +424,7 @@ def do_check(self, events, edges, expected_state_ids):
                 state_before = dict(state_at_event[prev_events[0]])
             else:
                 state_d = resolve_events_with_store(
+                    FakeClock(),
                     ROOM_ID,
                     RoomVersions.V2.identifier,
                     [state_at_event[n] for n in prev_events],
@@ -565,6 +573,7 @@ def test_event_map_none(self):
 
         # Test that we correctly handle passing `None` as the event_map
         state_d = resolve_events_with_store(
+            FakeClock(),
            ROOM_ID,
             RoomVersions.V2.identifier,
             [self.state_at_bob, self.state_at_charlie],

From b099ef07d622a3d45bce167b4bcd94ff03d0b51e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dagfinn=20Ilmari=20Manns=C3=A5ker?=
Date: Thu, 25 Jun 2020 17:45:35 +0100
Subject: [PATCH 08/15] Make tox actions work on Debian 10 (#7703)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Remove the requirement for a specific version of Python
- Move dep comment to a separate line, Tox 3.7.0 doesn't like trailing ones

Signed-off-by: Dagfinn Ilmari Mannsåker
---
 changelog.d/7703.misc | 1 +
 tox.ini               | 6 ++----
 2 files changed, 3 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/7703.misc

diff --git a/changelog.d/7703.misc b/changelog.d/7703.misc
new file mode 100644
index 000000000000..6e89897e455f
--- /dev/null
+++ b/changelog.d/7703.misc
@@ -0,0 +1 @@
+Make Tox actions work on Debian 10.
diff --git a/tox.ini b/tox.ini
index 463a34d13776..812fbff200d8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -2,7 +2,6 @@
 envlist = packaging, py35, py36, py37, py38, check_codestyle, check_isort
 
 [base]
-basepython = python3.7
 deps =
     mock
     python-subunit
@@ -120,11 +119,11 @@ commands =
 
 [testenv:check_codestyle]
 skip_install = True
-basepython = python3.6
 deps =
     flake8
     flake8-comprehensions
-    black==19.10b0  # We pin so that our tests don't start failing on new releases of black.
+    # We pin so that our tests don't start failing on new releases of black.
+    black==19.10b0
 commands =
     python -m black --check --diff .
     /bin/sh -c "flake8 synapse tests scripts scripts-dev synctl {env:PEP8SUFFIX:}"
@@ -140,7 +139,6 @@ skip_install = True
 deps =
    towncrier>=18.6.0rc1
 commands =
    python -m towncrier.check --compare-with=origin/develop
-basepython = python3.6
 
 [testenv:check-sampleconfig]
 commands = {toxinidir}/scripts-dev/generate_sample_config --check

From 177b2d0c19e4127229488ad43d57aba459c719ed Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 25 Jun 2020 17:58:55 +0100
Subject: [PATCH 09/15] Move flake8 to end. Don't exit script on failure
 (#7738)

---
 changelog.d/7738.misc | 1 +
 scripts-dev/lint.sh   | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/7738.misc

diff --git a/changelog.d/7738.misc b/changelog.d/7738.misc
new file mode 100644
index 000000000000..424ac15d66b4
--- /dev/null
+++ b/changelog.d/7738.misc
@@ -0,0 +1 @@
+Move `flake8` to the end of `scripts-dev/lint.sh` as it takes the longest and could cause the script to exit early.
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
index 34c4854e1136..6f1ba2293196 100755
--- a/scripts-dev/lint.sh
+++ b/scripts-dev/lint.sh
@@ -2,8 +2,8 @@
 #
 # Runs linting scripts over the local Synapse checkout
 # isort - sorts import statements
-# flake8 - lints and finds mistakes
 # black - opinionated code formatter
+# flake8 - lints and finds mistakes
 
 set -e
 
@@ -16,6 +16,6 @@ fi
 echo "Linting these locations: $files"
 
 isort -y -rc $files
-flake8 $files
 python3 -m black $files
 ./scripts-dev/config-lint.sh
+flake8 $files

From 831b31e563eb181a8bc0311a8c28519ef9f131a1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 26 Jun 2020 10:44:52 +0100
Subject: [PATCH 10/15] Add another yield point to state res v2 (#7746)

---
 changelog.d/7746.bugfix |  1 +
 synapse/state/v2.py     | 12 ++++++++++--
 2 files changed, 11 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/7746.bugfix

diff --git a/changelog.d/7746.bugfix b/changelog.d/7746.bugfix
new file mode 100644
index 000000000000..86959a5ca4d0
--- /dev/null
+++ b/changelog.d/7746.bugfix
@@ -0,0 +1 @@
+Fix large state resolutions from stalling Synapse for seconds at a time.
diff --git a/synapse/state/v2.py b/synapse/state/v2.py
index 7181ecda9a81..bf6caa094633 100644
--- a/synapse/state/v2.py
+++ b/synapse/state/v2.py
@@ -126,6 +126,7 @@ def resolve_events_with_store(
 
     # Now sequentially auth each one
     resolved_state = yield _iterative_auth_checks(
+        clock,
         room_id,
         room_version,
         sorted_power_events,
@@ -154,6 +155,7 @@ def resolve_events_with_store(
     logger.debug("resolving remaining events")
 
     resolved_state = yield _iterative_auth_checks(
+        clock,
         room_id,
         room_version,
         leftover_events,
@@ -378,12 +380,13 @@ def _get_power_order(event_id):
 
 @defer.inlineCallbacks
 def _iterative_auth_checks(
-    room_id, room_version, event_ids, base_state, event_map, state_res_store
+    clock, room_id, room_version, event_ids, base_state, event_map, state_res_store
 ):
     """Sequentially apply auth checks to each event in given list, updating the
     state as it goes along.
 
     Args:
+        clock (Clock)
         room_id (str)
         room_version (str)
         event_ids (list[str]): Ordered list of events to apply auth checks to
@@ -397,7 +400,7 @@ def _iterative_auth_checks(
     resolved_state = base_state.copy()
     room_version_obj = KNOWN_ROOM_VERSIONS[room_version]
 
-    for event_id in event_ids:
+    for idx, event_id in enumerate(event_ids, start=1):
         event = event_map[event_id]
 
         auth_events = {}
@@ -435,6 +438,11 @@ def _iterative_auth_checks(
         except AuthError:
             pass
 
+        # We yield occasionally when we're working with large data sets to
+        # ensure that we don't block the reactor loop for too long.
+        if idx % _YIELD_AFTER_ITERATIONS == 0:
+            yield clock.sleep(0)
+
     return resolved_state
 

From 2f6afdd8b45c27f30f91873ab3e42cf6ada7045a Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 30 Jun 2020 10:11:36 +0100
Subject: [PATCH 11/15] Explain the purpose of the "tests" conditional
 dependency requirement (#7751)

---
 changelog.d/7751.misc          | 1 +
 synapse/python_dependencies.py | 4 ++++
 2 files changed, 5 insertions(+)
 create mode 100644 changelog.d/7751.misc

diff --git a/changelog.d/7751.misc b/changelog.d/7751.misc
new file mode 100644
index 000000000000..eb10ecd92e5d
--- /dev/null
+++ b/changelog.d/7751.misc
@@ -0,0 +1 @@
+Explain the "test" conditional requirement for dependencies is not all of the modules necessary to run the unit tests.
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 92d3709ae38f..b1cac901eb92 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -93,6 +93,10 @@
     "oidc": ["authlib>=0.14.0"],
     "systemd": ["systemd-python>=231"],
     "url_preview": ["lxml>=3.5.0"],
+    # Dependencies which are exclusively required by unit test code. This is
+    # NOT a list of all modules that are necessary to run the unit tests.
+    # Tests assume that all optional dependencies are installed.
+    #
     # parameterized_class decorator was introduced in parameterized 0.7.0
     "test": ["mock>=2.0", "parameterized>=0.7.0"],
     "sentry": ["sentry-sdk>=0.7.2"],

From a99658074dc3b2b0f6abcb4f98d56bc1386398aa Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 30 Jun 2020 16:58:06 +0100
Subject: [PATCH 12/15] Add some metrics for inbound and outbound federation
 processing times (#7755)

---
 changelog.d/7755.misc                   |  1 +
 synapse/federation/federation_server.py | 37 ++++++++++++++-----------
 synapse/federation/sender/__init__.py   | 10 ++++++-
 synapse/handlers/appservice.py          |  6 ++++
 synapse/metrics/__init__.py             |  6 ++++
 5 files changed, 43 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/7755.misc

diff --git a/changelog.d/7755.misc b/changelog.d/7755.misc
new file mode 100644
index 000000000000..1fc29206ac5d
--- /dev/null
+++ b/changelog.d/7755.misc
@@ -0,0 +1 @@
+Add some metrics for inbound and outbound federation latencies: `synapse_federation_server_pdu_process_time` and `synapse_event_processing_lag_by_event`.
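Both metrics use the standard prometheus_client Histogram API: the unlabelled histogram is driven by the `.time()` context manager, while the labelled one is fed explicit observations via `.labels(...).observe(...)`. A self-contained sketch of the two styles (the metric names and values here are illustrative, not the ones added by this patch):

    import time

    from prometheus_client import Histogram

    work_time = Histogram("example_work_seconds", "Time taken to do some work")
    lag = Histogram("example_lag_ms", "Lag between two timestamps", ["name"])

    with work_time.time():  # the elapsed time is observed when the block exits
        time.sleep(0.01)  # stand-in for the timed work

    # Pick the label value first, then record a single observation.
    lag.labels("federation_sender").observe(250)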
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index afe0a8238bdc..e704cf2f4437 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -18,7 +18,7 @@
 from typing import Any, Callable, Dict, List, Match, Optional, Tuple, Union
 
 from canonicaljson import json
-from prometheus_client import Counter
+from prometheus_client import Counter, Histogram
 
 from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
@@ -70,6 +70,10 @@
     "synapse_federation_server_received_queries", "", ["type"]
 )
 
+pdu_process_time = Histogram(
+    "synapse_federation_server_pdu_process_time", "Time taken to process an event",
+)
+
 
 class FederationServer(FederationBase):
     def __init__(self, hs):
@@ -271,21 +275,22 @@ async def process_pdus_for_room(room_id: str):
 
             for pdu in pdus_by_room[room_id]:
                 event_id = pdu.event_id
-                with nested_logging_context(event_id):
-                    try:
-                        await self._handle_received_pdu(origin, pdu)
-                        pdu_results[event_id] = {}
-                    except FederationError as e:
-                        logger.warning("Error handling PDU %s: %s", event_id, e)
-                        pdu_results[event_id] = {"error": str(e)}
-                    except Exception as e:
-                        f = failure.Failure()
-                        pdu_results[event_id] = {"error": str(e)}
-                        logger.error(
-                            "Failed to handle PDU %s",
-                            event_id,
-                            exc_info=(f.type, f.value, f.getTracebackObject()),
-                        )
+                with pdu_process_time.time():
+                    with nested_logging_context(event_id):
+                        try:
+                            await self._handle_received_pdu(origin, pdu)
+                            pdu_results[event_id] = {}
+                        except FederationError as e:
+                            logger.warning("Error handling PDU %s: %s", event_id, e)
+                            pdu_results[event_id] = {"error": str(e)}
+                        except Exception as e:
+                            f = failure.Failure()
+                            pdu_results[event_id] = {"error": str(e)}
+                            logger.error(
+                                "Failed to handle PDU %s",
+                                event_id,
+                                exc_info=(f.type, f.value, f.getTracebackObject()),
+                            )
 
         await concurrently_execute(
             process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 5b8faea4e72e..23fb5156834f 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -201,7 +201,15 @@ async def handle_event(event: EventBase) -> None:
 
                 logger.debug("Sending %s to %r", event, destinations)
 
-                self._send_pdu(event, destinations)
+                if destinations:
+                    self._send_pdu(event, destinations)
+
+                    now = self.clock.time_msec()
+                    ts = await self.store.get_received_ts(event.event_id)
+
+                    synapse.metrics.event_processing_lag_by_event.labels(
+                        "federation_sender"
+                    ).observe(now - ts)
 
             async def handle_room_events(events: Iterable[EventBase]) -> None:
                 with Measure(self.clock, "handle_room_events"):
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index ac1b64caff2a..f7d9fd621ef2 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -114,6 +114,12 @@ def start_scheduler():
                     for service in services:
                         self.scheduler.submit_event_for_as(service, event)
 
+                    now = self.clock.time_msec()
+                    ts = yield self.store.get_received_ts(event.event_id)
+                    synapse.metrics.event_processing_lag_by_event.labels(
+                        "appservice_sender"
+                    ).observe(now - ts)
+
                 @defer.inlineCallbacks
                 def handle_room_events(events):
                     for event in events:
diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py
index 087a49d65df7..6035672698bd 100644
--- a/synapse/metrics/__init__.py
+++ b/synapse/metrics/__init__.py
@@ -463,6 +463,12 @@ def collect(self):
 # finished being processed.
 event_processing_lag = Gauge("synapse_event_processing_lag", "", ["name"])
 
+event_processing_lag_by_event = Histogram(
+    "synapse_event_processing_lag_by_event",
+    "Time between an event being persisted and it being queued up to be sent to the relevant remote servers",
+    ["name"],
+)
+
 # Build info of the running server.
 build_info = Gauge(
     "synapse_build_info", "Build information", ["pythonversion", "version", "osversion"]

From 71cccf1593bd73a1baef87483117b9be9a99b837 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 30 Jun 2020 15:41:36 -0400
Subject: [PATCH 13/15] Additional configuration options for auto-join rooms
 (#7763)

---
 changelog.d/7763.feature        |   1 +
 docs/sample_config.yaml         |  60 ++++++++-
 synapse/config/registration.py  | 106 ++++++++++++++-
 synapse/handlers/register.py    | 230 +++++++++++++++++++++++---------
 synapse/rest/admin/rooms.py     |   4 +-
 tests/handlers/test_register.py | 212 ++++++++++++++++++++++++++++-
 6 files changed, 542 insertions(+), 71 deletions(-)
 create mode 100644 changelog.d/7763.feature

diff --git a/changelog.d/7763.feature b/changelog.d/7763.feature
new file mode 100644
index 000000000000..4a7563dad392
--- /dev/null
+++ b/changelog.d/7763.feature
@@ -0,0 +1 @@
+Expand the configuration options for auto-join rooms.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 05e7bf215ac8..2d27b0b34dcc 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1210,7 +1210,11 @@ account_threepid_delegates:
 #enable_3pid_changes: false
 
 # Users who register on this homeserver will automatically be joined
-# to these rooms
+# to these rooms.
+#
+# By default, any room aliases included in this list will be created
+# as a publicly joinable room when the first user registers for the
+# homeserver. This behaviour can be customised with the settings below.
 #
 #auto_join_rooms:
 #  - "#example:example.com"
@@ -1218,10 +1222,62 @@ account_threepid_delegates:
 # Where auto_join_rooms are specified, setting this flag ensures that
 # the rooms exist by creating them when the first user on the
 # homeserver registers.
+#
+# By default the auto-created rooms are publicly joinable from any federated
+# server. Use the autocreate_auto_join_rooms_federated and
+# autocreate_auto_join_room_preset settings below to customise this behaviour.
+#
 # Setting to false means that if the rooms are not manually created,
 # users cannot be auto-joined since they do not exist.
 #
-#autocreate_auto_join_rooms: true
+# Defaults to true. Uncomment the following line to disable automatically
+# creating auto-join rooms.
+#
+#autocreate_auto_join_rooms: false
+
+# Whether the auto_join_rooms that are auto-created are available via
+# federation. Only has an effect if autocreate_auto_join_rooms is true.
+#
+# Note that whether a room is federated cannot be modified after
+# creation.
+#
+# Defaults to true: the room will be joinable from other servers.
+# Uncomment the following to prevent users from other homeservers from
+# joining these rooms.
+#
+#autocreate_auto_join_rooms_federated: false
+
+# The room preset to use when auto-creating one of auto_join_rooms. Only has an
+# effect if autocreate_auto_join_rooms is true.
+#
+# This can be one of "public_chat", "private_chat", or "trusted_private_chat".
+# If a value of "private_chat" or "trusted_private_chat" is used then
+# auto_join_mxid_localpart must also be configured.
+#
+# Defaults to "public_chat", meaning that the room is joinable by anyone, including
+# federated servers if autocreate_auto_join_rooms_federated is true (the default).
+# Uncomment the following to require an invitation to join these rooms.
+#
+#autocreate_auto_join_room_preset: private_chat
+
+# The local part of the user id which is used to create auto_join_rooms if
+# autocreate_auto_join_rooms is true. If this is not provided then the
+# initial user account that registers will be used to create the rooms.
+#
+# The user id is also used to invite new users to any auto-join rooms which
+# are set to invite-only.
+#
+# It *must* be configured if autocreate_auto_join_room_preset is set to
+# "private_chat" or "trusted_private_chat".
+#
+# Note that this must be specified in order for new users to be correctly
+# invited to any auto-join rooms which have been set to invite-only (either
+# at the time of creation or subsequently).
+#
+# Note that, if the room already exists, this user must be joined and
+# have the appropriate permissions to invite new members.
+#
+#auto_join_mxid_localpart: system
 
 # When auto_join_rooms is specified, setting this flag to false prevents
 # guest accounts from being automatically joined to the rooms.
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index fecced2d57ed..6badf4e75d07 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -18,8 +18,9 @@
 
 import pkg_resources
 
+from synapse.api.constants import RoomCreationPreset
 from synapse.config._base import Config, ConfigError
-from synapse.types import RoomAlias
+from synapse.types import RoomAlias, UserID
 from synapse.util.stringutils import random_string_with_symbols
 
 
@@ -127,7 +128,50 @@ def read_config(self, config, **kwargs):
         for room_alias in self.auto_join_rooms:
             if not RoomAlias.is_valid(room_alias):
                 raise ConfigError("Invalid auto_join_rooms entry %s" % (room_alias,))
+
+        # Options for creating auto-join rooms if they do not exist yet.
         self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)
+        self.autocreate_auto_join_rooms_federated = config.get(
+            "autocreate_auto_join_rooms_federated", True
+        )
+        self.autocreate_auto_join_room_preset = (
+            config.get("autocreate_auto_join_room_preset")
+            or RoomCreationPreset.PUBLIC_CHAT
+        )
+        self.auto_join_room_requires_invite = self.autocreate_auto_join_room_preset in {
+            RoomCreationPreset.PRIVATE_CHAT,
+            RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+        }
+
+        # Pull the creator/inviter from the configuration, this gets used to
+        # send invites for invite-only rooms.
+        mxid_localpart = config.get("auto_join_mxid_localpart")
+        self.auto_join_user_id = None
+        if mxid_localpart:
+            # Convert the localpart to a full mxid.
+            self.auto_join_user_id = UserID(
+                mxid_localpart, self.server_name
+            ).to_string()
+
+        if self.autocreate_auto_join_rooms:
+            # Ensure the preset is a known value.
+            if self.autocreate_auto_join_room_preset not in {
+                RoomCreationPreset.PUBLIC_CHAT,
+                RoomCreationPreset.PRIVATE_CHAT,
+                RoomCreationPreset.TRUSTED_PRIVATE_CHAT,
+            }:
+                raise ConfigError("Invalid value for autocreate_auto_join_room_preset")
+            # If the preset requires invitations to be sent, ensure there's a
+            # configured user to send them from.
+            if self.auto_join_room_requires_invite:
+                if not mxid_localpart:
+                    raise ConfigError(
+                        "The configuration option `auto_join_mxid_localpart` is required if "
+                        "`autocreate_auto_join_room_preset` is set to private_chat or trusted_private_chat, such that "
+                        "Synapse knows who to send invitations from. Please "
+                        "configure `auto_join_mxid_localpart`."
+                    )
+
         self.auto_join_rooms_for_guests = config.get("auto_join_rooms_for_guests", True)
 
         self.enable_set_displayname = config.get("enable_set_displayname", True)
@@ -357,7 +401,11 @@ def generate_config_section(self, generate_secrets=False, **kwargs):
         #enable_3pid_changes: false
 
         # Users who register on this homeserver will automatically be joined
-        # to these rooms
+        # to these rooms.
+        #
+        # By default, any room aliases included in this list will be created
+        # as a publicly joinable room when the first user registers for the
+        # homeserver. This behaviour can be customised with the settings below.
         #
         #auto_join_rooms:
         #  - "#example:example.com"
@@ -365,10 +413,62 @@ def generate_config_section(self, generate_secrets=False, **kwargs):
         # Where auto_join_rooms are specified, setting this flag ensures that
        # the rooms exist by creating them when the first user on the
        # homeserver registers.
        #
        # By default the auto-created rooms are publicly joinable from any federated
        # server. Use the autocreate_auto_join_rooms_federated and
        # autocreate_auto_join_room_preset settings below to customise this behaviour.
        #
        # Setting to false means that if the rooms are not manually created,
        # users cannot be auto-joined since they do not exist.
        #
        # Defaults to true. Uncomment the following line to disable automatically
        # creating auto-join rooms.
        #
        #autocreate_auto_join_rooms: false

        # Whether the auto_join_rooms that are auto-created are available via
        # federation. Only has an effect if autocreate_auto_join_rooms is true.
        #
        # Note that whether a room is federated cannot be modified after
        # creation.
        #
        # Defaults to true: the room will be joinable from other servers.
        # Uncomment the following to prevent users from other homeservers from
        # joining these rooms.
        #
        #autocreate_auto_join_rooms_federated: false

        # The room preset to use when auto-creating one of auto_join_rooms. Only has an
        # effect if autocreate_auto_join_rooms is true.
        #
        # This can be one of "public_chat", "private_chat", or "trusted_private_chat".
        # If a value of "private_chat" or "trusted_private_chat" is used then
        # auto_join_mxid_localpart must also be configured.
        #
        # Defaults to "public_chat", meaning that the room is joinable by anyone, including
        # federated servers if autocreate_auto_join_rooms_federated is true (the default).
        # Uncomment the following to require an invitation to join these rooms.
        #
        #autocreate_auto_join_room_preset: private_chat

        # The local part of the user id which is used to create auto_join_rooms if
        # autocreate_auto_join_rooms is true. If this is not provided then the
        # initial user account that registers will be used to create the rooms.
        #
        # The user id is also used to invite new users to any auto-join rooms which
        # are set to invite-only.
        #
        # It *must* be configured if autocreate_auto_join_room_preset is set to
        # "private_chat" or "trusted_private_chat".
+        #
+        # Note that this must be specified in order for new users to be correctly
+        # invited to any auto-join rooms which have been set to invite-only (either
+        # at the time of creation or subsequently).
+        #
+        # Note that, if the room already exists, this user must be joined and
+        # have the appropriate permissions to invite new members.
+        #
+        #auto_join_mxid_localpart: system
 
         # When auto_join_rooms is specified, setting this flag to false prevents
         # guest accounts from being automatically joined to the rooms.
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 51979ea43e2f..78c3772ac15c 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -17,7 +17,7 @@
 import logging
 
 from synapse import types
-from synapse.api.constants import MAX_USERID_LENGTH, LoginType
+from synapse.api.constants import MAX_USERID_LENGTH, EventTypes, JoinRules, LoginType
 from synapse.api.errors import AuthError, Codes, ConsentNotGivenError, SynapseError
 from synapse.config.server import is_threepid_reserved
 from synapse.http.servlet import assert_params_in_dict
@@ -26,7 +26,8 @@
     ReplicationPostRegisterActionsServlet,
     ReplicationRegisterServlet,
 )
-from synapse.types import RoomAlias, RoomID, UserID, create_requester
+from synapse.storage.state import StateFilter
+from synapse.types import RoomAlias, UserID, create_requester
 from synapse.util.async_helpers import Linearizer
 
 from ._base import BaseHandler
@@ -270,51 +271,157 @@ async def register_user(
 
         return user_id
 
-    async def _auto_join_rooms(self, user_id):
-        """Automatically joins users to auto join rooms - creating the room in the first place
-        if the user is the first to be created.
+    async def _create_and_join_rooms(self, user_id: str):
+        """
+        Create the auto-join rooms and join or invite the user to them.
+
+        This should only be called when the first "real" user registers.
 
         Args:
-            user_id(str): The user to join
+            user_id: The user to join
         """
-        # auto-join the user to any rooms we're supposed to dump them into
-        fake_requester = create_requester(user_id)
+        # Getting the handlers during init gives a dependency loop.
+        room_creation_handler = self.hs.get_room_creation_handler()
+        room_member_handler = self.hs.get_room_member_handler()
 
-        # try to create the room if we're the first real user on the server. Note
-        # that an auto-generated support or bot user is not a real user and will never be
-        # the user to create the room
-        should_auto_create_rooms = False
-        is_real_user = await self.store.is_real_user(user_id)
-        if self.hs.config.autocreate_auto_join_rooms and is_real_user:
-            count = await self.store.count_real_users()
-            should_auto_create_rooms = count == 1
-        for r in self.hs.config.auto_join_rooms:
+        # Generate a stub for how the rooms will be configured.
+        stub_config = {
+            "preset": self.hs.config.registration.autocreate_auto_join_room_preset,
+        }
+
+        # If the configuration provides a user ID to create rooms with, use
+        # that instead of the first user registered.
+        requires_join = False
+        if self.hs.config.registration.auto_join_user_id:
+            fake_requester = create_requester(
+                self.hs.config.registration.auto_join_user_id
+            )
+
+            # If the room requires an invite, add the user to the list of invites.
+            if self.hs.config.registration.auto_join_room_requires_invite:
+                stub_config["invite"] = [user_id]
+
+            # If the room is being created by a different user, the first user
+            # registered needs to join it. Note that in the case of an invitation
+            # being necessary this will occur after the invite was sent.
+            requires_join = True
+        else:
+            fake_requester = create_requester(user_id)
+
+        # Choose whether to federate the new room.
+        if not self.hs.config.registration.autocreate_auto_join_rooms_federated:
+            stub_config["creation_content"] = {"m.federate": False}
+
+        for r in self.hs.config.registration.auto_join_rooms:
             logger.info("Auto-joining %s to %s", user_id, r)
+
             try:
-                if should_auto_create_rooms:
-                    room_alias = RoomAlias.from_string(r)
-                    if self.hs.hostname != room_alias.domain:
-                        logger.warning(
-                            "Cannot create room alias %s, "
-                            "it does not match server domain",
-                            r,
-                        )
-                    else:
-                        # create room expects the localpart of the room alias
-                        room_alias_localpart = room_alias.localpart
-
-                        # getting the RoomCreationHandler during init gives a dependency
-                        # loop
-                        await self.hs.get_room_creation_handler().create_room(
-                            fake_requester,
-                            config={
-                                "preset": "public_chat",
-                                "room_alias_name": room_alias_localpart,
-                            },
+                room_alias = RoomAlias.from_string(r)
+
+                if self.hs.hostname != room_alias.domain:
+                    logger.warning(
+                        "Cannot create room alias %s, "
+                        "it does not match server domain",
+                        r,
+                    )
+                else:
+                    # A shallow copy is OK here since the only key that is
+                    # modified is room_alias_name.
+                    config = stub_config.copy()
+                    # create room expects the localpart of the room alias
+                    config["room_alias_name"] = room_alias.localpart
+
+                    info, _ = await room_creation_handler.create_room(
+                        fake_requester, config=config, ratelimit=False,
+                    )
+
+                    # If the room does not require an invite, but another user
+                    # created it, then ensure the first user joins it.
+                    if requires_join:
+                        await room_member_handler.update_membership(
+                            requester=create_requester(user_id),
+                            target=UserID.from_string(user_id),
+                            room_id=info["room_id"],
+                            # Since it was just created, there are no remote hosts.
+                            remote_room_hosts=[],
+                            action="join",
                             ratelimit=False,
                         )
+
+            except ConsentNotGivenError as e:
+                # Technically not necessary to pull out this error though
+                # moving away from bare excepts is a good thing to do.
+                logger.error("Failed to join new user to %r: %r", r, e)
+            except Exception as e:
+                logger.error("Failed to join new user to %r: %r", r, e)
+
+    async def _join_rooms(self, user_id: str):
+        """
+        Join or invite the user to the auto-join rooms.
+
+        Args:
+            user_id: The user to join
+        """
+        room_member_handler = self.hs.get_room_member_handler()
+
+        for r in self.hs.config.registration.auto_join_rooms:
+            logger.info("Auto-joining %s to %s", user_id, r)
+
+            try:
+                if RoomAlias.is_valid(r):
+                    room_alias = RoomAlias.from_string(r)
+
+                    (
+                        room_id,
+                        remote_room_hosts,
+                    ) = await room_member_handler.lookup_room_alias(room_alias)
+                    room_id = room_id.to_string()
                 else:
-                    await self._join_user_to_room(fake_requester, r)
+                    raise SynapseError(
+                        400, "%s was not a legal room alias" % (r,)
+                    )
+
+                # Calculate whether the room requires an invite or can be
+                # joined directly. Note that unless a join rule of public exists,
+                # it is treated as requiring an invite.
+                requires_invite = True
+
+                state = await self.store.get_filtered_current_state_ids(
+                    room_id, StateFilter.from_types([(EventTypes.JoinRules, "")])
+                )
+
+                event_id = state.get((EventTypes.JoinRules, ""))
+                if event_id:
+                    join_rules_event = await self.store.get_event(
+                        event_id, allow_none=True
+                    )
+                    if join_rules_event:
+                        join_rule = join_rules_event.content.get("join_rule", None)
+                        requires_invite = join_rule and join_rule != JoinRules.PUBLIC
+
+                # Send the invite, if necessary.
+ if requires_invite: + await room_member_handler.update_membership( + requester=create_requester( + self.hs.config.registration.auto_join_user_id + ), + target=UserID.from_string(user_id), + room_id=room_id, + remote_room_hosts=remote_room_hosts, + action="invite", + ratelimit=False, + ) + + # Send the join. + await room_member_handler.update_membership( + requester=create_requester(user_id), + target=UserID.from_string(user_id), + room_id=room_id, + remote_room_hosts=remote_room_hosts, + action="join", + ratelimit=False, + ) + except ConsentNotGivenError as e: # Technically not necessary to pull out this error though # moving away from bare excepts is a good thing to do. @@ -322,6 +429,29 @@ async def _auto_join_rooms(self, user_id): except Exception as e: logger.error("Failed to join new user to %r: %r", r, e) + async def _auto_join_rooms(self, user_id: str): + """Automatically joins users to auto join rooms - creating the room in the first place + if the user is the first to be created. + + Args: + user_id: The user to join + """ + # auto-join the user to any rooms we're supposed to dump them into + + # try to create the room if we're the first real user on the server. Note + # that an auto-generated support or bot user is not a real user and will never be + # the user to create the room + should_auto_create_rooms = False + is_real_user = await self.store.is_real_user(user_id) + if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user: + count = await self.store.count_real_users() + should_auto_create_rooms = count == 1 + + if should_auto_create_rooms: + await self._create_and_join_rooms(user_id) + else: + await self._join_rooms(user_id) + async def post_consent_actions(self, user_id): """A series of registration actions that can only be carried out once consent has been granted @@ -392,30 +522,6 @@ async def _generate_user_id(self): self._next_generated_user_id += 1 return str(id) - async def _join_user_to_room(self, requester, room_identifier): - room_member_handler = self.hs.get_room_member_handler() - if RoomID.is_valid(room_identifier): - room_id = room_identifier - elif RoomAlias.is_valid(room_identifier): - room_alias = RoomAlias.from_string(room_identifier) - room_id, remote_room_hosts = await room_member_handler.lookup_room_alias( - room_alias - ) - room_id = room_id.to_string() - else: - raise SynapseError( - 400, "%s was not legal room ID or room alias" % (room_identifier,) - ) - - await room_member_handler.update_membership( - requester=requester, - target=requester.user, - room_id=room_id, - remote_room_hosts=remote_room_hosts, - action="join", - ratelimit=False, - ) - def check_registration_ratelimit(self, address): """A simple helper method to check whether the registration rate limit has been hit for a given IP address diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 8173baef8f2c..e07c32118db8 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -15,7 +15,7 @@ import logging from typing import List, Optional -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import EventTypes, JoinRules, Membership, RoomCreationPreset from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( RestServlet, @@ -77,7 +77,7 @@ async def on_POST(self, request, room_id): info, stream_id = await self._room_creation_handler.create_room( room_creator_requester, config={ - "preset": "public_chat", + "preset": RoomCreationPreset.PUBLIC_CHAT, 
"name": room_name, "power_level_content_override": {"users_default": -10}, }, diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index ca32f993a35f..6d45c4b2332f 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -22,6 +22,8 @@ from synapse.handlers.register import RegistrationHandler from synapse.types import RoomAlias, UserID, create_requester +from tests.unittest import override_config + from .. import unittest @@ -145,9 +147,9 @@ def test_auto_join_rooms_for_guests(self): rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) + @override_config({"auto_join_rooms": ["#room:test"]}) def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] user_id = self.get_success(self.handler.register_user(localpart="jeff")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) directory_handler = self.hs.get_handlers().directory_handler @@ -193,9 +195,9 @@ def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self): room_alias = RoomAlias.from_string(room_alias_str) self.get_failure(directory_handler.get_association(room_alias), SynapseError) + @override_config({"auto_join_rooms": ["#room:test"]}) def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self): room_alias_str = "#room:test" - self.hs.config.auto_join_rooms = [room_alias_str] self.store.count_real_users = Mock(return_value=defer.succeed(1)) self.store.is_real_user = Mock(return_value=defer.succeed(True)) @@ -218,6 +220,212 @@ def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user(self): rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) + @override_config( + { + "auto_join_rooms": ["#room:test"], + "autocreate_auto_join_rooms_federated": False, + } + ) + def test_auto_create_auto_join_rooms_federated(self): + """ + Auto-created rooms that are private require an invite to go to the user + (instead of directly joining it). + """ + room_alias_str = "#room:test" + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + + # Ensure the room was created. + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = self.get_success(directory_handler.get_association(room_alias)) + + # Ensure the room is properly not federated. + room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + self.assertFalse(room["federatable"]) + self.assertFalse(room["public"]) + self.assertEqual(room["join_rules"], "public") + self.assertIsNone(room["guest_access"]) + + # The user should be in the room. + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) + self.assertIn(room_id["room_id"], rooms) + + @override_config( + {"auto_join_rooms": ["#room:test"], "auto_join_mxid_localpart": "support"} + ) + def test_auto_join_mxid_localpart(self): + """ + Ensure the user still needs up in the room created by a different user. + """ + # Ensure the support user exists. + inviter = "@support:test" + + room_alias_str = "#room:test" + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + + # Ensure the room was created. + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = self.get_success(directory_handler.get_association(room_alias)) + + # Ensure the room is properly a public room. 
+ room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + self.assertEqual(room["join_rules"], "public") + + # Both users should be in the room. + rooms = self.get_success(self.store.get_rooms_for_user(inviter)) + self.assertIn(room_id["room_id"], rooms) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) + self.assertIn(room_id["room_id"], rooms) + + # Register a second user, which should also end up in the room. + user_id = self.get_success(self.handler.register_user(localpart="bob")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) + self.assertIn(room_id["room_id"], rooms) + + @override_config( + { + "auto_join_rooms": ["#room:test"], + "autocreate_auto_join_room_preset": "private_chat", + "auto_join_mxid_localpart": "support", + } + ) + def test_auto_create_auto_join_room_preset(self): + """ + Auto-created rooms that are private require an invite to go to the user + (instead of directly joining it). + """ + # Ensure the support user exists. + inviter = "@support:test" + + room_alias_str = "#room:test" + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + + # Ensure the room was created. + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = self.get_success(directory_handler.get_association(room_alias)) + + # Ensure the room is properly a private room. + room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + self.assertFalse(room["public"]) + self.assertEqual(room["join_rules"], "invite") + self.assertEqual(room["guest_access"], "can_join") + + # Both users should be in the room. + rooms = self.get_success(self.store.get_rooms_for_user(inviter)) + self.assertIn(room_id["room_id"], rooms) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) + self.assertIn(room_id["room_id"], rooms) + + # Register a second user, which should also end up in the room. + user_id = self.get_success(self.handler.register_user(localpart="bob")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) + self.assertIn(room_id["room_id"], rooms) + + @override_config( + { + "auto_join_rooms": ["#room:test"], + "autocreate_auto_join_room_preset": "private_chat", + "auto_join_mxid_localpart": "support", + } + ) + def test_auto_create_auto_join_room_preset_guest(self): + """ + Auto-created rooms that are private require an invite to go to the user + (instead of directly joining it). + + This should also work for guests. + """ + inviter = "@support:test" + + room_alias_str = "#room:test" + user_id = self.get_success( + self.handler.register_user(localpart="jeff", make_guest=True) + ) + + # Ensure the room was created. + directory_handler = self.hs.get_handlers().directory_handler + room_alias = RoomAlias.from_string(room_alias_str) + room_id = self.get_success(directory_handler.get_association(room_alias)) + + # Ensure the room is properly a private room. + room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + self.assertFalse(room["public"]) + self.assertEqual(room["join_rules"], "invite") + self.assertEqual(room["guest_access"], "can_join") + + # Both users should be in the room. 
+        rooms = self.get_success(self.store.get_rooms_for_user(inviter))
+        self.assertIn(room_id["room_id"], rooms)
+        rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+        self.assertIn(room_id["room_id"], rooms)
+
+    @override_config(
+        {
+            "auto_join_rooms": ["#room:test"],
+            "autocreate_auto_join_room_preset": "private_chat",
+            "auto_join_mxid_localpart": "support",
+        }
+    )
+    def test_auto_create_auto_join_room_preset_invalid_permissions(self):
+        """
+        Auto-created rooms that are private require an invite, check that
+        registration doesn't completely break if the inviter doesn't have proper
+        permissions.
+        """
+        inviter = "@support:test"
+
+        # Register an initial user to create the room and such (essentially this
+        # is a subset of test_auto_create_auto_join_room_preset).
+        room_alias_str = "#room:test"
+        user_id = self.get_success(self.handler.register_user(localpart="jeff"))
+
+        # Ensure the room was created.
+        directory_handler = self.hs.get_handlers().directory_handler
+        room_alias = RoomAlias.from_string(room_alias_str)
+        room_id = self.get_success(directory_handler.get_association(room_alias))
+
+        # Ensure the room exists.
+        self.get_success(self.store.get_room_with_stats(room_id["room_id"]))
+
+        # Both users should be in the room.
+        rooms = self.get_success(self.store.get_rooms_for_user(inviter))
+        self.assertIn(room_id["room_id"], rooms)
+        rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+        self.assertIn(room_id["room_id"], rooms)
+
+        # Lower the permissions of the inviter.
+        event_creation_handler = self.hs.get_event_creation_handler()
+        requester = create_requester(inviter)
+        event, context = self.get_success(
+            event_creation_handler.create_event(
+                requester,
+                {
+                    "type": "m.room.power_levels",
+                    "state_key": "",
+                    "room_id": room_id["room_id"],
+                    "content": {"invite": 100, "users": {inviter: 0}},
+                    "sender": inviter,
+                },
+            )
+        )
+        self.get_success(
+            event_creation_handler.send_nonmember_event(requester, event, context)
+        )
+
+        # Register a second user, which won't be in the room (or even have an invite)
+        # since the inviter no longer has the proper permissions.
+        user_id = self.get_success(self.handler.register_user(localpart="bob"))
+
+        # This user should not be in any rooms.
+        rooms = self.get_success(self.store.get_rooms_for_user(user_id))
+        invited_rooms = self.get_success(
+            self.store.get_invited_rooms_for_local_user(user_id)
+        )
+        self.assertEqual(rooms, set())
+        self.assertEqual(invited_rooms, [])
+
     def test_auto_create_auto_join_where_no_consent(self):
         """Test to ensure that the first user is not auto-joined to a room if
         they have not given general consent.
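For context on the patch above (#7763): the new registration options interact in a slightly subtle way (the preset implies whether invites are needed, which in turn makes the inviter localpart mandatory). The following is a condensed, standalone sketch of those rules, for illustration only — `resolve_auto_join_options` is an invented name, not Synapse's actual config API, and `ValueError` stands in for Synapse's `ConfigError`:

```python
# Illustration only: a condensed model of the option handling added to
# synapse/config/registration.py in the patch above. The function name and
# ValueError usage are assumptions made for this sketch.
from typing import Optional, Tuple

PUBLIC_CHAT = "public_chat"
PRIVATE_CHAT = "private_chat"
TRUSTED_PRIVATE_CHAT = "trusted_private_chat"


def resolve_auto_join_options(
    config: dict, server_name: str
) -> Tuple[str, bool, Optional[str]]:
    """Return (preset, requires_invite, inviter_user_id) for auto-join rooms."""
    preset = config.get("autocreate_auto_join_room_preset") or PUBLIC_CHAT
    requires_invite = preset in {PRIVATE_CHAT, TRUSTED_PRIVATE_CHAT}

    localpart = config.get("auto_join_mxid_localpart")
    inviter = "@%s:%s" % (localpart, server_name) if localpart else None

    if config.get("autocreate_auto_join_rooms", True):
        if preset not in {PUBLIC_CHAT, PRIVATE_CHAT, TRUSTED_PRIVATE_CHAT}:
            raise ValueError("Invalid value for autocreate_auto_join_room_preset")
        if requires_invite and inviter is None:
            # Invite-only presets need a configured user to send invites from.
            raise ValueError("auto_join_mxid_localpart must be configured")

    return preset, requires_invite, inviter


# For example, a private preset with a "support" inviter on the server "test":
assert resolve_auto_join_options(
    {
        "autocreate_auto_join_room_preset": "private_chat",
        "auto_join_mxid_localpart": "support",
    },
    "test",
) == ("private_chat", True, "@support:test")
```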
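Likewise, the `_join_rooms` path added above decides between a direct join and an invite-then-join by inspecting the room's current `m.room.join_rules` state: unless a public join rule is found, it falls back to inviting first. A minimal restatement of just that decision, again with invented names and under the same illustrative caveat:

```python
# Illustration of the invite-vs-join decision in _join_rooms above: a room is
# joined directly only when its join rule is known to be "public"; otherwise
# an invite from the configured auto_join user is sent before the join.
from typing import Optional

JOIN_RULE_PUBLIC = "public"


def room_requires_invite(join_rules_content: Optional[dict]) -> bool:
    if join_rules_content is None:
        # No m.room.join_rules event could be found: assume an invite is
        # needed, mirroring the `requires_invite = True` default above.
        return True
    join_rule = join_rules_content.get("join_rule")
    return bool(join_rule) and join_rule != JOIN_RULE_PUBLIC


assert room_requires_invite(None) is True
assert room_requires_invite({"join_rule": "invite"}) is True
assert room_requires_invite({"join_rule": "public"}) is False
```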
From 74d3e177f0443f27e670f0b99299d715c58fd238 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 1 Jul 2020 11:08:25 +0100
Subject: [PATCH 14/15] Back out MSC2625 implementation (#7761)

---
 changelog.d/7673.feature | 1 -
 changelog.d/7716.feature | 1 -
 changelog.d/7761.feature | 1 +
 synapse/handlers/sync.py | 3 -
 synapse/push/bulk_push_rule_evaluator.py | 7 +-
 synapse/push/push_tools.py | 5 +-
 synapse/rest/client/v1/push_rule.py | 4 +-
 .../data_stores/main/event_push_actions.py | 133 ++++--------------
 .../delta/58/07push_summary_unread_count.sql | 23 ---
 .../replication/slave/storage/test_events.py | 19 +--
 tests/storage/test_event_push_actions.py | 45 +++---
 11 files changed, 53 insertions(+), 189 deletions(-)
 delete mode 100644 changelog.d/7673.feature
 delete mode 100644 changelog.d/7716.feature
 create mode 100644 changelog.d/7761.feature
 delete mode 100644 synapse/storage/data_stores/main/schema/delta/58/07push_summary_unread_count.sql

diff --git a/changelog.d/7673.feature b/changelog.d/7673.feature
deleted file mode 100644
index ecc3ffd8d5fc..000000000000
--- a/changelog.d/7673.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a per-room counter for unread messages in responses to `/sync` requests. Implements [MSC2625](https://github.com/matrix-org/matrix-doc/pull/2625).
diff --git a/changelog.d/7716.feature b/changelog.d/7716.feature
deleted file mode 100644
index ecc3ffd8d5fc..000000000000
--- a/changelog.d/7716.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add a per-room counter for unread messages in responses to `/sync` requests. Implements [MSC2625](https://github.com/matrix-org/matrix-doc/pull/2625).
diff --git a/changelog.d/7761.feature b/changelog.d/7761.feature
new file mode 100644
index 000000000000..c97864677aac
--- /dev/null
+++ b/changelog.d/7761.feature
@@ -0,0 +1 @@
+Add unread messages count to sync responses.
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 0b82aa72a67e..4c7524493ef6 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -1893,9 +1893,6 @@ async def _generate_room_entry(
             if notifs is not None:
                 unread_notifications["notification_count"] = notifs["notify_count"]
                 unread_notifications["highlight_count"] = notifs["highlight_count"]
-                unread_notifications["org.matrix.msc2625.unread_count"] = notifs[
-                    "unread_count"
-                ]
 
         sync_result_builder.joined.append(room_sync)
 
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py
index 5b00602a56ae..43ffe6faf030 100644
--- a/synapse/push/bulk_push_rule_evaluator.py
+++ b/synapse/push/bulk_push_rule_evaluator.py
@@ -189,11 +189,8 @@ def action_for_event_by_user(self, event, context):
                 )
                 if matches:
                     actions = [x for x in rule["actions"] if x != "dont_notify"]
-                    if (
-                        "notify" in actions
-                        or "org.matrix.msc2625.mark_unread" in actions
-                    ):
-                        # Push rules say we should act on this event.
+                    if actions and "notify" in actions:
+                        # Push rules say we should notify the user of this event
                         actions_by_user[uid] = actions
                         break
 
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 4ea683fee010..5dae4648c0f7 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -39,10 +39,7 @@ def get_badge_count(store, user_id):
         )
         # return one badge count per conversation, as count per
         # message is so noisy as to be almost useless
-        # We're populating this badge using the unread_count (instead of the
-        # notify_count) as this badge is the number of missed messages, not the
-        # number of missed notifications.
- badge += 1 if notifs.get("unread_count") else 0 + badge += 1 if notifs["notify_count"] else 0 return badge diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index f563b3dc3572..9fd490813693 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2014-2020 The Matrix.org Foundation C.I.C. +# Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -267,7 +267,7 @@ def _check_actions(actions): raise InvalidRuleException("No actions found") for a in actions: - if a in ["notify", "dont_notify", "coalesce", "org.matrix.msc2625.mark_unread"]: + if a in ["notify", "dont_notify", "coalesce"]: pass elif isinstance(a, dict) and "set_tweak" in a: pass diff --git a/synapse/storage/data_stores/main/event_push_actions.py b/synapse/storage/data_stores/main/event_push_actions.py index 815d52ab4cfb..bc9f4f08eac4 100644 --- a/synapse/storage/data_stores/main/event_push_actions.py +++ b/synapse/storage/data_stores/main/event_push_actions.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2015-2020 The Matrix.org Foundation C.I.C. +# Copyright 2015 OpenMarket Ltd +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,9 +15,7 @@ # limitations under the License. import logging -from typing import Dict, Tuple -import attr from canonicaljson import json from twisted.internet import defer @@ -37,16 +36,6 @@ ] -@attr.s -class EventPushSummary: - """Summary of pending event push actions for a given user in a given room.""" - - unread_count = attr.ib(type=int) - stream_ordering = attr.ib(type=int) - old_user_id = attr.ib(type=str) - notif_count = attr.ib(type=int) - - def _serialize_action(actions, is_highlight): """Custom serializer for actions. This allows us to "compress" common actions. @@ -123,7 +112,7 @@ def _get_unread_counts_by_receipt_txn( txn.execute(sql, (room_id, last_read_event_id)) results = txn.fetchall() if len(results) == 0: - return {"notify_count": 0, "highlight_count": 0, "unread_count": 0} + return {"notify_count": 0, "highlight_count": 0} stream_ordering = results[0][0] @@ -133,42 +122,25 @@ def _get_unread_counts_by_receipt_txn( def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, stream_ordering): - # First get number of actions, grouped on whether the action notifies. + # First get number of notifications. + # We don't need to put a notif=1 clause as all rows always have + # notif=1 sql = ( - "SELECT count(*), notif" + "SELECT count(*)" " FROM event_push_actions ea" " WHERE" " user_id = ?" " AND room_id = ?" " AND stream_ordering > ?" - " GROUP BY notif" ) - txn.execute(sql, (user_id, room_id, stream_ordering)) - rows = txn.fetchall() - # We should get a maximum number of two rows: one for notif = 0, which is the - # number of actions that contribute to the unread_count but not to the - # notify_count, and one for notif = 1, which is the number of actions that - # contribute to both counters. If one or both rows don't appear, then the - # value for the matching counter should be 0. - unread_count = 0 - notify_count = 0 - for row in rows: - # We always increment unread_count because actions that notify also - # contribute to it. 
- unread_count += row[0] - if row[1] == 1: - notify_count = row[0] - elif row[1] != 0: - logger.warning( - "Unexpected value %d for column 'notif' in table" - " 'event_push_actions'", - row[1], - ) + txn.execute(sql, (user_id, room_id, stream_ordering)) + row = txn.fetchone() + notify_count = row[0] if row else 0 txn.execute( """ - SELECT notif_count, unread_count FROM event_push_summary + SELECT notif_count FROM event_push_summary WHERE room_id = ? AND user_id = ? AND stream_ordering > ? """, (room_id, user_id, stream_ordering), @@ -176,7 +148,6 @@ def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, stream_ordering): rows = txn.fetchall() if rows: notify_count += rows[0][0] - unread_count += rows[0][1] # Now get the number of highlights sql = ( @@ -193,11 +164,7 @@ def _get_unread_counts_by_pos_txn(self, txn, room_id, user_id, stream_ordering): row = txn.fetchone() highlight_count = row[0] if row else 0 - return { - "unread_count": unread_count, - "notify_count": notify_count, - "highlight_count": highlight_count, - } + return {"notify_count": notify_count, "highlight_count": highlight_count} @defer.inlineCallbacks def get_push_action_users_in_range(self, min_stream_ordering, max_stream_ordering): @@ -255,7 +222,6 @@ def get_after_receipt(txn): " AND ep.user_id = ?" " AND ep.stream_ordering > ?" " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" " ORDER BY ep.stream_ordering ASC LIMIT ?" ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] @@ -284,7 +250,6 @@ def get_no_receipt(txn): " AND ep.user_id = ?" " AND ep.stream_ordering > ?" " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" " ORDER BY ep.stream_ordering ASC LIMIT ?" ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] @@ -357,7 +322,6 @@ def get_after_receipt(txn): " AND ep.user_id = ?" " AND ep.stream_ordering > ?" " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" " ORDER BY ep.stream_ordering DESC LIMIT ?" ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] @@ -386,7 +350,6 @@ def get_no_receipt(txn): " AND ep.user_id = ?" " AND ep.stream_ordering > ?" " AND ep.stream_ordering <= ?" - " AND ep.notif = 1" " ORDER BY ep.stream_ordering DESC LIMIT ?" ) args = [user_id, user_id, min_stream_ordering, max_stream_ordering, limit] @@ -436,7 +399,7 @@ def get_if_maybe_push_in_range_for_user(self, user_id, min_stream_ordering): def _get_if_maybe_push_in_range_for_user_txn(txn): sql = """ SELECT 1 FROM event_push_actions - WHERE user_id = ? AND stream_ordering > ? AND notif = 1 + WHERE user_id = ? AND stream_ordering > ? LIMIT 1 """ @@ -465,15 +428,14 @@ def add_push_actions_to_staging(self, event_id, user_id_actions): return # This is a helper function for generating the necessary tuple that - # can be used to insert into the `event_push_actions_staging` table. + # can be used to inert into the `event_push_actions_staging` table. 
def _gen_entry(user_id, actions): is_highlight = 1 if _action_has_highlight(actions) else 0 - notif = 0 if "org.matrix.msc2625.mark_unread" in actions else 1 return ( event_id, # event_id column user_id, # user_id column _serialize_action(actions, is_highlight), # actions column - notif, # notif column + 1, # notif column is_highlight, # highlight column ) @@ -855,51 +817,24 @@ def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering): # Calculate the new counts that should be upserted into event_push_summary sql = """ SELECT user_id, room_id, - coalesce(old.%s, 0) + upd.cnt, + coalesce(old.notif_count, 0) + upd.notif_count, upd.stream_ordering, old.user_id FROM ( - SELECT user_id, room_id, count(*) as cnt, + SELECT user_id, room_id, count(*) as notif_count, max(stream_ordering) as stream_ordering FROM event_push_actions WHERE ? <= stream_ordering AND stream_ordering < ? AND highlight = 0 - %s GROUP BY user_id, room_id ) AS upd LEFT JOIN event_push_summary AS old USING (user_id, room_id) """ - # First get the count of unread messages. - txn.execute( - sql % ("unread_count", ""), - (old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # We need to merge both lists into a single object because we might not have the - # same amount of rows in each of them. In this case we use a dict indexed on the - # user ID and room ID to make it easier to populate. - summaries = {} # type: Dict[Tuple[str, str], EventPushSummary] - for row in txn: - summaries[(row[0], row[1])] = EventPushSummary( - unread_count=row[2], - stream_ordering=row[3], - old_user_id=row[4], - notif_count=0, - ) - - # Then get the count of notifications. - txn.execute( - sql % ("notif_count", "AND notif = 1"), - (old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # notif_rows is populated based on a subset of the query used to populate - # unread_rows, so we can be sure that there will be no KeyError here. - for row in txn: - summaries[(row[0], row[1])].notif_count = row[2] + txn.execute(sql, (old_rotate_stream_ordering, rotate_to_stream_ordering)) + rows = txn.fetchall() - logger.info("Rotating notifications, handling %d rows", len(summaries)) + logger.info("Rotating notifications, handling %d rows", len(rows)) # If the `old.user_id` above is NULL then we know there isn't already an # entry in the table, so we simply insert it. Otherwise we update the @@ -909,34 +844,22 @@ def _rotate_notifs_before_txn(self, txn, rotate_to_stream_ordering): table="event_push_summary", values=[ { - "user_id": user_id, - "room_id": room_id, - "notif_count": summary.notif_count, - "unread_count": summary.unread_count, - "stream_ordering": summary.stream_ordering, + "user_id": row[0], + "room_id": row[1], + "notif_count": row[2], + "stream_ordering": row[3], } - for ((user_id, room_id), summary) in summaries.items() - if summary.old_user_id is None + for row in rows + if row[4] is None ], ) txn.executemany( """ - UPDATE event_push_summary - SET notif_count = ?, unread_count = ?, stream_ordering = ? + UPDATE event_push_summary SET notif_count = ?, stream_ordering = ? WHERE user_id = ? AND room_id = ? 
""", - ( - ( - summary.notif_count, - summary.unread_count, - summary.stream_ordering, - user_id, - room_id, - ) - for ((user_id, room_id), summary) in summaries.items() - if summary.old_user_id is not None - ), + ((row[2], row[3], row[0], row[1]) for row in rows if row[4] is not None), ) txn.execute( diff --git a/synapse/storage/data_stores/main/schema/delta/58/07push_summary_unread_count.sql b/synapse/storage/data_stores/main/schema/delta/58/07push_summary_unread_count.sql deleted file mode 100644 index f1459ef7f064..000000000000 --- a/synapse/storage/data_stores/main/schema/delta/58/07push_summary_unread_count.sql +++ /dev/null @@ -1,23 +0,0 @@ -/* Copyright 2020 The Matrix.org Foundation C.I.C - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - --- Store the number of unread messages, i.e. messages that triggered either a notify --- action or a mark_unread one. -ALTER TABLE event_push_summary ADD COLUMN unread_count BIGINT NOT NULL DEFAULT 0; - --- Pre-populate the new column with the count of pending notifications. --- We expect event_push_summary to be relatively small, so we can do this update --- synchronously without impacting Synapse's startup time too much. -UPDATE event_push_summary SET unread_count = notif_count; \ No newline at end of file diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index cd8680e8127c..1a88c7fb8005 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -160,7 +160,7 @@ def test_push_actions_for_user(self): self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 0, "notify_count": 0, "unread_count": 0}, + {"highlight_count": 0, "notify_count": 0}, ) self.persist( @@ -173,7 +173,7 @@ def test_push_actions_for_user(self): self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 0, "notify_count": 1, "unread_count": 1}, + {"highlight_count": 0, "notify_count": 1}, ) self.persist( @@ -188,20 +188,7 @@ def test_push_actions_for_user(self): self.check( "get_unread_event_push_actions_by_room_for_user", [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 1, "notify_count": 2, "unread_count": 2}, - ) - - self.persist( - type="m.room.message", - msgtype="m.text", - body="world", - push_actions=[(USER_ID_2, ["org.matrix.msc2625.mark_unread"])], - ) - self.replicate() - self.check( - "get_unread_event_push_actions_by_room_for_user", - [ROOM_ID, USER_ID_2, event1.event_id], - {"highlight_count": 1, "notify_count": 2, "unread_count": 3}, + {"highlight_count": 1, "notify_count": 2}, ) def test_get_rooms_for_user_with_stream_ordering(self): diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 303dc8571c4c..b45bc9c1151f 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -22,10 +22,6 @@ USER_ID = 
"@user:example.com" -MARK_UNREAD = [ - "org.matrix.msc2625.mark_unread", - {"set_tweak": "highlight", "value": False}, -] PlAIN_NOTIF = ["notify", {"set_tweak": "highlight", "value": False}] HIGHLIGHT = [ "notify", @@ -59,17 +55,13 @@ def test_count_aggregation(self): user_id = "@user1235:example.com" @defer.inlineCallbacks - def _assert_counts(unread_count, notif_count, highlight_count): + def _assert_counts(noitf_count, highlight_count): counts = yield self.store.db.runInteraction( "", self.store._get_unread_counts_by_pos_txn, room_id, user_id, 0 ) self.assertEquals( counts, - { - "unread_count": unread_count, - "notify_count": notif_count, - "highlight_count": highlight_count, - }, + {"notify_count": noitf_count, "highlight_count": highlight_count}, ) @defer.inlineCallbacks @@ -104,23 +96,23 @@ def _mark_read(stream, depth): stream, ) - yield _assert_counts(0, 0, 0) + yield _assert_counts(0, 0) yield _inject_actions(1, PlAIN_NOTIF) - yield _assert_counts(1, 1, 0) + yield _assert_counts(1, 0) yield _rotate(2) - yield _assert_counts(1, 1, 0) + yield _assert_counts(1, 0) yield _inject_actions(3, PlAIN_NOTIF) - yield _assert_counts(2, 2, 0) + yield _assert_counts(2, 0) yield _rotate(4) - yield _assert_counts(2, 2, 0) + yield _assert_counts(2, 0) yield _inject_actions(5, PlAIN_NOTIF) yield _mark_read(3, 3) - yield _assert_counts(1, 1, 0) + yield _assert_counts(1, 0) yield _mark_read(5, 5) - yield _assert_counts(0, 0, 0) + yield _assert_counts(0, 0) yield _inject_actions(6, PlAIN_NOTIF) yield _rotate(7) @@ -129,22 +121,17 @@ def _mark_read(stream, depth): table="event_push_actions", keyvalues={"1": 1}, desc="" ) - yield _assert_counts(1, 1, 0) + yield _assert_counts(1, 0) yield _mark_read(7, 7) - yield _assert_counts(0, 0, 0) + yield _assert_counts(0, 0) - yield _inject_actions(8, MARK_UNREAD) - yield _assert_counts(1, 0, 0) + yield _inject_actions(8, HIGHLIGHT) + yield _assert_counts(1, 1) yield _rotate(9) - yield _assert_counts(1, 0, 0) - - yield _inject_actions(10, HIGHLIGHT) - yield _assert_counts(2, 1, 1) - yield _rotate(11) - yield _assert_counts(2, 1, 1) - yield _rotate(12) - yield _assert_counts(2, 1, 1) + yield _assert_counts(1, 1) + yield _rotate(10) + yield _assert_counts(1, 1) @defer.inlineCallbacks def test_find_first_stream_ordering_after_ts(self): From dc80a0762d70650663f0fb023216b9d8f46718e4 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 1 Jul 2020 11:26:58 +0100 Subject: [PATCH 15/15] 1.16.0rc1 --- CHANGES.md | 70 ++++++++++++++++++++++++++++++++++++++++ changelog.d/7606.bugfix | 1 - changelog.d/7636.misc | 1 - changelog.d/7639.feature | 1 - changelog.d/7648.bugfix | 1 - changelog.d/7652.doc | 1 - changelog.d/7657.misc | 1 - changelog.d/7659.doc | 1 - changelog.d/7663.bugfix | 1 - changelog.d/7664.misc | 1 - changelog.d/7675.removal | 1 - changelog.d/7677.bugfix | 1 - changelog.d/7678.misc | 1 - changelog.d/7679.misc | 1 - changelog.d/7680.misc | 1 - changelog.d/7681.misc | 1 - changelog.d/7687.bugfix | 1 - changelog.d/7688.bugfix | 1 - changelog.d/7689.bugfix | 1 - changelog.d/7691.bugfix | 1 - changelog.d/7692.misc | 1 - changelog.d/7697.misc | 1 - changelog.d/7698.bugfix | 1 - changelog.d/7701.bugfix | 1 - changelog.d/7703.misc | 1 - changelog.d/7704.misc | 1 - changelog.d/7706.feature | 1 - changelog.d/7708.bugfix | 1 - changelog.d/7711.bugfix | 1 - changelog.d/7712.misc | 1 - changelog.d/7714.bugfix | 1 - changelog.d/7717.bugfix | 1 - changelog.d/7718.feature | 1 - changelog.d/7724.doc | 1 - changelog.d/7725.misc | 1 - changelog.d/7727.misc | 1 - 
changelog.d/7730.bugfix | 1 -
 changelog.d/7735.bugfix | 1 -
 changelog.d/7738.misc | 1 -
 changelog.d/7746.bugfix | 1 -
 changelog.d/7751.misc | 1 -
 changelog.d/7755.misc | 1 -
 changelog.d/7761.feature | 1 -
 changelog.d/7763.feature | 1 -
 synapse/__init__.py | 2 +-
 45 files changed, 71 insertions(+), 44 deletions(-)
 delete mode 100644 changelog.d/7606.bugfix
 delete mode 100644 changelog.d/7636.misc
 delete mode 100644 changelog.d/7639.feature
 delete mode 100644 changelog.d/7648.bugfix
 delete mode 100644 changelog.d/7652.doc
 delete mode 100644 changelog.d/7657.misc
 delete mode 100644 changelog.d/7659.doc
 delete mode 100644 changelog.d/7663.bugfix
 delete mode 100644 changelog.d/7664.misc
 delete mode 100644 changelog.d/7675.removal
 delete mode 100644 changelog.d/7677.bugfix
 delete mode 100644 changelog.d/7678.misc
 delete mode 100644 changelog.d/7679.misc
 delete mode 100644 changelog.d/7680.misc
 delete mode 100644 changelog.d/7681.misc
 delete mode 100644 changelog.d/7687.bugfix
 delete mode 100644 changelog.d/7688.bugfix
 delete mode 100644 changelog.d/7689.bugfix
 delete mode 100644 changelog.d/7691.bugfix
 delete mode 100644 changelog.d/7692.misc
 delete mode 100644 changelog.d/7697.misc
 delete mode 100644 changelog.d/7698.bugfix
 delete mode 100644 changelog.d/7701.bugfix
 delete mode 100644 changelog.d/7703.misc
 delete mode 100644 changelog.d/7704.misc
 delete mode 100644 changelog.d/7706.feature
 delete mode 100644 changelog.d/7708.bugfix
 delete mode 100644 changelog.d/7711.bugfix
 delete mode 100644 changelog.d/7712.misc
 delete mode 100644 changelog.d/7714.bugfix
 delete mode 100644 changelog.d/7717.bugfix
 delete mode 100644 changelog.d/7718.feature
 delete mode 100644 changelog.d/7724.doc
 delete mode 100644 changelog.d/7725.misc
 delete mode 100644 changelog.d/7727.misc
 delete mode 100644 changelog.d/7730.bugfix
 delete mode 100644 changelog.d/7735.bugfix
 delete mode 100644 changelog.d/7738.misc
 delete mode 100644 changelog.d/7746.bugfix
 delete mode 100644 changelog.d/7751.misc
 delete mode 100644 changelog.d/7755.misc
 delete mode 100644 changelog.d/7761.feature
 delete mode 100644 changelog.d/7763.feature

diff --git a/CHANGES.md b/CHANGES.md
index 9a30a2e9014e..e9b5bb01aefe 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,73 @@
+Synapse 1.16.0rc1 (2020-07-01)
+==============================
+
+Features
+--------
+
+- Add an option to enable encryption by default for new rooms. ([\#7639](https://github.com/matrix-org/synapse/issues/7639))
+- Add support for running multiple media repository workers. See [docs/workers.md](docs/workers.md) for instructions. ([\#7706](https://github.com/matrix-org/synapse/issues/7706))
+- Media can now be marked as safe from being quarantined. ([\#7718](https://github.com/matrix-org/synapse/issues/7718))
+- Add unread messages count to sync responses. ([\#7761](https://github.com/matrix-org/synapse/issues/7761))
+- Expand the configuration options for auto-join rooms. ([\#7763](https://github.com/matrix-org/synapse/issues/7763))
+
+
+Bugfixes
+--------
+
+- Remove `user_id` from the response to `GET /_matrix/client/r0/presence/{userId}/status` to match the specification. ([\#7606](https://github.com/matrix-org/synapse/issues/7606))
+- In worker mode, ensure that replicated data has not already been received. ([\#7648](https://github.com/matrix-org/synapse/issues/7648))
+- Fix intermittent exception during startup, introduced in Synapse 1.14.0. 
([\#7663](https://github.com/matrix-org/synapse/issues/7663))
+- Include a user-agent for federation and well-known requests. ([\#7677](https://github.com/matrix-org/synapse/issues/7677))
+- Accept the proper field (`phone`) for the `m.id.phone` identifier type. The legacy field of `number` is still accepted as a fallback. Bug introduced in v0.20.0-rc1. ([\#7687](https://github.com/matrix-org/synapse/issues/7687))
+- Fix "Starting db txn 'get_completed_ui_auth_stages' from sentinel context" warning. The bug was introduced in 1.13.0rc1. ([\#7688](https://github.com/matrix-org/synapse/issues/7688))
+- Compare the URI and method during user interactive authentication (instead of the URI twice). Bug introduced in 1.13.0rc1. ([\#7689](https://github.com/matrix-org/synapse/issues/7689))
+- Fix a long-standing bug where the response to the `GET room_keys/version` endpoint had the incorrect type for the `etag` field. ([\#7691](https://github.com/matrix-org/synapse/issues/7691))
+- Fix logged error during device resync in opentracing. Broke in v1.14.0. ([\#7698](https://github.com/matrix-org/synapse/issues/7698))
+- Do not break push rule evaluation when receiving an event with a non-string body. This is a long-standing bug. ([\#7701](https://github.com/matrix-org/synapse/issues/7701))
+- Fixes a long-standing bug which resulted in an exception: "TypeError: argument of type 'ObservableDeferred' is not iterable". ([\#7708](https://github.com/matrix-org/synapse/issues/7708))
+- The `synapse_port_db` script no longer fails when the `ui_auth_sessions` table is non-empty. This bug has existed since v1.13.0rc1. ([\#7711](https://github.com/matrix-org/synapse/issues/7711))
+- Synapse will now fetch media from the proper specified URL (using the r0 prefix instead of the unspecified v1). ([\#7714](https://github.com/matrix-org/synapse/issues/7714))
+- Fix the tables ignored by `synapse_port_db` to be in sync with the current database schema. ([\#7717](https://github.com/matrix-org/synapse/issues/7717))
+- Fix missing `Content-Length` on HTTP responses from the metrics handler. ([\#7730](https://github.com/matrix-org/synapse/issues/7730))
+- Fix large state resolutions stalling Synapse for seconds at a time. ([\#7735](https://github.com/matrix-org/synapse/issues/7735), [\#7746](https://github.com/matrix-org/synapse/issues/7746))
+
+
+Improved Documentation
+----------------------
+
+- Spelling correction in sample_config.yaml. ([\#7652](https://github.com/matrix-org/synapse/issues/7652))
+- Added instructions for how to use Keycloak via OpenID Connect to authenticate with Synapse. ([\#7659](https://github.com/matrix-org/synapse/issues/7659))
+- Corrected misspelling of PostgreSQL. ([\#7724](https://github.com/matrix-org/synapse/issues/7724))
+
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate `m.login.jwt` login method in favour of `org.matrix.login.jwt`, as `m.login.jwt` is not part of the Matrix spec. ([\#7675](https://github.com/matrix-org/synapse/issues/7675))
+
+
+Internal Changes
+----------------
+
+- Refactor getting replication updates from database. ([\#7636](https://github.com/matrix-org/synapse/issues/7636))
+- Clean-up the login fallback code. ([\#7657](https://github.com/matrix-org/synapse/issues/7657))
+- Increase the default SAML session expiry time to 15 minutes. ([\#7664](https://github.com/matrix-org/synapse/issues/7664))
+- Convert the device message and pagination handlers to async/await. 
([\#7678](https://github.com/matrix-org/synapse/issues/7678))
+- Convert typing handler to async/await. ([\#7679](https://github.com/matrix-org/synapse/issues/7679))
+- Require `parameterized` package version to be at least 0.7.0. ([\#7680](https://github.com/matrix-org/synapse/issues/7680))
+- Refactor handling of `listeners` configuration settings. ([\#7681](https://github.com/matrix-org/synapse/issues/7681))
+- Replace uses of `six.iterkeys`/`iteritems`/`itervalues` with `keys()`/`items()`/`values()`. ([\#7692](https://github.com/matrix-org/synapse/issues/7692))
+- Add support for using `rust-python-jaeger-reporter` library to reduce jaeger tracing overhead. ([\#7697](https://github.com/matrix-org/synapse/issues/7697))
+- Make Tox actions work on Debian 10. ([\#7703](https://github.com/matrix-org/synapse/issues/7703))
+- Replace all remaining uses of `six` with native Python 3 equivalents. Contributed by @ilmari. ([\#7704](https://github.com/matrix-org/synapse/issues/7704))
+- Fix broken link in sample config. ([\#7712](https://github.com/matrix-org/synapse/issues/7712))
+- Speed up state res v2 across large state differences. ([\#7725](https://github.com/matrix-org/synapse/issues/7725))
+- Convert directory handler to async/await. ([\#7727](https://github.com/matrix-org/synapse/issues/7727))
+- Move `flake8` to the end of `scripts-dev/lint.sh` as it takes the longest and could cause the script to exit early. ([\#7738](https://github.com/matrix-org/synapse/issues/7738))
+- Explain that the "test" conditional requirement for dependencies does not include all of the modules necessary to run the unit tests. ([\#7751](https://github.com/matrix-org/synapse/issues/7751))
+- Add some metrics for inbound and outbound federation latencies: `synapse_federation_server_pdu_process_time` and `synapse_event_processing_lag_by_event`. ([\#7755](https://github.com/matrix-org/synapse/issues/7755))
+
+
 Synapse 1.15.1 (2020-06-16)
 ===========================
 
diff --git a/changelog.d/7606.bugfix b/changelog.d/7606.bugfix
deleted file mode 100644
index 6c488c39966f..000000000000
--- a/changelog.d/7606.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Remove `user_id` from the response to `GET /_matrix/client/r0/presence/{userId}/status` to match the specification.
diff --git a/changelog.d/7636.misc b/changelog.d/7636.misc
deleted file mode 100644
index f93149502e8c..000000000000
--- a/changelog.d/7636.misc
+++ /dev/null
@@ -1 +0,0 @@
-Refactor getting replication updates from database.
diff --git a/changelog.d/7639.feature b/changelog.d/7639.feature
deleted file mode 100644
index ca80fc0ca296..000000000000
--- a/changelog.d/7639.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add an option to enable encryption by default for new rooms.
diff --git a/changelog.d/7648.bugfix b/changelog.d/7648.bugfix
deleted file mode 100644
index ff2417bfb692..000000000000
--- a/changelog.d/7648.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-In working mode, ensure that replicated data has not already been received.
diff --git a/changelog.d/7652.doc b/changelog.d/7652.doc
deleted file mode 100644
index c3ebbeb2624c..000000000000
--- a/changelog.d/7652.doc
+++ /dev/null
@@ -1 +0,0 @@
-Spelling correction in sample_config.yaml.
diff --git a/changelog.d/7657.misc b/changelog.d/7657.misc
deleted file mode 100644
index 384c05d2cd27..000000000000
--- a/changelog.d/7657.misc
+++ /dev/null
@@ -1 +0,0 @@
-Clean-up the login fallback code.
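Each `changelog.d/<PR number>.<type>` fragment deleted in this release commit contributed one bullet to the matching section of the CHANGES.md hunk above; Synapse's release tooling (towncrier) performs that collation automatically. Purely as an illustration of the fragment convention — this is not Synapse's actual release script, and the headings and link format are simplified assumptions — a rough sketch of such a collation step:

```python
# Rough illustrative sketch (not Synapse's real tooling, which is towncrier):
# collate changelog.d/<PR>.<type> fragments into Markdown sections like the
# CHANGES.md hunk above. Link targets are omitted for brevity.
import os
from collections import defaultdict

HEADINGS = {
    "feature": "Features",
    "bugfix": "Bugfixes",
    "doc": "Improved Documentation",
    "removal": "Deprecations and Removals",
    "misc": "Internal Changes",
}


def collate(changelog_dir: str = "changelog.d") -> str:
    sections = defaultdict(list)
    for name in sorted(os.listdir(changelog_dir)):
        pr_number, _, kind = name.partition(".")
        if kind not in HEADINGS:
            continue
        with open(os.path.join(changelog_dir, name)) as f:
            text = f.read().strip()
        sections[kind].append("- %s ([\\#%s])" % (text, pr_number))

    out = []
    for kind, heading in HEADINGS.items():
        if sections[kind]:
            out.append(heading)
            out.append("-" * len(heading))
            out.append("")
            out.extend(sections[kind])
            out.append("")
    return "\n".join(out)
```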
diff --git a/changelog.d/7659.doc b/changelog.d/7659.doc deleted file mode 100644 index 1d3458a353d1..000000000000 --- a/changelog.d/7659.doc +++ /dev/null @@ -1 +0,0 @@ -Added instructions for how to use Keycloak via OpenID Connect to authenticate with Synapse. diff --git a/changelog.d/7663.bugfix b/changelog.d/7663.bugfix deleted file mode 100644 index b58316b34f65..000000000000 --- a/changelog.d/7663.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix intermittent exception during startup, introduced in Synapse 1.14.0. diff --git a/changelog.d/7664.misc b/changelog.d/7664.misc deleted file mode 100644 index bbf0908109a9..000000000000 --- a/changelog.d/7664.misc +++ /dev/null @@ -1 +0,0 @@ -Increase the default SAML session expirary time to 15 minutes. diff --git a/changelog.d/7675.removal b/changelog.d/7675.removal deleted file mode 100644 index 2500e2c578e7..000000000000 --- a/changelog.d/7675.removal +++ /dev/null @@ -1 +0,0 @@ -Deprecate `m.login.jwt` login method in favour of `org.matrix.login.jwt`, as `m.login.jwt` is not part of the Matrix spec. diff --git a/changelog.d/7677.bugfix b/changelog.d/7677.bugfix deleted file mode 100644 index b63f04109698..000000000000 --- a/changelog.d/7677.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include a user-agent for federation and well-known requests. diff --git a/changelog.d/7678.misc b/changelog.d/7678.misc deleted file mode 100644 index ab612200cebc..000000000000 --- a/changelog.d/7678.misc +++ /dev/null @@ -1 +0,0 @@ -Convert the device message and pagination handlers to async/await. diff --git a/changelog.d/7679.misc b/changelog.d/7679.misc deleted file mode 100644 index 7db94691a929..000000000000 --- a/changelog.d/7679.misc +++ /dev/null @@ -1 +0,0 @@ -Convert typing handler to async/await. diff --git a/changelog.d/7680.misc b/changelog.d/7680.misc deleted file mode 100644 index 46cd23257455..000000000000 --- a/changelog.d/7680.misc +++ /dev/null @@ -1 +0,0 @@ -Require `parameterized` package version to be at least 0.7.0. diff --git a/changelog.d/7681.misc b/changelog.d/7681.misc deleted file mode 100644 index e474fc39cd8f..000000000000 --- a/changelog.d/7681.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor handling of `listeners` configuration settings. diff --git a/changelog.d/7687.bugfix b/changelog.d/7687.bugfix deleted file mode 100644 index 0413aff0b340..000000000000 --- a/changelog.d/7687.bugfix +++ /dev/null @@ -1 +0,0 @@ -Accept the proper field (`phone`) for the `m.id.phone` identifier type. The legacy field of `number` is still accepted as a fallback. Bug introduced in v0.20.0-rc1. diff --git a/changelog.d/7688.bugfix b/changelog.d/7688.bugfix deleted file mode 100644 index 2ed718a08791..000000000000 --- a/changelog.d/7688.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix "Starting db txn 'get_completed_ui_auth_stages' from sentinel context" warning. The bug was introduced in 1.13.0rc1. diff --git a/changelog.d/7689.bugfix b/changelog.d/7689.bugfix deleted file mode 100644 index 10ad4c8c139f..000000000000 --- a/changelog.d/7689.bugfix +++ /dev/null @@ -1 +0,0 @@ -Compare the URI and method during user interactive authentication (instead of the URI twice). Bug introduced in 1.13.0rc1. diff --git a/changelog.d/7691.bugfix b/changelog.d/7691.bugfix deleted file mode 100644 index 2a8a480c53f1..000000000000 --- a/changelog.d/7691.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long standing bug where the response to the `GET room_keys/version` endpoint had the incorrect type for the `etag` field. 
diff --git a/changelog.d/7692.misc b/changelog.d/7692.misc deleted file mode 100644 index ef6cbe0005a8..000000000000 --- a/changelog.d/7692.misc +++ /dev/null @@ -1 +0,0 @@ -Replace uses of `six.iterkeys`/`iteritems`/`itervalues` with `keys()`/`items()`/`values()`. diff --git a/changelog.d/7697.misc b/changelog.d/7697.misc deleted file mode 100644 index 345862b5a55e..000000000000 --- a/changelog.d/7697.misc +++ /dev/null @@ -1 +0,0 @@ -Add support for using `rust-python-jaeger-reporter` library to reduce jaeger tracing overhead. diff --git a/changelog.d/7698.bugfix b/changelog.d/7698.bugfix deleted file mode 100644 index 32de7459eb95..000000000000 --- a/changelog.d/7698.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix logged error during device resync in opentracing. Broke in v1.14.0. diff --git a/changelog.d/7701.bugfix b/changelog.d/7701.bugfix deleted file mode 100644 index e5b10f75fdaf..000000000000 --- a/changelog.d/7701.bugfix +++ /dev/null @@ -1 +0,0 @@ -Do not break push rule evaluation when receiving an event with a non-string body. This is a long-standing bug. diff --git a/changelog.d/7703.misc b/changelog.d/7703.misc deleted file mode 100644 index 6e89897e455f..000000000000 --- a/changelog.d/7703.misc +++ /dev/null @@ -1 +0,0 @@ -Make Tox actions work on Debian 10. diff --git a/changelog.d/7704.misc b/changelog.d/7704.misc deleted file mode 100644 index 7838a613c892..000000000000 --- a/changelog.d/7704.misc +++ /dev/null @@ -1 +0,0 @@ -Replace all remaining uses of `six` with native Python 3 equivalents. Contributed by @ilmari. diff --git a/changelog.d/7706.feature b/changelog.d/7706.feature deleted file mode 100644 index c6b3b20b5531..000000000000 --- a/changelog.d/7706.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for running multiple media repository workers. See [docs/workers.md](docs/workers.md) for instructions. diff --git a/changelog.d/7708.bugfix b/changelog.d/7708.bugfix deleted file mode 100644 index 03a41ca55aeb..000000000000 --- a/changelog.d/7708.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixs a long standing bug which resulted in an exception: "TypeError: argument of type 'ObservableDeferred' is not iterable". diff --git a/changelog.d/7711.bugfix b/changelog.d/7711.bugfix deleted file mode 100644 index 180de0840562..000000000000 --- a/changelog.d/7711.bugfix +++ /dev/null @@ -1 +0,0 @@ -The `synapse_port_db` script no longer fails when the `ui_auth_sessions` table is non-empty. This bug has existed since v1.13.0rc1. diff --git a/changelog.d/7712.misc b/changelog.d/7712.misc deleted file mode 100644 index 4f0987303ec4..000000000000 --- a/changelog.d/7712.misc +++ /dev/null @@ -1 +0,0 @@ -Fix broken link in sample config. diff --git a/changelog.d/7714.bugfix b/changelog.d/7714.bugfix deleted file mode 100644 index 78925d94d19e..000000000000 --- a/changelog.d/7714.bugfix +++ /dev/null @@ -1 +0,0 @@ -Synapse will now fetch media from the proper specified URL (using the r0 prefix instead of the unspecified v1). diff --git a/changelog.d/7717.bugfix b/changelog.d/7717.bugfix deleted file mode 100644 index bcbf146fea42..000000000000 --- a/changelog.d/7717.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the tables ignored by `synapse_port_db` to be in sync the current database schema. diff --git a/changelog.d/7718.feature b/changelog.d/7718.feature deleted file mode 100644 index 17071b9ea9f6..000000000000 --- a/changelog.d/7718.feature +++ /dev/null @@ -1 +0,0 @@ -Media can now be marked as safe from quarantined. 
diff --git a/changelog.d/7724.doc b/changelog.d/7724.doc deleted file mode 100644 index 909e0345c7ed..000000000000 --- a/changelog.d/7724.doc +++ /dev/null @@ -1 +0,0 @@ -Corrected misspelling of PostgreSQL. diff --git a/changelog.d/7725.misc b/changelog.d/7725.misc deleted file mode 100644 index f295a455211f..000000000000 --- a/changelog.d/7725.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up state res v2 across large state differences. diff --git a/changelog.d/7727.misc b/changelog.d/7727.misc deleted file mode 100644 index 4d12d10fda2c..000000000000 --- a/changelog.d/7727.misc +++ /dev/null @@ -1 +0,0 @@ -Convert directory handler to async/await. diff --git a/changelog.d/7730.bugfix b/changelog.d/7730.bugfix deleted file mode 100644 index 9da254b56cc3..000000000000 --- a/changelog.d/7730.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix missing `Content-Length` on HTTP responses from the metrics handler. diff --git a/changelog.d/7735.bugfix b/changelog.d/7735.bugfix deleted file mode 100644 index 86959a5ca4d0..000000000000 --- a/changelog.d/7735.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix large state resolutions from stalling Synapse for seconds at a time. diff --git a/changelog.d/7738.misc b/changelog.d/7738.misc deleted file mode 100644 index 424ac15d66b4..000000000000 --- a/changelog.d/7738.misc +++ /dev/null @@ -1 +0,0 @@ -Move `flake8` to the end of `scripts-dev/lint.sh` as it takes the longest and could cause the script to exit early. diff --git a/changelog.d/7746.bugfix b/changelog.d/7746.bugfix deleted file mode 100644 index 86959a5ca4d0..000000000000 --- a/changelog.d/7746.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix large state resolutions from stalling Synapse for seconds at a time. diff --git a/changelog.d/7751.misc b/changelog.d/7751.misc deleted file mode 100644 index eb10ecd92e5d..000000000000 --- a/changelog.d/7751.misc +++ /dev/null @@ -1 +0,0 @@ -Explain the "test" conditional requirement for dependencies is not all of the modules necessary to run the unit tests. diff --git a/changelog.d/7755.misc b/changelog.d/7755.misc deleted file mode 100644 index 1fc29206ac5d..000000000000 --- a/changelog.d/7755.misc +++ /dev/null @@ -1 +0,0 @@ -Add some metrics for inbound and outbound federation latencies: `synapse_federation_server_pdu_process_time` and `synapse_event_processing_lag_by_event`. diff --git a/changelog.d/7761.feature b/changelog.d/7761.feature deleted file mode 100644 index c97864677aac..000000000000 --- a/changelog.d/7761.feature +++ /dev/null @@ -1 +0,0 @@ -Add unread messages count to sync responses. diff --git a/changelog.d/7763.feature b/changelog.d/7763.feature deleted file mode 100644 index 4a7563dad392..000000000000 --- a/changelog.d/7763.feature +++ /dev/null @@ -1 +0,0 @@ -Expand the configuration options for auto-join rooms. diff --git a/synapse/__init__.py b/synapse/__init__.py index 4d39996a2e50..f5cd8271a6ac 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ except ImportError: pass -__version__ = "1.15.1" +__version__ = "1.16.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when