From 7f025eb425bae8a48b25a230d17c25ccb67cbe2d Mon Sep 17 00:00:00 2001
From: Katie Wolfe
Date: Mon, 22 Apr 2019 12:59:00 -0400
Subject: [PATCH 001/231] Show heroes if room name or canonical alias are
 empty

Fixes #4194

Signed-off-by: Katie Wolfe
---
 synapse/handlers/sync.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 153312e39fff..f1a436011eb4 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -583,19 +583,17 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token):
         )

         # if the room has a name or canonical_alias set, we can skip
-        # calculating heroes. we assume that if the event has contents, it'll
-        # be a valid name or canonical_alias - i.e. we're checking that they
-        # haven't been "deleted" by blatting {} over the top.
+        # calculating heroes.
         if name_id:
             name = yield self.store.get_event(name_id, allow_none=True)
-            if name and name.content:
+            if name and name.content and name.content.name:
                 defer.returnValue(summary)

         if canonical_alias_id:
             canonical_alias = yield self.store.get_event(
                 canonical_alias_id, allow_none=True,
             )
-            if canonical_alias and canonical_alias.content:
+            if canonical_alias and canonical_alias.content and canonical_alias.content.alias:
                 defer.returnValue(summary)

         joined_user_ids = [

From 0a4c135f68249c2345449bee6aa7d7557a61179b Mon Sep 17 00:00:00 2001
From: Katie Wolfe
Date: Mon, 22 Apr 2019 13:03:59 -0400
Subject: [PATCH 002/231] Add changelog.d/5084.bugfix

Signed-off-by: Katie Wolfe
---
 changelog.d/5084.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5084.bugfix

diff --git a/changelog.d/5084.bugfix b/changelog.d/5084.bugfix
new file mode 100644
index 000000000000..d5e135cde8fe
--- /dev/null
+++ b/changelog.d/5084.bugfix
@@ -0,0 +1 @@
+Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty

From 60041eac4b88cad00c1d3da8b90735dab2f0b82b Mon Sep 17 00:00:00 2001
From: Katie Wolfe
Date: Mon, 22 Apr 2019 13:10:57 -0400
Subject: [PATCH 003/231] Add full stop to 5084.bugfix

Signed-off-by: Katie Wolfe
---
 changelog.d/5084.bugfix | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/changelog.d/5084.bugfix b/changelog.d/5084.bugfix
index d5e135cde8fe..9d8434460c09 100644
--- a/changelog.d/5084.bugfix
+++ b/changelog.d/5084.bugfix
@@ -1 +1 @@
-Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty
+Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty.
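The bug behind these first patches is easiest to see in isolation: a room name that has been "deleted" by overwriting the state event with `{}` makes `event.content` falsey, but content such as `{"name": ""}` is still truthy even though the room is effectively nameless. A minimal sketch using plain dicts rather than Synapse's event objects (not code from the diff):

```python
# The name was removed by blatting {} over the top: the old check
# (`if name and name.content:`) correctly treats this as "no name"...
deleted_content = {}
assert not deleted_content

# ...but content like {"name": ""} is a truthy dict, which is why the
# patch also has to inspect the "name" value itself.
empty_name_content = {"name": ""}
assert bool(empty_name_content) and not empty_name_content.get("name", "")
```
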
From 5d3ed79944d47f0c9d9c040bf71530eade23e19c Mon Sep 17 00:00:00 2001
From: Katie Wolfe
Date: Mon, 22 Apr 2019 12:59:00 -0400
Subject: [PATCH 004/231] Show heroes if room name or canonical alias are
 empty

Fixes #4194

Signed-off-by: Katie Wolfe
---
 synapse/handlers/sync.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index f1a436011eb4..b3e6be6dd228 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -583,17 +583,18 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token):
         )

         # if the room has a name or canonical_alias set, we can skip
-        # calculating heroes.
+        # calculating heroes. Empty strings are falsey, so we check
+        # for the "name" value and default to an empty string.
         if name_id:
             name = yield self.store.get_event(name_id, allow_none=True)
-            if name and name.content and name.content.name:
+            if name and name.content and "name" in name.content and name.content.get("name", ""):
                 defer.returnValue(summary)

         if canonical_alias_id:
             canonical_alias = yield self.store.get_event(
                 canonical_alias_id, allow_none=True,
             )
-            if canonical_alias and canonical_alias.content and canonical_alias.content.alias:
+            if canonical_alias and canonical_alias.content and canonical_alias.content.get("alias", ""):
                 defer.returnValue(summary)

         joined_user_ids = [

From b3e5db402dd96271b5a81d7f4c3191c48ac81e6c Mon Sep 17 00:00:00 2001
From: Katie Wolfe
Date: Wed, 24 Apr 2019 12:04:16 -0400
Subject: [PATCH 005/231] Clean up code

Signed-off-by: Katie Wolfe
---
 synapse/handlers/sync.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index b3e6be6dd228..9f93a7f2da38 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -587,14 +587,15 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token):
         # for the "name" value and default to an empty string.
         if name_id:
             name = yield self.store.get_event(name_id, allow_none=True)
-            if name and name.content and "name" in name.content and name.content.get("name", ""):
+            if name and name.content and name.content.get("name", ""):
                 defer.returnValue(summary)

         if canonical_alias_id:
             canonical_alias = yield self.store.get_event(
                 canonical_alias_id, allow_none=True,
             )
-            if canonical_alias and canonical_alias.content and canonical_alias.content.get("alias", ""):
+            if (canonical_alias and canonical_alias.content
+                and canonical_alias.content.get("alias", "")):
                 defer.returnValue(summary)

         joined_user_ids = [

From 7e07dc429fb6b0207c1725ec0770d5f859623201 Mon Sep 17 00:00:00 2001
From: Katie Wolfe
Date: Wed, 24 Apr 2019 12:43:18 -0400
Subject: [PATCH 006/231] Lint

I probably should've just run autopep8 in the first place...

Signed-off-by: Katie Wolfe
---
 synapse/handlers/sync.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 9f93a7f2da38..7cf757f65aa5 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -595,7 +595,7 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token):
                 canonical_alias_id, allow_none=True,
             )
             if (canonical_alias and canonical_alias.content
-                and canonical_alias.content.get("alias", "")):
+                    and canonical_alias.content.get("alias", "")):
                 defer.returnValue(summary)

         joined_user_ids = [
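All of the handler code in this series predates Python 3's native async syntax and is written against Twisted's Deferred machinery. A sketch of the pattern, with a stand-in `store` object and event ID (not real Synapse fixtures): `yield` waits on a Deferred, and `defer.returnValue` plays the role of `return` inside the generator.

```python
from twisted.internet import defer

@defer.inlineCallbacks
def compute_summary_sketch(store, summary):
    # yield waits on the Deferred returned by the store, exactly as
    # compute_summary does for store.get_event() in the hunks above
    name = yield store.get_event("$name_event_id", allow_none=True)
    if name and name.content and name.content.get("name", ""):
        # pre-PEP-380 generators could not `return` a value, hence:
        defer.returnValue(summary)
```
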
From de7672b78f4dc250f7cfe151f4185bf186f3728c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2019 13:54:09 +0100
Subject: [PATCH 007/231] Don't bundle events in /sync or /events

As we'll send down the annotations too anyway, so this just ends up
confusing clients.
---
 synapse/events/utils.py              | 5 +++--
 synapse/handlers/events.py           | 3 +++
 synapse/rest/client/v2_alpha/sync.py | 3 +++
 3 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 27a2a9ef986e..e2d4384de199 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -330,12 +330,13 @@ def __init__(self, hs):
         )

     @defer.inlineCallbacks
-    def serialize_event(self, event, time_now, **kwargs):
+    def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
         """Serializes a single event.

         Args:
             event (EventBase)
             time_now (int): The current time in milliseconds
+            bundle_aggregations (bool): Whether to bundle in related events
             **kwargs: Arguments to pass to `serialize_event`

         Returns:
@@ -350,7 +351,7 @@ def serialize_event(self, event, time_now, **kwargs):

         # If MSC1849 is enabled then we need to look if thre are any relations
         # we need to bundle in with the event
-        if self.experimental_msc1849_support_enabled:
+        if self.experimental_msc1849_support_enabled and bundle_aggregations:
             annotations = yield self.store.get_aggregation_groups_for_event(
                 event_id,
             )

diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index 6003ad9cca87..eb525070cff8 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -122,6 +122,9 @@ def get_stream(self, auth_user_id, pagin_config, timeout=0,
                 chunks = yield self._event_serializer.serialize_events(
                     events, time_now, as_client_event=as_client_event,
+                    # We don't bundle "live" events, as otherwise clients
+                    # will end up double counting annotations.
+                    bundle_aggregations=False,
                 )

                 chunk = {

diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py
index c701e534e7b8..d3025025e337 100644
--- a/synapse/rest/client/v2_alpha/sync.py
+++ b/synapse/rest/client/v2_alpha/sync.py
@@ -358,6 +358,9 @@ def encode_room(
         def serialize(events):
             return self._event_serializer.serialize_events(
                 events, time_now=time_now,
+                # We don't bundle "live" events, as otherwise clients
+                # will end up double counting annotations.
+                bundle_aggregations=False,
                 token_id=token_id,
                 event_format=event_formatter,
                 only_event_fields=only_fields,

From ef13dc4846793b8969fbe76ebc7b7f3c607c8c1a Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2019 13:59:09 +0100
Subject: [PATCH 008/231] Newsfile
---
 changelog.d/5220.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5220.feature

diff --git a/changelog.d/5220.feature b/changelog.d/5220.feature
new file mode 100644
index 000000000000..747098c16624
--- /dev/null
+++ b/changelog.d/5220.feature
@@ -0,0 +1 @@
+Add experimental support for relations (aka reactions and edits).
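Patch 007's change to `serialize_event` boils down to a two-flag guard: aggregations are bundled only when the MSC1849 experiment is enabled and the caller has not opted out. A self-contained restatement of that logic (the helper name is mine, not Synapse's):

```python
def should_bundle(msc1849_enabled, bundle_aggregations=True):
    # mirrors: if self.experimental_msc1849_support_enabled and bundle_aggregations
    return msc1849_enabled and bundle_aggregations

assert should_bundle(True) is True           # default callers keep bundling
assert should_bundle(True, False) is False   # /sync and /events opt out
assert should_bundle(False) is False         # the feature flag still wins
```
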
From 7b0e804a4a684a210abf5107e720582f68f464e7 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2019 15:21:38 +0100
Subject: [PATCH 009/231] Fix get_max_topological_token to never return None
---
 synapse/storage/stream.py | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py
index 529ad4ea79f3..0b5f5f966375 100644
--- a/synapse/storage/stream.py
+++ b/synapse/storage/stream.py
@@ -592,8 +592,18 @@ def get_topological_token_for_event(self, event_id):
         )

     def get_max_topological_token(self, room_id, stream_key):
+        """Get the max topological token in a room that before given stream
+        ordering.
+
+        Args:
+            room_id (str)
+            stream_key (int)
+
+        Returns:
+            Deferred[int]
+        """
         sql = (
-            "SELECT max(topological_ordering) FROM events"
+            "SELECT coalesce(max(topological_ordering), 0) FROM events"
             " WHERE room_id = ? AND stream_ordering < ?"
         )
         return self._execute(

From c448f35de21119c877e31d2fc6c31de4dbe7db23 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 21 May 2019 15:35:13 +0100
Subject: [PATCH 010/231] Newsfile
---
 changelog.d/5221.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5221.bugfix

diff --git a/changelog.d/5221.bugfix b/changelog.d/5221.bugfix
new file mode 100644
index 000000000000..03aa363d156c
--- /dev/null
+++ b/changelog.d/5221.bugfix
@@ -0,0 +1 @@
+Fix race when backfilling in rooms with worker mode.

From 2dfbeea66f65f23582e045821ffd0dfda1dbab94 Mon Sep 17 00:00:00 2001
From: Steffen <33749463+YellowGarbageBag@users.noreply.github.com>
Date: Wed, 22 May 2019 13:53:16 +0200
Subject: [PATCH 011/231] Update README.md (#5222)

Add missing backslash
---
 docker/README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/README.md b/docker/README.md
index b27a692d5bb7..df5d0151e23b 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -161,7 +161,7 @@ specify values for `SYNAPSE_CONFIG_PATH`, `SYNAPSE_SERVER_NAME` and
 example:

 ```
-docker run -it --rm
+docker run -it --rm \
     --mount type=volume,src=synapse-data,dst=/data \
     -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \
     -e SYNAPSE_SERVER_NAME=my.matrix.host \
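Patch 009's `coalesce()` matters because SQL's `max()` over zero rows yields NULL, which surfaces in Python as `None`; wrapping it supplies 0 instead. This is easy to confirm with sqlite3 from the standard library (toy schema, not Synapse's real one):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE events (room_id TEXT, topological_ordering INT, stream_ordering INT)"
)

# pre-patch query: no matching rows, so max() comes back as NULL/None
bare = conn.execute(
    "SELECT max(topological_ordering) FROM events"
    " WHERE room_id = ? AND stream_ordering < ?",
    ("!room", 10),
).fetchone()[0]

# post-patch query: coalesce() turns the NULL into a usable integer token
fixed = conn.execute(
    "SELECT coalesce(max(topological_ordering), 0) FROM events"
    " WHERE room_id = ? AND stream_ordering < ?",
    ("!room", 10),
).fetchone()[0]

assert bare is None
assert fixed == 0
```
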
From 66b75e2d81a2bacf33d51ea5339fa9066264955d Mon Sep 17 00:00:00 2001
From: Neil Johnson
Date: Wed, 22 May 2019 13:55:32 +0100
Subject: [PATCH 012/231] Neilj/ensure get profileinfo available in client
 reader slaved store (#5213)

* expose SlavedProfileStore to ClientReaderSlavedStore
---
 changelog.d/5200.bugfix      | 1 +
 synapse/app/client_reader.py | 2 ++
 2 files changed, 3 insertions(+)
 create mode 100644 changelog.d/5200.bugfix

diff --git a/changelog.d/5200.bugfix b/changelog.d/5200.bugfix
new file mode 100644
index 000000000000..f346c7b0cc8d
--- /dev/null
+++ b/changelog.d/5200.bugfix
@@ -0,0 +1 @@
+Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo.

diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index 864f1eac4826..a16e037f3227 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -38,6 +38,7 @@
 from synapse.replication.slave.storage.directory import DirectoryStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.keys import SlavedKeyStore
+from synapse.replication.slave.storage.profile import SlavedProfileStore
 from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
 from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
 from synapse.replication.slave.storage.registration import SlavedRegistrationStore
@@ -81,6 +82,7 @@ class ClientReaderSlavedStore(
     SlavedApplicationServiceStore,
     SlavedRegistrationStore,
     SlavedTransactionStore,
+    SlavedProfileStore,
     SlavedClientIpStore,
     BaseSlavedStore,
 ):

From 62388a1e44278f9aa22244750433ac46142b266a Mon Sep 17 00:00:00 2001
From: Marcus Hoffmann
Date: Wed, 22 May 2019 17:48:12 +0200
Subject: [PATCH 013/231] remove urllib3 pin (#5230)

requests 2.22.0 as been released supporting urllib3 1.25.2

Signed-off-by: Marcus Hoffmann
---
 changelog.d/5230.misc          | 1 +
 synapse/python_dependencies.py | 8 --------
 2 files changed, 1 insertion(+), 8 deletions(-)
 create mode 100644 changelog.d/5230.misc

diff --git a/changelog.d/5230.misc b/changelog.d/5230.misc
new file mode 100644
index 000000000000..c681bc9748c4
--- /dev/null
+++ b/changelog.d/5230.misc
@@ -0,0 +1 @@
+Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2.

diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index e3f828c4bb4b..f64baa4d5896 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -74,14 +74,6 @@
     "attrs>=17.4.0",

     "netaddr>=0.7.18",
-
-    # requests is a transitive dep of treq, and urlib3 is a transitive dep
-    # of requests, as well as of sentry-sdk.
-    #
-    # As of requests 2.21, requests does not yet support urllib3 1.25.
-    # (If we do not pin it here, pip will give us the latest urllib3
-    # due to the dep via sentry-sdk.)
-    "urllib3<1.25",
 ]

 CONDITIONAL_REQUIREMENTS = {
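Patch 012 works because a worker's datastore class is just a stack of mixins: if `SlavedProfileStore` is missing from the bases, the composed class simply never gains a `get_profileinfo` method. A stripped-down illustration with stub classes (the real Synapse classes carry much more machinery):

```python
class BaseSlavedStore(object):
    pass

class SlavedProfileStore(BaseSlavedStore):
    # stub standing in for the real replicated profile store
    def get_profileinfo(self, user_id):
        return {"displayname": None, "avatar_url": None}

# adding the mixin to the bases is the entire fix
class ClientReaderSlavedStore(SlavedProfileStore, BaseSlavedStore):
    pass

assert hasattr(ClientReaderSlavedStore(), "get_profileinfo")
```
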
From 1a94de60e89090b8930bcdefe51f47f204f52605 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 22 May 2019 18:39:33 +0100
Subject: [PATCH 014/231] Run black on synapse.crypto.keyring (#5232)
---
 changelog.d/5232.misc     |   1 +
 synapse/crypto/keyring.py | 286 ++++++++++++++++++--------------------
 2 files changed, 138 insertions(+), 149 deletions(-)
 create mode 100644 changelog.d/5232.misc

diff --git a/changelog.d/5232.misc b/changelog.d/5232.misc
new file mode 100644
index 000000000000..1cdc71f09511
--- /dev/null
+++ b/changelog.d/5232.misc
@@ -0,0 +1 @@
+Run black on synapse.crypto.keyring.

diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index d8ba870cca5e..5cc98542ce4e 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -56,9 +56,9 @@
 logger = logging.getLogger(__name__)

-VerifyKeyRequest = namedtuple("VerifyRequest", (
-    "server_name", "key_ids", "json_object", "deferred"
-))
+VerifyKeyRequest = namedtuple(
+    "VerifyRequest", ("server_name", "key_ids", "json_object", "deferred")
+)
 """
 A request for a verify key to verify a JSON object.
@@ -96,9 +96,7 @@ def __init__(self, hs):

     def verify_json_for_server(self, server_name, json_object):
         return logcontext.make_deferred_yieldable(
-            self.verify_json_objects_for_server(
-                [(server_name, json_object)]
-            )[0]
+            self.verify_json_objects_for_server([(server_name, json_object)])[0]
         )

     def verify_json_objects_for_server(self, server_and_json):
@@ -130,18 +128,15 @@ def process(server_name, json_object):
             if not key_ids:
                 return defer.fail(
                     SynapseError(
-                        400,
-                        "Not signed by %s" % (server_name,),
-                        Codes.UNAUTHORIZED,
+                        400, "Not signed by %s" % (server_name,), Codes.UNAUTHORIZED
                     )
                 )

-            logger.debug("Verifying for %s with key_ids %s",
-                         server_name, key_ids)
+            logger.debug("Verifying for %s with key_ids %s", server_name, key_ids)

             # add the key request to the queue, but don't start it off yet.
             verify_request = VerifyKeyRequest(
-                server_name, key_ids, json_object, defer.Deferred(),
+                server_name, key_ids, json_object, defer.Deferred()
             )
             verify_requests.append(verify_request)
@@ -179,15 +174,13 @@ def _start_key_lookups(self, verify_requests):
             # any other lookups until we have finished.
             # The deferreds are called with no logcontext.
             server_to_deferred = {
-                rq.server_name: defer.Deferred()
-                for rq in verify_requests
+                rq.server_name: defer.Deferred() for rq in verify_requests
             }

             # We want to wait for any previous lookups to complete before
             # proceeding.
             yield self.wait_for_previous_lookups(
-                [rq.server_name for rq in verify_requests],
-                server_to_deferred,
+                [rq.server_name for rq in verify_requests], server_to_deferred
             )

             # Actually start fetching keys.
@@ -216,9 +209,7 @@ def remove_deferreds(res, verify_request):
                 return res

             for verify_request in verify_requests:
-                verify_request.deferred.addBoth(
-                    remove_deferreds, verify_request,
-                )
+                verify_request.deferred.addBoth(remove_deferreds, verify_request)
         except Exception:
             logger.exception("Error starting key lookups")
@@ -248,7 +239,8 @@ def wait_for_previous_lookups(self, server_names, server_to_deferred):
                 break
             logger.info(
                 "Waiting for existing lookups for %s to complete [loop %i]",
-                [w[0] for w in wait_on], loop_count,
+                [w[0] for w in wait_on],
+                loop_count,
             )
             with PreserveLoggingContext():
                 yield defer.DeferredList((w[1] for w in wait_on))
@@ -335,13 +327,14 @@ def do_iterations():

         with PreserveLoggingContext():
             for verify_request in requests_missing_keys:
-                verify_request.deferred.errback(SynapseError(
-                    401,
-                    "No key for %s with id %s" % (
-                        verify_request.server_name, verify_request.key_ids,
-                    ),
-                    Codes.UNAUTHORIZED,
-                ))
+                verify_request.deferred.errback(
+                    SynapseError(
+                        401,
+                        "No key for %s with id %s"
+                        % (verify_request.server_name, verify_request.key_ids),
+                        Codes.UNAUTHORIZED,
+                    )
+                )

         def on_err(err):
             with PreserveLoggingContext():
@@ -383,25 +376,26 @@ def get_key(perspective_name, perspective_keys):
                 )
                 defer.returnValue(result)
             except KeyLookupError as e:
-                logger.warning(
-                    "Key lookup failed from %r: %s", perspective_name, e,
-                )
+                logger.warning("Key lookup failed from %r: %s", perspective_name, e)
             except Exception as e:
                 logger.exception(
                     "Unable to get key from %r: %s %s",
                     perspective_name,
-                    type(e).__name__, str(e),
+                    type(e).__name__,
+                    str(e),
                 )

             defer.returnValue({})

-        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(get_key, p_name, p_keys)
-                for p_name, p_keys in self.perspective_servers.items()
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
+        results = yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(get_key, p_name, p_keys)
+                    for p_name, p_keys in self.perspective_servers.items()
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )

         union_of_keys = {}
         for result in results:
@@ -412,32 +406,30 @@ def get_key(perspective_name, perspective_keys):

     @defer.inlineCallbacks
     def get_keys_from_server(self, server_name_and_key_ids):
-        results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.get_server_verify_key_v2_direct,
-                    server_name,
-                    key_ids,
-                )
-                for server_name, key_ids in server_name_and_key_ids
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
+        results = yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self.get_server_verify_key_v2_direct, server_name, key_ids
+                    )
+                    for server_name, key_ids in server_name_and_key_ids
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )

         merged = {}
         for result in results:
             merged.update(result)

-        defer.returnValue({
-            server_name: keys
-            for server_name, keys in merged.items()
-            if keys
-        })
+        defer.returnValue(
+            {server_name: keys for server_name, keys in merged.items() if keys}
+        )

     @defer.inlineCallbacks
-    def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
-                                          perspective_name,
-                                          perspective_keys):
+    def get_server_verify_key_v2_indirect(
+        self, server_names_and_key_ids, perspective_name, perspective_keys
+    ):
         # TODO(mark): Set the minimum_valid_until_ts to that needed by
         # the events being validated or the current time if validating
         # an incoming request.
@@ -448,9 +440,7 @@ def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
             data={
                 u"server_keys": {
                     server_name: {
-                        key_id: {
-                            u"minimum_valid_until_ts": 0
-                        } for key_id in key_ids
+                        key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids
                     }
                     for server_name, key_ids in server_names_and_key_ids
                 }
             },
             long_retries=True,
         )
         except (NotRetryingDestination, RequestSendFailed) as e:
-            raise_from(
-                KeyLookupError("Failed to connect to remote server"), e,
-            )
+            raise_from(KeyLookupError("Failed to connect to remote server"), e)
         except HttpResponseException as e:
-            raise_from(
-                KeyLookupError("Remote server returned an error"), e,
-            )
+            raise_from(KeyLookupError("Remote server returned an error"), e)

         keys = {}

         responses = query_response["server_keys"]

         for response in responses:
-            if (u"signatures" not in response
-                    or perspective_name not in response[u"signatures"]):
+            if (
+                u"signatures" not in response
+                or perspective_name not in response[u"signatures"]
+            ):
                 raise KeyLookupError(
                     "Key response not signed by perspective server"
                     " %r" % (perspective_name,)
                 )
@@ -482,9 +470,7 @@ def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
             for key_id in response[u"signatures"][perspective_name]:
                 if key_id in perspective_keys:
                     verify_signed_json(
-                        response,
-                        perspective_name,
-                        perspective_keys[key_id]
+                        response, perspective_name, perspective_keys[key_id]
                     )
                     verified = True
@@ -494,7 +480,7 @@ def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
                     " known key, signed with: %r, known keys: %r",
                     perspective_name,
                     list(response[u"signatures"][perspective_name]),
-                    list(perspective_keys)
+                    list(perspective_keys),
                 )
                 raise KeyLookupError(
                     "Response not signed with a known key for perspective"
@@ -508,18 +494,20 @@ def get_server_verify_key_v2_indirect(self, server_names_and_key_ids,
             keys.setdefault(server_name, {}).update(processed_response)

-        yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store_keys,
-                    server_name=server_name,
-                    from_server=perspective_name,
-                    verify_keys=response_keys,
-                )
-                for server_name, response_keys in keys.items()
-            ],
-            consumeErrors=True
-        ).addErrback(unwrapFirstError))
+        yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self.store_keys,
+                        server_name=server_name,
+                        from_server=perspective_name,
+                        verify_keys=response_keys,
+                    )
+                    for server_name, response_keys in keys.items()
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )

         defer.returnValue(keys)
@@ -534,26 +522,26 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids):
             try:
                 response = yield self.client.get_json(
                     destination=server_name,
-                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                    path="/_matrix/key/v2/server/"
+                    + urllib.parse.quote(requested_key_id),
                     ignore_backoff=True,
                 )
             except (NotRetryingDestination, RequestSendFailed) as e:
-                raise_from(
-                    KeyLookupError("Failed to connect to remote server"), e,
-                )
+                raise_from(KeyLookupError("Failed to connect to remote server"), e)
             except HttpResponseException as e:
-                raise_from(
-                    KeyLookupError("Remote server returned an error"), e,
-                )
+                raise_from(KeyLookupError("Remote server returned an error"), e)

-            if (u"signatures" not in response
-                    or server_name not in response[u"signatures"]):
+            if (
+                u"signatures" not in response
+                or server_name not in response[u"signatures"]
+            ):
                 raise KeyLookupError("Key response not signed by remote server")

             if response["server_name"] != server_name:
-                raise KeyLookupError("Expected a response for server %r not %r" % (
-                    server_name, response["server_name"]
-                ))
+                raise KeyLookupError(
+                    "Expected a response for server %r not %r"
+                    % (server_name, response["server_name"])
+                )

             response_keys = yield self.process_v2_response(
                 from_server=server_name,
                 requested_ids=[requested_key_id],
                 response_json=response,
             )

             keys.update(response_keys)

         yield self.store_keys(
-            server_name=server_name,
-            from_server=server_name,
-            verify_keys=keys,
+            server_name=server_name, from_server=server_name, verify_keys=keys
         )
         defer.returnValue({server_name: keys})

     @defer.inlineCallbacks
-    def process_v2_response(
-        self, from_server, response_json, requested_ids=[],
-    ):
+    def process_v2_response(self, from_server, response_json, requested_ids=[]):
         """Parse a 'Server Keys' structure from the result of a /key request

         This is used to parse either the entirety of the response from
@@ -627,20 +611,13 @@ def process_v2_response(
         for key_id in response_json["signatures"].get(server_name, {}):
             if key_id not in response_json["verify_keys"]:
                 raise KeyLookupError(
-                    "Key response must include verification keys for all"
-                    " signatures"
+                    "Key response must include verification keys for all" " signatures"
                 )
             if key_id in verify_keys:
-                verify_signed_json(
-                    response_json,
-                    server_name,
-                    verify_keys[key_id]
-                )
+                verify_signed_json(response_json, server_name, verify_keys[key_id])

         signed_key_json = sign_json(
-            response_json,
-            self.config.server_name,
-            self.config.signing_key[0],
+            response_json, self.config.server_name, self.config.signing_key[0]
         )

         signed_key_json_bytes = encode_canonical_json(signed_key_json)
@@ -653,21 +630,23 @@ def process_v2_response(
         response_keys.update(verify_keys)
         response_keys.update(old_verify_keys)

-        yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store.store_server_keys_json,
-                    server_name=server_name,
-                    key_id=key_id,
-                    from_server=from_server,
-                    ts_now_ms=time_now_ms,
-                    ts_expires_ms=ts_valid_until_ms,
-                    key_json_bytes=signed_key_json_bytes,
-                )
-                for key_id in updated_key_ids
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
+        yield logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self.store.store_server_keys_json,
+                        server_name=server_name,
+                        key_id=key_id,
+                        from_server=from_server,
+                        ts_now_ms=time_now_ms,
+                        ts_expires_ms=ts_valid_until_ms,
+                        key_json_bytes=signed_key_json_bytes,
+                    )
+                    for key_id in updated_key_ids
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )

         defer.returnValue(response_keys)
@@ -681,16 +660,21 @@ def store_keys(self, server_name, from_server, verify_keys):
             A deferred that completes when the keys are stored.
         """
         # TODO(markjh): Store whether the keys have expired.
-        return logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                run_in_background(
-                    self.store.store_server_verify_key,
-                    server_name, server_name, key.time_added, key
-                )
-                for key_id, key in verify_keys.items()
-            ],
-            consumeErrors=True,
-        ).addErrback(unwrapFirstError))
+        return logcontext.make_deferred_yieldable(
+            defer.gatherResults(
+                [
+                    run_in_background(
+                        self.store.store_server_verify_key,
+                        server_name,
+                        server_name,
+                        key.time_added,
+                        key,
+                    )
+                    for key_id, key in verify_keys.items()
+                ],
+                consumeErrors=True,
+            ).addErrback(unwrapFirstError)
+        )

 @defer.inlineCallbacks
@@ -713,17 +697,19 @@ def _handle_key_deferred(verify_request):
     except KeyLookupError as e:
         logger.warn(
             "Failed to download keys for %s: %s %s",
-            server_name, type(e).__name__, str(e),
+            server_name,
+            type(e).__name__,
+            str(e),
         )
         raise SynapseError(
-            502,
-            "Error downloading keys for %s" % (server_name,),
-            Codes.UNAUTHORIZED,
+            502, "Error downloading keys for %s" % (server_name,), Codes.UNAUTHORIZED
         )
     except Exception as e:
         logger.exception(
             "Got Exception when downloading keys for %s: %s %s",
-            server_name, type(e).__name__, str(e),
+            server_name,
+            type(e).__name__,
+            str(e),
         )
         raise SynapseError(
             401,
@@ -733,22 +719,24 @@ def _handle_key_deferred(verify_request):

     json_object = verify_request.json_object

-    logger.debug("Got key %s %s:%s for server %s, verifying" % (
-        key_id, verify_key.alg, verify_key.version, server_name,
-    ))
+    logger.debug(
+        "Got key %s %s:%s for server %s, verifying"
+        % (key_id, verify_key.alg, verify_key.version, server_name)
+    )
     try:
         verify_signed_json(json_object, server_name, verify_key)
     except SignatureVerifyException as e:
         logger.debug(
             "Error verifying signature for %s:%s:%s with key %s: %s",
-            server_name, verify_key.alg, verify_key.version,
+            server_name,
+            verify_key.alg,
+            verify_key.version,
             encode_verify_key_base64(verify_key),
             str(e),
         )
         raise SynapseError(
             401,
-            "Invalid signature for server %s with key %s:%s: %s" % (
-                server_name, verify_key.alg, verify_key.version, str(e),
-            ),
+            "Invalid signature for server %s with key %s:%s: %s"
+            % (server_name, verify_key.alg, verify_key.version, str(e)),
             Codes.UNAUTHORIZED,
         )
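Patch 014 contains no behavioural change; every hunk is the same mechanical reflow that black applies. Taking one hunk from the diff above, with stand-in values so the snippet runs on its own, the transformation looks like this:

```python
import logging

logger = logging.getLogger(__name__)
server_name, key_ids = "matrix.org", ["ed25519:a"]  # stand-in values

# before black: arguments wrapped to align with the opening parenthesis
logger.debug("Verifying for %s with key_ids %s",
             server_name, key_ids)

# after black: the call fits within the line length, so it is collapsed
logger.debug("Verifying for %s with key_ids %s", server_name, key_ids)
```
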
From 85d1e03b9d50c1c64d2742ef3ec4f2dcd2bf7f9f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 May 2019 11:17:42 +0100
Subject: [PATCH 015/231] Simplifications and comments in do_auth (#5227)

I was staring at this function trying to figure out wtf it was actually
doing. This is (hopefully) a non-functional refactor which makes it a
bit clearer.
---
 changelog.d/5227.misc            |   1 +
 synapse/handlers/federation.py   | 301 +++++++++++++++++++------------
 synapse/storage/events_worker.py |   2 +-
 3 files changed, 183 insertions(+), 121 deletions(-)
 create mode 100644 changelog.d/5227.misc

diff --git a/changelog.d/5227.misc b/changelog.d/5227.misc
new file mode 100644
index 000000000000..32bd7b60094e
--- /dev/null
+++ b/changelog.d/5227.misc
@@ -0,0 +1 @@
+Simplifications and comments in do_auth.

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 2202ed699aae..cf4fad7de0c9 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2013,15 +2013,44 @@ def do_auth(self, origin, event, context, auth_events):

         Args:
             origin (str):
-            event (synapse.events.FrozenEvent):
+            event (synapse.events.EventBase):
             context (synapse.events.snapshot.EventContext):
-            auth_events (dict[(str, str)->str]):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):
+                Map from (event_type, state_key) to event
+
+                What we expect the event's auth_events to be, based on the event's
+                position in the dag. I think? maybe??
+
+                Also NB that this function adds entries to it.
+
+        Returns:
+            defer.Deferred[None]
+        """
+        room_version = yield self.store.get_room_version(event.room_id)
+
+        yield self._update_auth_events_and_context_for_auth(
+            origin, event, context, auth_events
+        )
+        try:
+            self.auth.check(room_version, event, auth_events=auth_events)
+        except AuthError as e:
+            logger.warn("Failed auth resolution for %r because %s", event, e)
+            raise e
+
+    @defer.inlineCallbacks
+    def _update_auth_events_and_context_for_auth(
+        self, origin, event, context, auth_events
+    ):
+        """Helper for do_auth. See there for docs.
+
+        Args:
+            origin (str):
+            event (synapse.events.EventBase):
+            context (synapse.events.snapshot.EventContext):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):

         Returns:
             defer.Deferred[None]
         """
-        # Check if we have all the auth events.
-        current_state = set(e.event_id for e in auth_events.values())
         event_auth_events = set(event.auth_event_ids())

         if event.is_state():
@@ -2029,11 +2058,21 @@ def do_auth(self, origin, event, context, auth_events):
         else:
             event_key = None

-        if event_auth_events - current_state:
+        # if the event's auth_events refers to events which are not in our
+        # calculated auth_events, we need to fetch those events from somewhere.
+        #
+        # we start by fetching them from the store, and then try calling /event_auth/.
+        missing_auth = event_auth_events.difference(
+            e.event_id for e in auth_events.values()
+        )
+
+        if missing_auth:
             # TODO: can we use store.have_seen_events here instead?
             have_events = yield self.store.get_seen_events_with_rejections(
-                event_auth_events - current_state
+                missing_auth
             )
+            logger.debug("Got events %s from store", have_events)
+            missing_auth.difference_update(have_events.keys())
         else:
             have_events = {}
@@ -2042,13 +2081,12 @@ def do_auth(self, origin, event, context, auth_events):
             for e in auth_events.values()
         })

-        seen_events = set(have_events.keys())
-
-        missing_auth = event_auth_events - seen_events - current_state
-
         if missing_auth:
-            logger.info("Missing auth: %s", missing_auth)
             # If we don't have all the auth events, we need to get them.
+            logger.info(
+                "auth_events contains unknown events: %s",
+                missing_auth,
+            )
             try:
                 remote_auth_chain = yield self.federation_client.get_event_auth(
                     origin, event.room_id, event.event_id
                 )
@@ -2089,145 +2127,168 @@ def do_auth(self, origin, event, context, auth_events):
                 have_events = yield self.store.get_seen_events_with_rejections(
                     event.auth_event_ids()
                 )
-                seen_events = set(have_events.keys())
             except Exception:
                 # FIXME:
                 logger.exception("Failed to get auth chain")

+        if event.internal_metadata.is_outlier():
+            logger.info("Skipping auth_event fetch for outlier")
+            return
+
         # FIXME: Assumes we have and stored all the state for all the
         # prev_events
-        current_state = set(e.event_id for e in auth_events.values())
-        different_auth = event_auth_events - current_state
+        different_auth = event_auth_events.difference(
+            e.event_id for e in auth_events.values()
+        )

-        room_version = yield self.store.get_room_version(event.room_id)
+        if not different_auth:
+            return

-        if different_auth and not event.internal_metadata.is_outlier():
-            # Do auth conflict res.
-            logger.info("Different auth: %s", different_auth)
-
-            different_events = yield logcontext.make_deferred_yieldable(
-                defer.gatherResults([
-                    logcontext.run_in_background(
-                        self.store.get_event,
-                        d,
-                        allow_none=True,
-                        allow_rejected=False,
-                    )
-                    for d in different_auth
-                    if d in have_events and not have_events[d]
-                ], consumeErrors=True)
-            ).addErrback(unwrapFirstError)
-
-            if different_events:
-                local_view = dict(auth_events)
-                remote_view = dict(auth_events)
-                remote_view.update({
-                    (d.type, d.state_key): d for d in different_events if d
-                })
+        logger.info(
+            "auth_events refers to events which are not in our calculated auth "
+            "chain: %s",
+            different_auth,
+        )
+
+        room_version = yield self.store.get_room_version(event.room_id)

-                new_state = yield self.state_handler.resolve_events(
-                    room_version,
-                    [list(local_view.values()), list(remote_view.values())],
-                    event
+        different_events = yield logcontext.make_deferred_yieldable(
+            defer.gatherResults([
+                logcontext.run_in_background(
+                    self.store.get_event,
+                    d,
+                    allow_none=True,
+                    allow_rejected=False,
                 )
+                for d in different_auth
+                if d in have_events and not have_events[d]
+            ], consumeErrors=True)
+        ).addErrback(unwrapFirstError)
+
+        if different_events:
+            local_view = dict(auth_events)
+            remote_view = dict(auth_events)
+            remote_view.update({
+                (d.type, d.state_key): d for d in different_events if d
+            })

-                auth_events.update(new_state)
+            new_state = yield self.state_handler.resolve_events(
+                room_version,
+                [list(local_view.values()), list(remote_view.values())],
+                event
+            )

-                current_state = set(e.event_id for e in auth_events.values())
-                different_auth = event_auth_events - current_state
+            logger.info(
+                "After state res: updating auth_events with new state %s",
+                {
+                    (d.type, d.state_key): d.event_id for d in new_state.values()
+                    if auth_events.get((d.type, d.state_key)) != d
+                },
+            )

-            yield self._update_context_for_auth_events(
-                event, context, auth_events, event_key,
-            )
+            auth_events.update(new_state)
+
+            different_auth = event_auth_events.difference(
+                e.event_id for e in auth_events.values()
+            )

-        if different_auth and not event.internal_metadata.is_outlier():
-            logger.info("Different auth after resolution: %s", different_auth)
+            yield self._update_context_for_auth_events(
+                event, context, auth_events, event_key,
+            )

-            # Only do auth resolution if we have something new to say.
-            # We can't rove an auth failure.
-            do_resolution = False
+        if not different_auth:
+            # we're done
+            return

-            provable = [
-                RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR,
-            ]
+        logger.info(
+            "auth_events still refers to events which are not in the calculated auth "
+            "chain after state resolution: %s",
+            different_auth,
+        )

-            for e_id in different_auth:
-                if e_id in have_events:
-                    if have_events[e_id] in provable:
-                        do_resolution = True
-                        break
+        # Only do auth resolution if we have something new to say.
+        # We can't prove an auth failure.
+        do_resolution = False

-            if do_resolution:
-                prev_state_ids = yield context.get_prev_state_ids(self.store)
-                # 1. Get what we think is the auth chain.
-                auth_ids = yield self.auth.compute_auth_events(
-                    event, prev_state_ids
-                )
-                local_auth_chain = yield self.store.get_auth_chain(
-                    auth_ids, include_given=True
-                )
+        for e_id in different_auth:
+            if e_id in have_events:
+                if have_events[e_id] == RejectedReason.NOT_ANCESTOR:
+                    do_resolution = True
+                    break

-                try:
-                    # 2. Get remote difference.
-                    result = yield self.federation_client.query_auth(
-                        origin,
-                        event.room_id,
-                        event.event_id,
-                        local_auth_chain,
-                    )
+        if not do_resolution:
+            logger.info(
+                "Skipping auth resolution due to lack of provable rejection reasons"
+            )
+            return

-                    seen_remotes = yield self.store.have_seen_events(
-                        [e.event_id for e in result["auth_chain"]]
-                    )
+        logger.info("Doing auth resolution")

-                    # 3. Process any remote auth chain events we haven't seen.
-                    for ev in result["auth_chain"]:
-                        if ev.event_id in seen_remotes:
-                            continue
+        prev_state_ids = yield context.get_prev_state_ids(self.store)

-                        if ev.event_id == event.event_id:
-                            continue
+        # 1. Get what we think is the auth chain.
+        auth_ids = yield self.auth.compute_auth_events(
+            event, prev_state_ids
+        )
+        local_auth_chain = yield self.store.get_auth_chain(
+            auth_ids, include_given=True
+        )

-                        try:
-                            auth_ids = ev.auth_event_ids()
-                            auth = {
-                                (e.type, e.state_key): e
-                                for e in result["auth_chain"]
-                                if e.event_id in auth_ids
-                                or event.type == EventTypes.Create
-                            }
-                            ev.internal_metadata.outlier = True
+        try:
+            # 2. Get remote difference.
+            result = yield self.federation_client.query_auth(
+                origin,
+                event.room_id,
+                event.event_id,
+                local_auth_chain,
+            )

-                            logger.debug(
-                                "do_auth %s different_auth: %s",
-                                event.event_id, e.event_id
-                            )
+            seen_remotes = yield self.store.have_seen_events(
+                [e.event_id for e in result["auth_chain"]]
+            )

-                            yield self._handle_new_event(
-                                origin, ev, auth_events=auth
-                            )
+            # 3. Process any remote auth chain events we haven't seen.
+            for ev in result["auth_chain"]:
+                if ev.event_id in seen_remotes:
+                    continue

-                            if ev.event_id in event_auth_events:
-                                auth_events[(ev.type, ev.state_key)] = ev
-                        except AuthError:
-                            pass
+                if ev.event_id == event.event_id:
+                    continue

-                except Exception:
-                    # FIXME:
-                    logger.exception("Failed to query auth chain")
+                try:
+                    auth_ids = ev.auth_event_ids()
+                    auth = {
+                        (e.type, e.state_key): e
+                        for e in result["auth_chain"]
+                        if e.event_id in auth_ids
+                        or event.type == EventTypes.Create
+                    }
+                    ev.internal_metadata.outlier = True
+
+                    logger.debug(
+                        "do_auth %s different_auth: %s",
+                        event.event_id, e.event_id
+                    )
+
+                    yield self._handle_new_event(
+                        origin, ev, auth_events=auth
+                    )

-                # 4. Look at rejects and their proofs.
-                # TODO.
+                    if ev.event_id in event_auth_events:
+                        auth_events[(ev.type, ev.state_key)] = ev
+                except AuthError:
+                    pass

-            yield self._update_context_for_auth_events(
-                event, context, auth_events, event_key,
-            )
+        except Exception:
+            # FIXME:
+            logger.exception("Failed to query auth chain")

-        try:
-            self.auth.check(room_version, event, auth_events=auth_events)
-        except AuthError as e:
-            logger.warn("Failed auth resolution for %r because %s", event, e)
-            raise e
+        # 4. Look at rejects and their proofs.
+        # TODO.
+
+        yield self._update_context_for_auth_events(
+            event, context, auth_events, event_key,
+        )

     @defer.inlineCallbacks
     def _update_context_for_auth_events(self, event, context, auth_events,
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 83ffae2132e4..21b353cad3fe 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -610,7 +610,7 @@ def f(txn):

             return res

-        return self.runInteraction("get_rejection_reasons", f)
+        return self.runInteraction("get_seen_events_with_rejections", f)

     def _get_total_state_event_counts_txn(self, txn, room_id):
         """
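The structural idea of patch 015 is to flatten the nested `if different_auth and not ... is_outlier():` blocks into guard clauses that return early, so each stage of the helper reads linearly. A skeletal restatement (function and argument names are mine, not Synapse's, and each `pass` stands for a stage of the real logic):

```python
def update_auth_events_sketch(event_auth_events, auth_events, is_outlier):
    # stage 1: fetch anything the event references that we don't know about
    missing_auth = set(event_auth_events) - set(auth_events)
    if missing_auth:
        pass  # ...fetch from the store, then try /event_auth/...

    # guard clause: outliers skip everything below
    if is_outlier:
        return "skipped: outlier"

    # stage 2: state resolution between our view and the event's view
    different_auth = set(event_auth_events) - set(auth_events)
    if not different_auth:
        return "done: auth chains agree"

    # stage 3: last resort, /query_auth/ against the origin server
    return "resolved via query_auth"
```
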
From 2e052110ee0bca17b8e27b6b48ee8b7c64bc94ae Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 May 2019 11:45:39 +0100
Subject: [PATCH 016/231] Rewrite store_server_verify_key to store several
 keys at once (#5234)

Storing server keys hammered the database a bit. This replaces the
implementation which stored a single key, with one which can do many
updates at once.
---
 changelog.d/5234.misc        |  1 +
 synapse/crypto/keyring.py    | 59 ++++++++------------------
 synapse/storage/keys.py      | 65 +++++++++++++++++++++---------------
 tests/crypto/test_keyring.py | 14 ++++++--
 tests/storage/test_keys.py   | 44 ++++++++++++++++--------
 5 files changed, 96 insertions(+), 87 deletions(-)
 create mode 100644 changelog.d/5234.misc

diff --git a/changelog.d/5234.misc b/changelog.d/5234.misc
new file mode 100644
index 000000000000..43fbd6f67c59
--- /dev/null
+++ b/changelog.d/5234.misc
@@ -0,0 +1 @@
+Rewrite store_server_verify_key to store several keys at once.

diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 5cc98542ce4e..badb5254ea1a 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -453,10 +453,11 @@ def get_server_verify_key_v2_indirect(
             raise_from(KeyLookupError("Remote server returned an error"), e)

         keys = {}
+        added_keys = []

-        responses = query_response["server_keys"]
+        time_now_ms = self.clock.time_msec()

-        for response in responses:
+        for response in query_response["server_keys"]:
             if (
                 u"signatures" not in response
                 or perspective_name not in response[u"signatures"]
@@ -492,21 +493,13 @@ def get_server_verify_key_v2_indirect(
             )

             server_name = response["server_name"]
+            added_keys.extend(
+                (server_name, key_id, key) for key_id, key in processed_response.items()
+            )
             keys.setdefault(server_name, {}).update(processed_response)

-        yield logcontext.make_deferred_yieldable(
-            defer.gatherResults(
-                [
-                    run_in_background(
-                        self.store_keys,
-                        server_name=server_name,
-                        from_server=perspective_name,
-                        verify_keys=response_keys,
-                    )
-                    for server_name, response_keys in keys.items()
-                ],
-                consumeErrors=True,
-            ).addErrback(unwrapFirstError)
+        yield self.store.store_server_verify_keys(
+            perspective_name, time_now_ms, added_keys
         )

         defer.returnValue(keys)
@@ -519,6 +512,7 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids):
             if requested_key_id in keys:
                 continue

+            time_now_ms = self.clock.time_msec()
             try:
                 response = yield self.client.get_json(
                     destination=server_name,
@@ -548,12 +542,13 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids):
                 requested_ids=[requested_key_id],
                 response_json=response,
             )
-
+            yield self.store.store_server_verify_keys(
+                server_name,
+                time_now_ms,
+                ((server_name, key_id, key) for key_id, key in response_keys.items()),
+            )
             keys.update(response_keys)

-        yield self.store_keys(
-            server_name=server_name, from_server=server_name, verify_keys=keys
-        )
         defer.returnValue({server_name: keys})

     @defer.inlineCallbacks
@@ -650,32 +645,6 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):

         defer.returnValue(response_keys)

-    def store_keys(self, server_name, from_server, verify_keys):
-        """Store a collection of verify keys for a given server
-        Args:
-            server_name(str): The name of the server the keys are for.
-            from_server(str): The server the keys were downloaded from.
-            verify_keys(dict): A mapping of key_id to VerifyKey.
-        Returns:
-            A deferred that completes when the keys are stored.
-        """
-        # TODO(markjh): Store whether the keys have expired.
-        return logcontext.make_deferred_yieldable(
-            defer.gatherResults(
-                [
-                    run_in_background(
-                        self.store.store_server_verify_key,
-                        server_name,
-                        server_name,
-                        key.time_added,
-                        key,
-                    )
-                    for key_id, key in verify_keys.items()
-                ],
-                consumeErrors=True,
-            ).addErrback(unwrapFirstError)
-        )
-

 @defer.inlineCallbacks
diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py
index 70365417921a..3c5f52009b3f 100644
--- a/synapse/storage/keys.py
+++ b/synapse/storage/keys.py
@@ -84,38 +84,51 @@ def _txn(txn):

         return self.runInteraction("get_server_verify_keys", _txn)

-    def store_server_verify_key(
-        self, server_name, from_server, time_now_ms, verify_key
-    ):
-        """Stores a NACL verification key for the given server.
+    def store_server_verify_keys(self, from_server, ts_added_ms, verify_keys):
+        """Stores NACL verification keys for remote servers.
         Args:
-            server_name (str): The name of the server.
-            from_server (str): Where the verification key was looked up
-            time_now_ms (int): The time now in milliseconds
-            verify_key (nacl.signing.VerifyKey): The NACL verify key.
+            from_server (str): Where the verification keys were looked up
+            ts_added_ms (int): The time to record that the key was added
+            verify_keys (iterable[tuple[str, str, nacl.signing.VerifyKey]]):
+                keys to be stored. Each entry is a triplet of
+                (server_name, key_id, key).
         """
-        key_id = "%s:%s" % (verify_key.alg, verify_key.version)
-
-        # XXX fix this to not need a lock (#3819)
-        def _txn(txn):
-            self._simple_upsert_txn(
-                txn,
-                table="server_signature_keys",
-                keyvalues={"server_name": server_name, "key_id": key_id},
-                values={
-                    "from_server": from_server,
-                    "ts_added_ms": time_now_ms,
-                    "verify_key": db_binary_type(verify_key.encode()),
-                },
+        key_values = []
+        value_values = []
+        invalidations = []
+        for server_name, key_id, verify_key in verify_keys:
+            key_values.append((server_name, key_id))
+            value_values.append(
+                (
+                    from_server,
+                    ts_added_ms,
+                    db_binary_type(verify_key.encode()),
+                )
             )
             # invalidate takes a tuple corresponding to the params of
             # _get_server_verify_key. _get_server_verify_key only takes one
             # param, which is itself the 2-tuple (server_name, key_id).
-            txn.call_after(
-                self._get_server_verify_key.invalidate, ((server_name, key_id),)
-            )
-
-        return self.runInteraction("store_server_verify_key", _txn)
+            invalidations.append((server_name, key_id))
+
+        def _invalidate(res):
+            f = self._get_server_verify_key.invalidate
+            for i in invalidations:
+                f((i, ))
+            return res
+
+        return self.runInteraction(
+            "store_server_verify_keys",
+            self._simple_upsert_many_txn,
+            table="server_signature_keys",
+            key_names=("server_name", "key_id"),
+            key_values=key_values,
+            value_names=(
+                "from_server",
+                "ts_added_ms",
+                "verify_key",
+            ),
+            value_values=value_values,
+        ).addCallback(_invalidate)

     def store_server_keys_json(
         self, server_name, key_id, from_server, ts_now_ms, ts_expires_ms, key_json_bytes
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index 3c79d4afe749..bcffe53a9187 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -192,8 +192,18 @@ def test_verify_json_for_server(self):
         kr = keyring.Keyring(self.hs)

         key1 = signedjson.key.generate_signing_key(1)
-        r = self.hs.datastore.store_server_verify_key(
-            "server9", "", time.time() * 1000, signedjson.key.get_verify_key(key1)
+        key1_id = "%s:%s" % (key1.alg, key1.version)
+
+        r = self.hs.datastore.store_server_verify_keys(
+            "server9",
+            time.time() * 1000,
+            [
+                (
+                    "server9",
+                    key1_id,
+                    signedjson.key.get_verify_key(key1),
+                ),
+            ],
         )
         self.get_success(r)
         json1 = {}
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index 6bfaa00fe949..71ad7aee32fc 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -31,23 +31,32 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase):
     def test_get_server_verify_keys(self):
         store = self.hs.get_datastore()

-        d = store.store_server_verify_key("server1", "from_server", 0, KEY_1)
-        self.get_success(d)
-        d = store.store_server_verify_key("server1", "from_server", 0, KEY_2)
+        key_id_1 = "ed25519:key1"
+        key_id_2 = "ed25519:KEY_ID_2"
+        d = store.store_server_verify_keys(
+            "from_server",
+            10,
+            [
+                ("server1", key_id_1, KEY_1),
+                ("server1", key_id_2, KEY_2),
+            ],
+        )
         self.get_success(d)

         d = store.get_server_verify_keys(
-            [
-                ("server1", "ed25519:key1"),
-                ("server1", "ed25519:key2"),
-                ("server1", "ed25519:key3"),
-            ]
+            [("server1", key_id_1), ("server1", key_id_2), ("server1", "ed25519:key3")]
         )
         res = self.get_success(d)

         self.assertEqual(len(res.keys()), 3)
-        self.assertEqual(res[("server1", "ed25519:key1")].version, "key1")
-        self.assertEqual(res[("server1", "ed25519:key2")].version, "key2")
+        res1 = res[("server1", key_id_1)]
+        self.assertEqual(res1, KEY_1)
+        self.assertEqual(res1.version, "key1")
+
+        res2 = res[("server1", key_id_2)]
+        self.assertEqual(res2, KEY_2)
+        # version comes from the ID it was stored with
+        self.assertEqual(res2.version, "KEY_ID_2")

         # non-existent result gives None
         self.assertIsNone(res[("server1", "ed25519:key3")])
@@ -60,9 +69,14 @@ def test_cache(self):
         key_id_1 = "ed25519:key1"
         key_id_2 = "ed25519:key2"

-        d = store.store_server_verify_key("srv1", "from_server", 0, KEY_1)
-        self.get_success(d)
-        d = store.store_server_verify_key("srv1", "from_server", 0, KEY_2)
+        d = store.store_server_verify_keys(
+            "from_server",
+            0,
+            [
+                ("srv1", key_id_1, KEY_1),
+                ("srv1", key_id_2, KEY_2),
+            ],
+        )
         self.get_success(d)

         d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)])
@@ -81,7 +95,9 @@ def test_cache(self):
         new_key_2 = signedjson.key.get_verify_key(
             signedjson.key.generate_signing_key("key2")
         )
-        d = store.store_server_verify_key("srv1", "from_server", 10, new_key_2)
+        d = store.store_server_verify_keys(
+            "from_server", 10, [("srv1", key_id_2, new_key_2)]
+        )
         self.get_success(d)

         d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)])

From cc187f933796a8151f0d11c03ef6faa8d1949256 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 May 2019 11:46:05 +0100
Subject: [PATCH 017/231] Remove unused VerifyKey.expired and .time_added
 fields (#5235)

These were never used, and poking arbitary data into objects from other
packages seems confusing at best.
---
 changelog.d/5235.misc     | 1 +
 synapse/crypto/keyring.py | 3 ---
 2 files changed, 1 insertion(+), 3 deletions(-)
 create mode 100644 changelog.d/5235.misc

diff --git a/changelog.d/5235.misc b/changelog.d/5235.misc
new file mode 100644
index 000000000000..2296ad2a4f46
--- /dev/null
+++ b/changelog.d/5235.misc
@@ -0,0 +1 @@
+Remove unused VerifyKey.expired and .time_added fields.

diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index badb5254ea1a..ea910a2f215f 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -589,7 +589,6 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
                 key_base64 = key_data["key"]
                 key_bytes = decode_base64(key_base64)
                 verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.time_added = time_now_ms
                 verify_keys[key_id] = verify_key

         old_verify_keys = {}
@@ -598,8 +597,6 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
                 key_base64 = key_data["key"]
                 key_bytes = decode_base64(key_base64)
                 verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                verify_key.expired = key_data["expired_ts"]
-                verify_key.time_added = time_now_ms
                 old_verify_keys[key_id] = verify_key
From 84660d91b21eb3357ee0287319bc7f50e2222b21 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 23 May 2019 11:51:39 +0100
Subject: [PATCH 018/231] Simplify process_v2_response (#5236)

* Pass time_added_ms into process_v2_response

* Simplify process_v2_response

We can merge old_verify_keys into verify_keys, and reduce the number of
dicts flying around.
---
 changelog.d/5236.misc     |  1 +
 synapse/crypto/keyring.py | 50 +++++++++++++++++++++++----------------
 2 files changed, 30 insertions(+), 21 deletions(-)
 create mode 100644 changelog.d/5236.misc

diff --git a/changelog.d/5236.misc b/changelog.d/5236.misc
new file mode 100644
index 000000000000..cb4417a9f45f
--- /dev/null
+++ b/changelog.d/5236.misc
@@ -0,0 +1 @@
+Simplify Keyring.process_v2_response.
\ No newline at end of file
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index ea910a2f215f..9d629b2238d4 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -489,7 +489,7 @@ def get_server_verify_key_v2_indirect(
             )

             processed_response = yield self.process_v2_response(
-                perspective_name, response
+                perspective_name, response, time_added_ms=time_now_ms
             )

             server_name = response["server_name"]
@@ -541,6 +541,7 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids):
                 from_server=server_name,
                 requested_ids=[requested_key_id],
                 response_json=response,
+                time_added_ms=time_now_ms,
             )
             yield self.store.store_server_verify_keys(
@@ -552,7 +553,9 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids):
         defer.returnValue({server_name: keys})

     @defer.inlineCallbacks
-    def process_v2_response(self, from_server, response_json, requested_ids=[]):
+    def process_v2_response(
+        self, from_server, response_json, time_added_ms, requested_ids=[]
+    ):
         """Parse a 'Server Keys' structure from the result of a /key request

         This is used to parse either the entirety of the response from
@@ -573,6 +576,8 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
             response_json (dict): the json-decoded Server Keys response object

+            time_added_ms (int): the timestamp to record in server_keys_json
+
             requested_ids (iterable[str]): a list of the key IDs that were requested.
                 We will store the json for these key ids as well as any that are
                 actually in the response
@@ -581,8 +586,9 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
             Deferred[dict[str, nacl.signing.VerifyKey]]:
                 map from key_id to key object
         """
-        time_now_ms = self.clock.time_msec()
-        response_keys = {}
+
+        # start by extracting the keys from the response, since they may be required
+        # to validate the signature on the response.
         verify_keys = {}
         for key_id, key_data in response_json["verify_keys"].items():
             if is_signing_algorithm_supported(key_id):
@@ -591,23 +597,27 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
                 verify_key = decode_verify_key_bytes(key_id, key_bytes)
                 verify_keys[key_id] = verify_key

-        old_verify_keys = {}
+        # TODO: improve this signature checking
+        server_name = response_json["server_name"]
+        for key_id in response_json["signatures"].get(server_name, {}):
+            if key_id not in verify_keys:
+                raise KeyLookupError(
+                    "Key response must include verification keys for all signatures"
+                )
+
+            verify_signed_json(
+                response_json, server_name, verify_keys[key_id]
+            )
+
         for key_id, key_data in response_json["old_verify_keys"].items():
             if is_signing_algorithm_supported(key_id):
                 key_base64 = key_data["key"]
                 key_bytes = decode_base64(key_base64)
                 verify_key = decode_verify_key_bytes(key_id, key_bytes)
-                old_verify_keys[key_id] = verify_key
-
-        server_name = response_json["server_name"]
-        for key_id in response_json["signatures"].get(server_name, {}):
-            if key_id not in response_json["verify_keys"]:
-                raise KeyLookupError(
-                    "Key response must include verification keys for all" " signatures"
-                )
-            if key_id in verify_keys:
-                verify_signed_json(response_json, server_name, verify_keys[key_id])
+                verify_keys[key_id] = verify_key

+        # re-sign the json with our own key, so that it is ready if we are asked to
+        # give it out as a notary server
         signed_key_json = sign_json(
             response_json, self.config.server_name, self.config.signing_key[0]
         )
@@ -615,12 +625,10 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
         signed_key_json_bytes = encode_canonical_json(signed_key_json)
         ts_valid_until_ms = signed_key_json[u"valid_until_ts"]

+        # for reasons I don't quite understand, we store this json for the key ids we
+        # requested, as well as those we got.
         updated_key_ids = set(requested_ids)
         updated_key_ids.update(verify_keys)
-        updated_key_ids.update(old_verify_keys)
-
-        response_keys.update(verify_keys)
-        response_keys.update(old_verify_keys)

         yield logcontext.make_deferred_yieldable(
             defer.gatherResults(
@@ -630,7 +638,7 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
                         server_name=server_name,
                         key_id=key_id,
                         from_server=from_server,
-                        ts_now_ms=time_now_ms,
+                        ts_now_ms=time_added_ms,
                         ts_expires_ms=ts_valid_until_ms,
                         key_json_bytes=signed_key_json_bytes,
                     )
@@ -640,7 +648,7 @@ def process_v2_response(self, from_server, response_json, requested_ids=[]):
             ).addErrback(unwrapFirstError)
         )

-        defer.returnValue(response_keys)
+        defer.returnValue(verify_keys)
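Patch 018's simplification, reduced to plain dicts: current and old verify keys now accumulate in a single `verify_keys` dict keyed by key ID, instead of being collected separately and merged into a third dict at the end. The response shape follows the key/v2 API; the values here are placeholders:

```python
response_json = {
    "verify_keys": {"ed25519:a": {"key": "<base64>"}},
    "old_verify_keys": {"ed25519:old": {"key": "<base64>", "expired_ts": 1000}},
}

# one dict for both current and expired keys, as the patched code builds it
verify_keys = {}
verify_keys.update(response_json["verify_keys"])
verify_keys.update(response_json["old_verify_keys"])

assert set(verify_keys) == {"ed25519:a", "ed25519:old"}
```
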
--- changelog.d/5237.misc | 1 + synapse/crypto/keyring.py | 47 +++++++++++++------ synapse/storage/keys.py | 31 ++++++++---- .../delta/54/add_validity_to_server_keys.sql | 23 +++++++++ tests/crypto/test_keyring.py | 22 +++++---- tests/storage/test_keys.py | 44 +++++++++++------ 6 files changed, 122 insertions(+), 46 deletions(-) create mode 100644 changelog.d/5237.misc create mode 100644 synapse/storage/schema/delta/54/add_validity_to_server_keys.sql diff --git a/changelog.d/5237.misc b/changelog.d/5237.misc new file mode 100644 index 000000000000..f4fe3b821bf6 --- /dev/null +++ b/changelog.d/5237.misc @@ -0,0 +1 @@ +Store key validity time in the storage layer. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 9d629b2238d4..14a27288fd4c 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -20,7 +20,6 @@ from six import raise_from from six.moves import urllib -import nacl.signing from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -43,6 +42,7 @@ RequestSendFailed, SynapseError, ) +from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext, unwrapFirstError from synapse.util.logcontext import ( LoggingContext, @@ -307,11 +307,15 @@ def do_iterations(): # complete this VerifyKeyRequest. result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: - key = result_keys.get(key_id) - if key: + fetch_key_result = result_keys.get(key_id) + if fetch_key_result: with PreserveLoggingContext(): verify_request.deferred.callback( - (server_name, key_id, key) + ( + server_name, + key_id, + fetch_key_result.verify_key, + ) ) break else: @@ -348,12 +352,12 @@ def on_err(err): def get_keys_from_store(self, server_name_and_key_ids): """ Args: - server_name_and_key_ids (iterable(Tuple[str, iterable[str]]): + server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): list of (server_name, iterable[key_id]) tuples to fetch keys for Returns: - Deferred: resolves to dict[str, dict[str, VerifyKey|None]]: map from - server_name -> key_id -> VerifyKey + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: + map from server_name -> key_id -> FetchKeyResult """ keys_to_fetch = ( (server_name, key_id) @@ -430,6 +434,18 @@ def get_keys_from_server(self, server_name_and_key_ids): def get_server_verify_key_v2_indirect( self, server_names_and_key_ids, perspective_name, perspective_keys ): + """ + Args: + server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): + list of (server_name, iterable[key_id]) tuples to fetch keys for + perspective_name (str): name of the notary server to query for the keys + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the + notary server + + Returns: + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map + from server_name -> key_id -> FetchKeyResult + """ # TODO(mark): Set the minimum_valid_until_ts to that needed by # the events being validated or the current time if validating # an incoming request. 
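# (Illustrative sketch of the result shape the docstrings above describe; the
# literal values here are invented:
#     {
#         "server2": {
#             "ed25519:ver1": FetchKeyResult(verify_key=key, valid_until_ts=200000),
#         },
#     }
# i.e. a two-level map of server_name -> key_id -> FetchKeyResult, with None
# standing in for an unknown key in the get_keys_from_store case.)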
@@ -506,7 +522,7 @@ def get_server_verify_key_v2_indirect( @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): - keys = {} # type: dict[str, nacl.signing.VerifyKey] + keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: if requested_key_id in keys: @@ -583,9 +599,9 @@ def process_v2_response( actually in the response Returns: - Deferred[dict[str, nacl.signing.VerifyKey]]: - map from key_id to key object + Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ + ts_valid_until_ms = response_json[u"valid_until_ts"] # start by extracting the keys from the response, since they may be required # to validate the signature on the response. @@ -595,7 +611,9 @@ def process_v2_response( key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_keys[key_id] = verify_key + verify_keys[key_id] = FetchKeyResult( + verify_key=verify_key, valid_until_ts=ts_valid_until_ms + ) # TODO: improve this signature checking server_name = response_json["server_name"] @@ -606,7 +624,7 @@ def process_v2_response( ) verify_signed_json( - response_json, server_name, verify_keys[key_id] + response_json, server_name, verify_keys[key_id].verify_key ) for key_id, key_data in response_json["old_verify_keys"].items(): @@ -614,7 +632,9 @@ def process_v2_response( key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_keys[key_id] = verify_key + verify_keys[key_id] = FetchKeyResult( + verify_key=verify_key, valid_until_ts=key_data["expired_ts"] + ) # re-sign the json with our own key, so that it is ready if we are asked to # give it out as a notary server @@ -623,7 +643,6 @@ def process_v2_response( ) signed_key_json_bytes = encode_canonical_json(signed_key_json) - ts_valid_until_ms = signed_key_json[u"valid_until_ts"] # for reasons I don't quite understand, we store this json for the key ids we # requested, as well as those we got. diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 3c5f52009b3f..5300720dbb87 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -19,6 +19,7 @@ import six +import attr from signedjson.key import decode_verify_key_bytes from synapse.util import batch_iter @@ -36,6 +37,12 @@ db_binary_type = memoryview +@attr.s(slots=True, frozen=True) +class FetchKeyResult(object): + verify_key = attr.ib() # VerifyKey: the key itself + valid_until_ts = attr.ib() # int: how long we can use this key for + + class KeyStore(SQLBaseStore): """Persistence for signature verification keys """ @@ -54,8 +61,8 @@ def get_server_verify_keys(self, server_name_and_key_ids): iterable of (server_name, key-id) tuples to fetch keys for Returns: - Deferred: resolves to dict[Tuple[str, str], VerifyKey|None]: - map from (server_name, key_id) -> VerifyKey, or None if the key is + Deferred: resolves to dict[Tuple[str, str], FetchKeyResult|None]: + map from (server_name, key_id) -> FetchKeyResult, or None if the key is unknown """ keys = {} @@ -65,17 +72,19 @@ def _get_keys(txn, batch): # batch_iter always returns tuples so it's safe to do len(batch) sql = ( - "SELECT server_name, key_id, verify_key FROM server_signature_keys " - "WHERE 1=0" + "SELECT server_name, key_id, verify_key, ts_valid_until_ms " + "FROM server_signature_keys WHERE 1=0" ) + " OR (server_name=? 
AND key_id=?)" * len(batch) txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) for row in txn: - server_name, key_id, key_bytes = row - keys[(server_name, key_id)] = decode_verify_key_bytes( - key_id, bytes(key_bytes) + server_name, key_id, key_bytes, ts_valid_until_ms = row + res = FetchKeyResult( + verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), + valid_until_ts=ts_valid_until_ms, ) + keys[(server_name, key_id)] = res def _txn(txn): for batch in batch_iter(server_name_and_key_ids, 50): @@ -89,20 +98,21 @@ def store_server_verify_keys(self, from_server, ts_added_ms, verify_keys): Args: from_server (str): Where the verification keys were looked up ts_added_ms (int): The time to record that the key was added - verify_keys (iterable[tuple[str, str, nacl.signing.VerifyKey]]): + verify_keys (iterable[tuple[str, str, FetchKeyResult]]): keys to be stored. Each entry is a triplet of (server_name, key_id, key). """ key_values = [] value_values = [] invalidations = [] - for server_name, key_id, verify_key in verify_keys: + for server_name, key_id, fetch_result in verify_keys: key_values.append((server_name, key_id)) value_values.append( ( from_server, ts_added_ms, - db_binary_type(verify_key.encode()), + fetch_result.valid_until_ts, + db_binary_type(fetch_result.verify_key.encode()), ) ) # invalidate takes a tuple corresponding to the params of @@ -125,6 +135,7 @@ def _invalidate(res): value_names=( "from_server", "ts_added_ms", + "ts_valid_until_ms", "verify_key", ), value_values=value_values, diff --git a/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql b/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql new file mode 100644 index 000000000000..c01aa9d2d90b --- /dev/null +++ b/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql @@ -0,0 +1,23 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* When we can use this key until, before we have to refresh it. 
*/ +ALTER TABLE server_signature_keys ADD COLUMN ts_valid_until_ms BIGINT; + +UPDATE server_signature_keys SET ts_valid_until_ms = ( + SELECT MAX(ts_valid_until_ms) FROM server_keys_json skj WHERE + skj.server_name = server_signature_keys.server_name AND + skj.key_id = server_signature_keys.key_id +); diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index bcffe53a9187..83de32b05d33 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,6 +25,7 @@ from synapse.api.errors import SynapseError from synapse.crypto import keyring from synapse.crypto.keyring import KeyLookupError +from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -201,7 +202,7 @@ def test_verify_json_for_server(self): ( "server9", key1_id, - signedjson.key.get_verify_key(key1), + FetchKeyResult(signedjson.key.get_verify_key(key1), 1000), ), ], ) @@ -251,9 +252,10 @@ def get_json(destination, path, **kwargs): server_name_and_key_ids = [(SERVER_NAME, ("key1",))] keys = self.get_success(kr.get_keys_from_server(server_name_and_key_ids)) k = keys[SERVER_NAME][testverifykey_id] - self.assertEqual(k, testverifykey) - self.assertEqual(k.alg, "ed25519") - self.assertEqual(k.version, "ver1") + self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) + self.assertEqual(k.verify_key, testverifykey) + self.assertEqual(k.verify_key.alg, "ed25519") + self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) @@ -321,9 +323,10 @@ def post_json(destination, path, data, **kwargs): keys = self.get_success(kr.get_keys_from_perspectives(server_name_and_key_ids)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] - self.assertEqual(k, testverifykey) - self.assertEqual(k.alg, "ed25519") - self.assertEqual(k.version, "ver1") + self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) + self.assertEqual(k.verify_key, testverifykey) + self.assertEqual(k.verify_key.alg, "ed25519") + self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) @@ -346,7 +349,10 @@ def post_json(destination, path, data, **kwargs): @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): - with LoggingContext("testctx"): + with LoggingContext("testctx") as ctx: + # we set the "request" prop to make it easier to follow what's going on in the + # logs. 
+ ctx.request = "testctx" rv = yield f(*args, **kwargs) defer.returnValue(rv) diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 71ad7aee32fc..e07ff0120173 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -17,6 +17,8 @@ from twisted.internet.defer import Deferred +from synapse.storage.keys import FetchKeyResult + import tests.unittest KEY_1 = signedjson.key.decode_verify_key_base64( @@ -37,8 +39,8 @@ def test_get_server_verify_keys(self): "from_server", 10, [ - ("server1", key_id_1, KEY_1), - ("server1", key_id_2, KEY_2), + ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), ], ) self.get_success(d) @@ -50,13 +52,15 @@ def test_get_server_verify_keys(self): self.assertEqual(len(res.keys()), 3) res1 = res[("server1", key_id_1)] - self.assertEqual(res1, KEY_1) - self.assertEqual(res1.version, "key1") + self.assertEqual(res1.verify_key, KEY_1) + self.assertEqual(res1.verify_key.version, "key1") + self.assertEqual(res1.valid_until_ts, 100) res2 = res[("server1", key_id_2)] - self.assertEqual(res2, KEY_2) + self.assertEqual(res2.verify_key, KEY_2) # version comes from the ID it was stored with - self.assertEqual(res2.version, "KEY_ID_2") + self.assertEqual(res2.verify_key.version, "KEY_ID_2") + self.assertEqual(res2.valid_until_ts, 200) # non-existent result gives None self.assertIsNone(res[("server1", "ed25519:key3")]) @@ -73,8 +77,8 @@ def test_cache(self): "from_server", 0, [ - ("srv1", key_id_1, KEY_1), - ("srv1", key_id_2, KEY_2), + ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), ], ) self.get_success(d) @@ -82,26 +86,38 @@ def test_cache(self): d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) res = self.get_success(d) self.assertEqual(len(res.keys()), 2) - self.assertEqual(res[("srv1", key_id_1)], KEY_1) - self.assertEqual(res[("srv1", key_id_2)], KEY_2) + + res1 = res[("srv1", key_id_1)] + self.assertEqual(res1.verify_key, KEY_1) + self.assertEqual(res1.valid_until_ts, 100) + + res2 = res[("srv1", key_id_2)] + self.assertEqual(res2.verify_key, KEY_2) + self.assertEqual(res2.valid_until_ts, 200) # we should be able to look up the same thing again without a db hit res = store.get_server_verify_keys([("srv1", key_id_1)]) if isinstance(res, Deferred): res = self.successResultOf(res) self.assertEqual(len(res.keys()), 1) - self.assertEqual(res[("srv1", key_id_1)], KEY_1) + self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) new_key_2 = signedjson.key.get_verify_key( signedjson.key.generate_signing_key("key2") ) d = store.store_server_verify_keys( - "from_server", 10, [("srv1", key_id_2, new_key_2)] + "from_server", 10, [("srv1", key_id_2, FetchKeyResult(new_key_2, 300))] ) self.get_success(d) d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) res = self.get_success(d) self.assertEqual(len(res.keys()), 2) - self.assertEqual(res[("srv1", key_id_1)], KEY_1) - self.assertEqual(res[("srv1", key_id_2)], new_key_2) + + res1 = res[("srv1", key_id_1)] + self.assertEqual(res1.verify_key, KEY_1) + self.assertEqual(res1.valid_until_ts, 100) + + res2 = res[("srv1", key_id_2)] + self.assertEqual(res2.verify_key, new_key_2) + self.assertEqual(res2.valid_until_ts, 300) From 895b79ac2ece74500fb8a4ea158a6aec2adc0856 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 18:28:17 +0100 Subject: [PATCH 020/231] Factor out KeyFetchers from KeyRing Rather than have three methods which have 
to have the same interface, factor out a separate interface which is provided by three implementations. I find it easier to grok the code this way. --- changelog.d/5244.misc | 1 + synapse/crypto/keyring.py | 315 +++++++++++++++++++---------------- tests/crypto/test_keyring.py | 34 +++- 3 files changed, 204 insertions(+), 146 deletions(-) create mode 100644 changelog.d/5244.misc diff --git a/changelog.d/5244.misc b/changelog.d/5244.misc new file mode 100644 index 000000000000..9cc1fb869de0 --- /dev/null +++ b/changelog.d/5244.misc @@ -0,0 +1 @@ +Refactor synapse.crypto.keyring to use a KeyFetcher interface. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 14a27288fd4c..eaf41b983c11 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -80,12 +80,13 @@ class KeyLookupError(ValueError): class Keyring(object): def __init__(self, hs): - self.store = hs.get_datastore() self.clock = hs.get_clock() - self.client = hs.get_http_client() - self.config = hs.get_config() - self.perspective_servers = self.config.perspectives - self.hs = hs + + self._key_fetchers = ( + StoreKeyFetcher(hs), + PerspectivesKeyFetcher(hs), + ServerKeyFetcher(hs), + ) # map from server name to Deferred. Has an entry for each server with # an ongoing key download; the Deferred completes once the download @@ -271,13 +272,6 @@ def _get_server_verify_keys(self, verify_requests): verify_requests (list[VerifyKeyRequest]): list of verify requests """ - # These are functions that produce keys given a list of key ids - key_fetch_fns = ( - self.get_keys_from_store, # First try the local store - self.get_keys_from_perspectives, # Then try via perspectives - self.get_keys_from_server, # Then try directly - ) - @defer.inlineCallbacks def do_iterations(): with Measure(self.clock, "get_server_verify_keys"): @@ -288,8 +282,8 @@ def do_iterations(): verify_request.key_ids ) - for fn in key_fetch_fns: - results = yield fn(missing_keys.items()) + for f in self._key_fetchers: + results = yield f.get_keys(missing_keys.items()) # We now need to figure out which verify requests we have keys # for and which we don't @@ -348,8 +342,9 @@ def on_err(err): run_in_background(do_iterations).addErrback(on_err) - @defer.inlineCallbacks - def get_keys_from_store(self, server_name_and_key_ids): + +class KeyFetcher(object): + def get_keys(self, server_name_and_key_ids): """ Args: server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): @@ -359,6 +354,18 @@ def get_keys_from_store(self, server_name_and_key_ids): Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: map from server_name -> key_id -> FetchKeyResult """ + raise NotImplementedError + + +class StoreKeyFetcher(KeyFetcher): + """KeyFetcher impl which fetches keys from our data store""" + + def __init__(self, hs): + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def get_keys(self, server_name_and_key_ids): + """see KeyFetcher.get_keys""" keys_to_fetch = ( (server_name, key_id) for server_name, key_ids in server_name_and_key_ids @@ -370,8 +377,127 @@ def get_keys_from_store(self, server_name_and_key_ids): keys.setdefault(server_name, {})[key_id] = key defer.returnValue(keys) + +class BaseV2KeyFetcher(object): + def __init__(self, hs): + self.store = hs.get_datastore() + self.config = hs.get_config() + + @defer.inlineCallbacks + def process_v2_response( + self, from_server, response_json, time_added_ms, requested_ids=[] + ): + """Parse a 'Server Keys' structure from the result of a /key request + + This is used to parse 
either the entirety of the response from + GET /_matrix/key/v2/server, or a single entry from the list returned by + POST /_matrix/key/v2/query. + + Checks that each signature in the response that claims to come from the origin + server is valid. (Does not check that there actually is such a signature, for + some reason.) + + Stores the json in server_keys_json so that it can be used for future responses + to /_matrix/key/v2/query. + + Args: + from_server (str): the name of the server producing this result: either + the origin server for a /_matrix/key/v2/server request, or the notary + for a /_matrix/key/v2/query. + + response_json (dict): the json-decoded Server Keys response object + + time_added_ms (int): the timestamp to record in server_keys_json + + requested_ids (iterable[str]): a list of the key IDs that were requested. + We will store the json for these key ids as well as any that are + actually in the response + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key_id to result object + """ + ts_valid_until_ms = response_json[u"valid_until_ts"] + + # start by extracting the keys from the response, since they may be required + # to validate the signature on the response. + verify_keys = {} + for key_id, key_data in response_json["verify_keys"].items(): + if is_signing_algorithm_supported(key_id): + key_base64 = key_data["key"] + key_bytes = decode_base64(key_base64) + verify_key = decode_verify_key_bytes(key_id, key_bytes) + verify_keys[key_id] = FetchKeyResult( + verify_key=verify_key, valid_until_ts=ts_valid_until_ms + ) + + # TODO: improve this signature checking + server_name = response_json["server_name"] + for key_id in response_json["signatures"].get(server_name, {}): + if key_id not in verify_keys: + raise KeyLookupError( + "Key response must include verification keys for all signatures" + ) + + verify_signed_json( + response_json, server_name, verify_keys[key_id].verify_key + ) + + for key_id, key_data in response_json["old_verify_keys"].items(): + if is_signing_algorithm_supported(key_id): + key_base64 = key_data["key"] + key_bytes = decode_base64(key_base64) + verify_key = decode_verify_key_bytes(key_id, key_bytes) + verify_keys[key_id] = FetchKeyResult( + verify_key=verify_key, valid_until_ts=key_data["expired_ts"] + ) + + # re-sign the json with our own key, so that it is ready if we are asked to + # give it out as a notary server + signed_key_json = sign_json( + response_json, self.config.server_name, self.config.signing_key[0] + ) + + signed_key_json_bytes = encode_canonical_json(signed_key_json) + + # for reasons I don't quite understand, we store this json for the key ids we + # requested, as well as those we got. 
+ updated_key_ids = set(requested_ids) + updated_key_ids.update(verify_keys) + + yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.store.store_server_keys_json, + server_name=server_name, + key_id=key_id, + from_server=from_server, + ts_now_ms=time_added_ms, + ts_expires_ms=ts_valid_until_ms, + key_json_bytes=signed_key_json_bytes, + ) + for key_id in updated_key_ids + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) + + defer.returnValue(verify_keys) + + +class PerspectivesKeyFetcher(BaseV2KeyFetcher): + """KeyFetcher impl which fetches keys from the "perspectives" servers""" + + def __init__(self, hs): + super(PerspectivesKeyFetcher, self).__init__(hs) + self.clock = hs.get_clock() + self.client = hs.get_http_client() + self.perspective_servers = self.config.perspectives + @defer.inlineCallbacks - def get_keys_from_perspectives(self, server_name_and_key_ids): + def get_keys(self, server_name_and_key_ids): + """see KeyFetcher.get_keys""" + @defer.inlineCallbacks def get_key(perspective_name, perspective_keys): try: @@ -408,28 +534,6 @@ def get_key(perspective_name, perspective_keys): defer.returnValue(union_of_keys) - @defer.inlineCallbacks - def get_keys_from_server(self, server_name_and_key_ids): - results = yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.get_server_verify_key_v2_direct, server_name, key_ids - ) - for server_name, key_ids in server_name_and_key_ids - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - - merged = {} - for result in results: - merged.update(result) - - defer.returnValue( - {server_name: keys for server_name, keys in merged.items() if keys} - ) - @defer.inlineCallbacks def get_server_verify_key_v2_indirect( self, server_names_and_key_ids, perspective_name, perspective_keys @@ -520,6 +624,38 @@ def get_server_verify_key_v2_indirect( defer.returnValue(keys) + +class ServerKeyFetcher(BaseV2KeyFetcher): + """KeyFetcher impl which fetches keys from the origin servers""" + + def __init__(self, hs): + super(ServerKeyFetcher, self).__init__(hs) + self.clock = hs.get_clock() + self.client = hs.get_http_client() + + @defer.inlineCallbacks + def get_keys(self, server_name_and_key_ids): + """see KeyFetcher.get_keys""" + results = yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.get_server_verify_key_v2_direct, server_name, key_ids + ) + for server_name, key_ids in server_name_and_key_ids + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) + + merged = {} + for result in results: + merged.update(result) + + defer.returnValue( + {server_name: keys for server_name, keys in merged.items() if keys} + ) + @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): keys = {} # type: dict[str, FetchKeyResult] @@ -568,107 +704,6 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): defer.returnValue({server_name: keys}) - @defer.inlineCallbacks - def process_v2_response( - self, from_server, response_json, time_added_ms, requested_ids=[] - ): - """Parse a 'Server Keys' structure from the result of a /key request - - This is used to parse either the entirety of the response from - GET /_matrix/key/v2/server, or a single entry from the list returned by - POST /_matrix/key/v2/query. - - Checks that each signature in the response that claims to come from the origin - server is valid. (Does not check that there actually is such a signature, for - some reason.) 
- - Stores the json in server_keys_json so that it can be used for future responses - to /_matrix/key/v2/query. - - Args: - from_server (str): the name of the server producing this result: either - the origin server for a /_matrix/key/v2/server request, or the notary - for a /_matrix/key/v2/query. - - response_json (dict): the json-decoded Server Keys response object - - time_added_ms (int): the timestamp to record in server_keys_json - - requested_ids (iterable[str]): a list of the key IDs that were requested. - We will store the json for these key ids as well as any that are - actually in the response - - Returns: - Deferred[dict[str, FetchKeyResult]]: map from key_id to result object - """ - ts_valid_until_ms = response_json[u"valid_until_ts"] - - # start by extracting the keys from the response, since they may be required - # to validate the signature on the response. - verify_keys = {} - for key_id, key_data in response_json["verify_keys"].items(): - if is_signing_algorithm_supported(key_id): - key_base64 = key_data["key"] - key_bytes = decode_base64(key_base64) - verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_keys[key_id] = FetchKeyResult( - verify_key=verify_key, valid_until_ts=ts_valid_until_ms - ) - - # TODO: improve this signature checking - server_name = response_json["server_name"] - for key_id in response_json["signatures"].get(server_name, {}): - if key_id not in verify_keys: - raise KeyLookupError( - "Key response must include verification keys for all signatures" - ) - - verify_signed_json( - response_json, server_name, verify_keys[key_id].verify_key - ) - - for key_id, key_data in response_json["old_verify_keys"].items(): - if is_signing_algorithm_supported(key_id): - key_base64 = key_data["key"] - key_bytes = decode_base64(key_base64) - verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_keys[key_id] = FetchKeyResult( - verify_key=verify_key, valid_until_ts=key_data["expired_ts"] - ) - - # re-sign the json with our own key, so that it is ready if we are asked to - # give it out as a notary server - signed_key_json = sign_json( - response_json, self.config.server_name, self.config.signing_key[0] - ) - - signed_key_json_bytes = encode_canonical_json(signed_key_json) - - # for reasons I don't quite understand, we store this json for the key ids we - # requested, as well as those we got. 
- updated_key_ids = set(requested_ids) - updated_key_ids.update(verify_keys) - - yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.store.store_server_keys_json, - server_name=server_name, - key_id=key_id, - from_server=from_server, - ts_now_ms=time_added_ms, - ts_expires_ms=ts_valid_until_ms, - key_json_bytes=signed_key_json_bytes, - ) - for key_id in updated_key_ids - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - - defer.returnValue(verify_keys) - @defer.inlineCallbacks def _handle_key_deferred(verify_request): diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 83de32b05d33..de61bad15d27 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -24,7 +24,11 @@ from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import KeyLookupError +from synapse.crypto.keyring import ( + KeyLookupError, + PerspectivesKeyFetcher, + ServerKeyFetcher, +) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -218,12 +222,19 @@ def test_verify_json_for_server(self): self.assertFalse(d.called) self.get_success(d) + +class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + self.http_client = Mock() + hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) + return hs + def test_get_keys_from_server(self): # arbitrarily advance the clock a bit self.reactor.advance(100) SERVER_NAME = "server2" - kr = keyring.Keyring(self.hs) + fetcher = ServerKeyFetcher(self.hs) testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" @@ -250,7 +261,7 @@ def get_json(destination, path, **kwargs): self.http_client.get_json.side_effect = get_json server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(kr.get_keys_from_server(server_name_and_key_ids)) + keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) @@ -278,15 +289,26 @@ def get_json(destination, path, **kwargs): # change the server name: it should cause a rejection response["server_name"] = "OTHER_SERVER" self.get_failure( - kr.get_keys_from_server(server_name_and_key_ids), KeyLookupError + fetcher.get_keys(server_name_and_key_ids), KeyLookupError ) + +class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + self.mock_perspective_server = MockPerspectiveServer() + self.http_client = Mock() + hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) + keys = self.mock_perspective_server.get_verify_keys() + hs.config.perspectives = {self.mock_perspective_server.server_name: keys} + return hs + def test_get_keys_from_perspectives(self): # arbitrarily advance the clock a bit self.reactor.advance(100) + fetcher = PerspectivesKeyFetcher(self.hs) + SERVER_NAME = "server2" - kr = keyring.Keyring(self.hs) testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" @@ -320,7 +342,7 @@ def post_json(destination, path, data, **kwargs): self.http_client.post_json.side_effect = post_json server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = 
self.get_success(kr.get_keys_from_perspectives(server_name_and_key_ids)) + keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) From ec24108cc2e937f49908df4c78f5cee9f81e0834 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 23 May 2019 14:52:13 +0100 Subject: [PATCH 021/231] Fix remote_key_resource --- synapse/rest/key/v2/remote_key_resource.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index eb8782aa6e1a..21c3c807b9d4 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -20,7 +20,7 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError -from synapse.crypto.keyring import KeyLookupError +from synapse.crypto.keyring import KeyLookupError, ServerKeyFetcher from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler from synapse.http.servlet import parse_integer, parse_json_object_from_request @@ -89,7 +89,7 @@ class RemoteKey(Resource): isLeaf = True def __init__(self, hs): - self.keyring = hs.get_keyring() + self.fetcher = ServerKeyFetcher(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() self.federation_domain_whitelist = hs.config.federation_domain_whitelist @@ -217,7 +217,7 @@ def query_keys(self, request, query, query_remote_on_cache_miss=False): if cache_misses and query_remote_on_cache_miss: for server_name, key_ids in cache_misses.items(): try: - yield self.keyring.get_server_verify_key_v2_direct( + yield self.fetcher.get_server_verify_key_v2_direct( server_name, key_ids ) except KeyLookupError as e: From 6368150a748e9303f34948873af360d8a62347b6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 23 May 2019 15:00:20 +0100 Subject: [PATCH 022/231] Add config option for setting homeserver's default room version (#5223) Replaces DEFAULT_ROOM_VERSION constant with a method that first checks the config, then returns a hardcoded value if the option is not present. That hardcoded value is now located in the server.py config file. --- changelog.d/5223.feature | 1 + docs/sample_config.yaml | 9 ++++++ synapse/api/room_versions.py | 4 --- synapse/config/server.py | 32 +++++++++++++++++++ synapse/handlers/room.py | 9 ++++-- synapse/rest/client/v2_alpha/capabilities.py | 5 +-- .../rest/client/v2_alpha/test_capabilities.py | 7 ++-- 7 files changed, 57 insertions(+), 10 deletions(-) create mode 100644 changelog.d/5223.feature diff --git a/changelog.d/5223.feature b/changelog.d/5223.feature new file mode 100644 index 000000000000..cfdf1ad41ba5 --- /dev/null +++ b/changelog.d/5223.feature @@ -0,0 +1 @@ +Ability to configure default room version. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 559fbcdd01bc..2a5a514d6184 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -83,6 +83,15 @@ pid_file: DATADIR/homeserver.pid # #restrict_public_rooms_to_local_users: true +# The default room version for newly created rooms. +# +# Known room versions are listed here: +# https://matrix.org/docs/spec/#complete-list-of-room-versions +# +# For example, for room version 1, default_room_version should be set +# to "1". 
+#default_room_version: "1" + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index b2895355a81a..4085bd10b95f 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -85,10 +85,6 @@ class RoomVersions(object): ) -# the version we will give rooms which are created on this server -DEFAULT_ROOM_VERSION = RoomVersions.V1 - - KNOWN_ROOM_VERSIONS = { v.identifier: v for v in ( RoomVersions.V1, diff --git a/synapse/config/server.py b/synapse/config/server.py index f34aa42afa20..e9120d4d75e0 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -20,6 +20,7 @@ from netaddr import IPSet +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.endpoint import parse_and_validate_server_name from synapse.python_dependencies import DependencyException, check_requirements @@ -35,6 +36,8 @@ # in the list. DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0'] +DEFAULT_ROOM_VERSION = "1" + class ServerConfig(Config): @@ -88,6 +91,22 @@ def read_config(self, config): "restrict_public_rooms_to_local_users", False, ) + default_room_version = config.get( + "default_room_version", DEFAULT_ROOM_VERSION, + ) + + # Ensure room version is a str + default_room_version = str(default_room_version) + + if default_room_version not in KNOWN_ROOM_VERSIONS: + raise ConfigError( + "Unknown default_room_version: %s, known room versions: %s" % + (default_room_version, list(KNOWN_ROOM_VERSIONS.keys())) + ) + + # Get the actual room version object rather than just the identifier + self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version] + # whether to enable search. If disabled, new entries will not be inserted # into the search tables and they will not be indexed. Users will receive # errors when attempting to search for messages. @@ -310,6 +329,10 @@ def default_config(self, server_name, data_dir_path, **kwargs): unsecure_port = 8008 pid_file = os.path.join(data_dir_path, "homeserver.pid") + + # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the + # default config string + default_room_version = DEFAULT_ROOM_VERSION return """\ ## Server ## @@ -384,6 +407,15 @@ def default_config(self, server_name, data_dir_path, **kwargs): # #restrict_public_rooms_to_local_users: true + # The default room version for newly created rooms. + # + # Known room versions are listed here: + # https://matrix.org/docs/spec/#complete-list-of-room-versions + # + # For example, for room version 1, default_room_version should be set + # to "1". 
+ #default_room_version: "%(default_room_version)s" + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e37ae968998a..4a17911a87fa 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -27,7 +27,7 @@ from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils @@ -70,6 +70,7 @@ def __init__(self, hs): self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() + self.config = hs.config # linearizer to stop two upgrades happening at once self._upgrade_linearizer = Linearizer("room_upgrade_linearizer") @@ -475,7 +476,11 @@ def create_room(self, requester, config, ratelimit=True, if ratelimit: yield self.ratelimit(requester) - room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier) + room_version = config.get( + "room_version", + self.config.default_room_version.identifier, + ) + if not isinstance(room_version, string_types): raise SynapseError( 400, diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index a868d06098b1..2b4892330c4d 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -16,7 +16,7 @@ from twisted.internet import defer -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.servlet import RestServlet from ._base import client_v2_patterns @@ -36,6 +36,7 @@ def __init__(self, hs): """ super(CapabilitiesRestServlet, self).__init__() self.hs = hs + self.config = hs.config self.auth = hs.get_auth() self.store = hs.get_datastore() @@ -48,7 +49,7 @@ def on_GET(self, request): response = { "capabilities": { "m.room_versions": { - "default": DEFAULT_ROOM_VERSION.identifier, + "default": self.config.default_room_version.identifier, "available": { v.identifier: v.disposition for v in KNOWN_ROOM_VERSIONS.values() diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index f3ef977404ab..bce5b0cf4c70 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import synapse.rest.admin -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.rest.client.v1 import login from synapse.rest.client.v2_alpha import capabilities @@ -32,6 +32,7 @@ def make_homeserver(self, reactor, clock): self.url = b"/_matrix/client/r0/capabilities" hs = self.setup_test_homeserver() self.store = hs.get_datastore() + self.config = hs.config return hs def test_check_auth_required(self): @@ -51,8 +52,10 @@ def test_get_room_version_capabilities(self): self.assertEqual(channel.code, 200) for room_version in capabilities['m.room_versions']['available'].keys(): self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, "" + room_version) + self.assertEqual( - DEFAULT_ROOM_VERSION.identifier, capabilities['m.room_versions']['default'] + self.config.default_room_version.identifier, + capabilities['m.room_versions']['default'], ) def test_get_change_password_capabilities(self): From 753b1270da1f0449bbb960b37707556abd3eaac0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 13:03:56 +0100 Subject: [PATCH 023/231] Require sig from origin server on perspectives responses --- synapse/crypto/keyring.py | 28 ++++++------ tests/crypto/test_keyring.py | 84 ++++++++++++++++++++++++++++++++---- 2 files changed, 90 insertions(+), 22 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eaf41b983c11..a64ba0752a08 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -394,8 +394,7 @@ def process_v2_response( POST /_matrix/key/v2/query. Checks that each signature in the response that claims to come from the origin - server is valid. (Does not check that there actually is such a signature, for - some reason.) + server is valid, and that there is at least one such signature. Stores the json in server_keys_json so that it can be used for future responses to /_matrix/key/v2/query. @@ -430,16 +429,25 @@ def process_v2_response( verify_key=verify_key, valid_until_ts=ts_valid_until_ms ) - # TODO: improve this signature checking server_name = response_json["server_name"] + verified = False for key_id in response_json["signatures"].get(server_name, {}): - if key_id not in verify_keys: + # each of the keys used for the signature must be present in the response + # json. 
+ key = verify_keys.get(key_id) + if not key: raise KeyLookupError( - "Key response must include verification keys for all signatures" + "Key response is signed by key id %s:%s but that key is not " + "present in the response" % (server_name, key_id) ) - verify_signed_json( - response_json, server_name, verify_keys[key_id].verify_key + verify_signed_json(response_json, server_name, key.verify_key) + verified = True + + if not verified: + raise KeyLookupError( + "Key response for %s is not signed by the origin server" + % (server_name,) ) for key_id, key_data in response_json["old_verify_keys"].items(): @@ -677,12 +685,6 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): except HttpResponseException as e: raise_from(KeyLookupError("Remote server returned an error"), e) - if ( - u"signatures" not in response - or server_name not in response[u"signatures"] - ): - raise KeyLookupError("Key response not signed by remote server") - if response["server_name"] != server_name: raise KeyLookupError( "Expected a response for server %r not %r" diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index de61bad15d27..c4c9d29499a8 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -55,11 +55,11 @@ def get_signed_key(self, server_name, verify_key): key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)} }, } - return self.get_signed_response(res) + self.sign_response(res) + return res - def get_signed_response(self, res): + def sign_response(self, res): signedjson.sign.sign_json(res, self.server_name, self.key) - return res class KeyringTestCase(unittest.HomeserverTestCase): @@ -238,7 +238,7 @@ def test_get_keys_from_server(self): testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" - VALID_UNTIL_TS = 1000 + VALID_UNTIL_TS = 200 * 1000 # valid response response = { @@ -326,9 +326,10 @@ def test_get_keys_from_perspectives(self): }, } - persp_resp = { - "server_keys": [self.mock_perspective_server.get_signed_response(response)] - } + # the response must be signed by both the origin server and the perspectives + # server. 
+ signedjson.sign.sign_json(response, SERVER_NAME, testkey) + self.mock_perspective_server.sign_response(response) def post_json(destination, path, data, **kwargs): self.assertEqual(destination, self.mock_perspective_server.server_name) @@ -337,7 +338,7 @@ def post_json(destination, path, data, **kwargs): # check that the request is for the expected key q = data["server_keys"] self.assertEqual(list(q[SERVER_NAME].keys()), ["key1"]) - return persp_resp + return {"server_keys": [response]} self.http_client.post_json.side_effect = post_json @@ -365,9 +366,74 @@ def post_json(destination, path, data, **kwargs): self.assertEqual( bytes(res["key_json"]), - canonicaljson.encode_canonical_json(persp_resp["server_keys"][0]), + canonicaljson.encode_canonical_json(response), ) + def test_invalid_perspectives_responses(self): + """Check that invalid responses from the perspectives server are rejected""" + # arbitrarily advance the clock a bit + self.reactor.advance(100) + + SERVER_NAME = "server2" + testkey = signedjson.key.generate_signing_key("ver1") + testverifykey = signedjson.key.get_verify_key(testkey) + testverifykey_id = "ed25519:ver1" + VALID_UNTIL_TS = 200 * 1000 + + def build_response(): + # valid response + response = { + "server_name": SERVER_NAME, + "old_verify_keys": {}, + "valid_until_ts": VALID_UNTIL_TS, + "verify_keys": { + testverifykey_id: { + "key": signedjson.key.encode_verify_key_base64(testverifykey) + } + }, + } + + # the response must be signed by both the origin server and the perspectives + # server. + signedjson.sign.sign_json(response, SERVER_NAME, testkey) + self.mock_perspective_server.sign_response(response) + return response + + def get_key_from_perspectives(response): + fetcher = PerspectivesKeyFetcher(self.hs) + server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + + def post_json(destination, path, data, **kwargs): + self.assertEqual(destination, self.mock_perspective_server.server_name) + self.assertEqual(path, "/_matrix/key/v2/query") + return {"server_keys": [response]} + + self.http_client.post_json.side_effect = post_json + + return self.get_success( + fetcher.get_keys(server_name_and_key_ids) + ) + + # start with a valid response so we can check we are testing the right thing + response = build_response() + keys = get_key_from_perspectives(response) + k = keys[SERVER_NAME][testverifykey_id] + self.assertEqual(k.verify_key, testverifykey) + + # remove the perspectives server's signature + response = build_response() + del response["signatures"][self.mock_perspective_server.server_name] + self.http_client.post_json.return_value = {"server_keys": [response]} + keys = get_key_from_perspectives(response) + self.assertEqual(keys, {}, "Expected empty dict with missing persp server sig") + + # remove the origin server's signature + response = build_response() + del response["signatures"][SERVER_NAME] + self.http_client.post_json.return_value = {"server_keys": [response]} + keys = get_key_from_perspectives(response) + self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig") + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): From 4cb577c23f1399d233d72cee45efad36982f692a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 May 2019 09:52:33 +0100 Subject: [PATCH 024/231] Don't bundle aggs for /state and /members etc APIs --- synapse/handlers/message.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7b2c33a9228f..7e40cb6502a1 100644 --- 
a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -166,6 +166,9 @@ def get_state_events( now = self.clock.time_msec() events = yield self._event_serializer.serialize_events( room_state.values(), now, + # We don't bother bundling aggregations in when asked for state + # events, as clients won't use them. + bundle_aggregations=False, ) defer.returnValue(events) From dba9152d1593c9de176e969153b30b4ef19f29ae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 24 May 2019 14:12:38 +0100 Subject: [PATCH 025/231] Add missing blank line in config (#5249) --- changelog.d/5249.feature | 1 + docs/sample_config.yaml | 1 + synapse/config/server.py | 1 + 3 files changed, 3 insertions(+) create mode 100644 changelog.d/5249.feature diff --git a/changelog.d/5249.feature b/changelog.d/5249.feature new file mode 100644 index 000000000000..cfdf1ad41ba5 --- /dev/null +++ b/changelog.d/5249.feature @@ -0,0 +1 @@ +Ability to configure default room version. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2a5a514d6184..421ae96f04f1 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -90,6 +90,7 @@ pid_file: DATADIR/homeserver.pid # # For example, for room version 1, default_room_version should be set # to "1". +# #default_room_version: "1" # The GC threshold parameters to pass to `gc.set_threshold`, if defined diff --git a/synapse/config/server.py b/synapse/config/server.py index e9120d4d75e0..e763e19e15a4 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -414,6 +414,7 @@ def default_config(self, server_name, data_dir_path, **kwargs): # # For example, for room version 1, default_room_version should be set # to "1". + # #default_room_version: "%(default_room_version)s" # The GC threshold parameters to pass to `gc.set_threshold`, if defined From dd64b9dbdd9bb6f9ac84d51dd4f4bdb665fcfcb1 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Fri, 24 May 2019 16:44:04 +0300 Subject: [PATCH 026/231] Fix appservice timestamp massaging (#5233) Signed-off-by: Tulir Asokan --- changelog.d/5233.bugfix | 1 + synapse/events/builder.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5233.bugfix diff --git a/changelog.d/5233.bugfix b/changelog.d/5233.bugfix new file mode 100644 index 000000000000..d71b9621602d --- /dev/null +++ b/changelog.d/5233.bugfix @@ -0,0 +1 @@ +Fix appservice timestamp massaging. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 1fe995f21214..546b6f498275 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -76,6 +76,7 @@ class EventBuilder(object): # someone tries to get them when they don't exist. 
_state_key = attr.ib(default=None) _redacts = attr.ib(default=None) + _origin_server_ts = attr.ib(default=None) internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({}))) @@ -142,6 +143,9 @@ def build(self, prev_event_ids): if self._redacts is not None: event_dict["redacts"] = self._redacts + if self._origin_server_ts is not None: + event_dict["origin_server_ts"] = self._origin_server_ts + defer.returnValue( create_local_event_from_event_dict( clock=self._clock, @@ -209,6 +213,7 @@ def for_room_version(self, room_version, key_values): content=key_values.get("content", {}), unsigned=key_values.get("unsigned", {}), redacts=key_values.get("redacts", None), + origin_server_ts=key_values.get("origin_server_ts", None), ) @@ -245,7 +250,7 @@ def create_local_event_from_event_dict(clock, hostname, signing_key, event_dict["event_id"] = _create_event_id(clock, hostname) event_dict["origin"] = hostname - event_dict["origin_server_ts"] = time_now + event_dict.setdefault("origin_server_ts", time_now) event_dict.setdefault("unsigned", {}) age = event_dict["unsigned"].pop("age", 0) From b825d1c80046b37e32951ef034a05002df76a287 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 23 May 2019 17:31:26 +0100 Subject: [PATCH 027/231] Improve error handling/logging for perspectives-key fetching. In particular, don't give up on the first failure. --- synapse/crypto/keyring.py | 105 ++++++++++++++++++++++++++++---------- 1 file changed, 77 insertions(+), 28 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index a64ba0752a08..65af2fb671a4 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -17,6 +17,7 @@ import logging from collections import namedtuple +import six from six import raise_from from six.moves import urllib @@ -349,6 +350,7 @@ def get_keys(self, server_name_and_key_ids): Args: server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): list of (server_name, iterable[key_id]) tuples to fetch keys for + Note that the iterables may be iterated more than once. Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: @@ -557,7 +559,16 @@ def get_server_verify_key_v2_indirect( Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map from server_name -> key_id -> FetchKeyResult + + Raises: + KeyLookupError if there was an error processing the entire response from + the server """ + logger.info( + "Requesting keys %s from notary server %s", + server_names_and_key_ids, + perspective_name, + ) # TODO(mark): Set the minimum_valid_until_ts to that needed by # the events being validated or the current time if validating # an incoming request. 
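# (Condensed sketch of the loop introduced in the hunk below, showing the new
# error-isolation behaviour; argument lists are elided:
#     for response in query_response["server_keys"]:
#         server_name = response.get("server_name")
#         try:
#             processed_response = yield self._process_perspectives_response(...)
#         except KeyLookupError as e:
#             logger.warning("Error processing response from %s: %s", server_name, e)
#             continue  # skip this entry, keep the rest of the batch
# A malformed or badly-signed entry is now logged and skipped rather than
# failing the whole notary request.)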
@@ -586,40 +597,31 @@ def get_server_verify_key_v2_indirect( time_now_ms = self.clock.time_msec() for response in query_response["server_keys"]: - if ( - u"signatures" not in response - or perspective_name not in response[u"signatures"] - ): + # do this first, so that we can give useful errors thereafter + server_name = response.get("server_name") + if not isinstance(server_name, six.string_types): raise KeyLookupError( - "Key response not signed by perspective server" - " %r" % (perspective_name,) + "Malformed response from key notary server %s: invalid server_name" + % (perspective_name,) ) - verified = False - for key_id in response[u"signatures"][perspective_name]: - if key_id in perspective_keys: - verify_signed_json( - response, perspective_name, perspective_keys[key_id] - ) - verified = True - - if not verified: - logging.info( - "Response from perspective server %r not signed with a" - " known key, signed with: %r, known keys: %r", + try: + processed_response = yield self._process_perspectives_response( perspective_name, - list(response[u"signatures"][perspective_name]), - list(perspective_keys), + perspective_keys, + response, + time_added_ms=time_now_ms, ) - raise KeyLookupError( - "Response not signed with a known key for perspective" - " server %r" % (perspective_name,) + except KeyLookupError as e: + logger.warning( + "Error processing response from key notary server %s for origin " + "server %s: %s", + perspective_name, + server_name, + e, ) - - processed_response = yield self.process_v2_response( - perspective_name, response, time_added_ms=time_now_ms - ) - server_name = response["server_name"] + # we continue to process the rest of the response + continue added_keys.extend( (server_name, key_id, key) for key_id, key in processed_response.items() @@ -632,6 +634,53 @@ def get_server_verify_key_v2_indirect( defer.returnValue(keys) + def _process_perspectives_response( + self, perspective_name, perspective_keys, response, time_added_ms + ): + """Parse a 'Server Keys' structure from the result of a /key/query request + + Checks that the entry is correctly signed by the perspectives server, and then + passes over to process_v2_response + + Args: + perspective_name (str): the name of the notary server that produced this + result + + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the + notary server + + response (dict): the json-decoded Server Keys response object + + time_added_ms (int): the timestamp to record in server_keys_json + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key_id to result object + """ + if ( + u"signatures" not in response + or perspective_name not in response[u"signatures"] + ): + raise KeyLookupError("Response not signed by the notary server") + + verified = False + for key_id in response[u"signatures"][perspective_name]: + if key_id in perspective_keys: + verify_signed_json(response, perspective_name, perspective_keys[key_id]) + verified = True + + if not verified: + raise KeyLookupError( + "Response not signed with a known key: signed with: %r, known keys: %r" + % ( + list(response[u"signatures"][perspective_name].keys()), + list(perspective_keys.keys()), + ) + ) + + return self.process_v2_response( + perspective_name, response, time_added_ms=time_added_ms + ) + class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" From cbcfd642a0dc375ea6f006c1633f82d16b3ac002 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 24 May 2019 15:47:30 +0100 Subject: [PATCH 028/231] 
changelog --- changelog.d/5251.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5251.bugfix diff --git a/changelog.d/5251.bugfix b/changelog.d/5251.bugfix new file mode 100644 index 000000000000..9a053204b6dd --- /dev/null +++ b/changelog.d/5251.bugfix @@ -0,0 +1 @@ +Ensure that server_keys fetched via a notary server are correctly signed. \ No newline at end of file From fa1b293da2e0a5e47864ccb49e530d8a81d81790 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 24 May 2019 22:17:18 +0100 Subject: [PATCH 029/231] Simplification to Keyring.wait_for_previous_lookups. (#5250) The list of server names was redundant, since it was equivalent to the keys on the server_to_deferred map. This reduces the number of large lists being passed around, and has the benefit of deduplicating the entries in `wait_on`. --- changelog.d/5250.misc | 1 + synapse/crypto/keyring.py | 11 ++++------- tests/crypto/test_keyring.py | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5250.misc diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc new file mode 100644 index 000000000000..575a299a8214 --- /dev/null +++ b/changelog.d/5250.misc @@ -0,0 +1 @@ +Simplification to Keyring.wait_for_previous_lookups. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eaf41b983c11..d6ad7f177260 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -180,9 +180,7 @@ def _start_key_lookups(self, verify_requests): # We want to wait for any previous lookups to complete before # proceeding. - yield self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], server_to_deferred - ) + yield self.wait_for_previous_lookups(server_to_deferred) # Actually start fetching keys. self._get_server_verify_keys(verify_requests) @@ -215,12 +213,11 @@ def remove_deferreds(res, verify_request): logger.exception("Error starting key lookups") @defer.inlineCallbacks - def wait_for_previous_lookups(self, server_names, server_to_deferred): + def wait_for_previous_lookups(self, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. Args: - server_names (list): list of server_names we want to lookup - server_to_deferred (dict): server_name to deferred which gets + server_to_deferred (dict[str, Deferred]): server_name to deferred which gets resolved once we've finished looking up keys for that server. The Deferreds should be regular twisted ones which call their callbacks with no logcontext. @@ -233,7 +230,7 @@ def wait_for_previous_lookups(self, server_names, server_to_deferred): while True: wait_on = [ (server_name, self.key_downloads[server_name]) - for server_name in server_names + for server_name in server_to_deferred.keys() if server_name in self.key_downloads ] if not wait_on: diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index de61bad15d27..4fba462d4444 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -85,7 +85,7 @@ def test_wait_for_previous_lookups(self): # we run the lookup in a logcontext so that the patched inlineCallbacks can check # it is doing the right thing with logcontexts. 
wait_1_deferred = run_in_context( - kr.wait_for_previous_lookups, ["server1"], {"server1": lookup_1_deferred} + kr.wait_for_previous_lookups, {"server1": lookup_1_deferred} ) # there were no previous lookups, so the deferred should be ready @@ -94,7 +94,7 @@ def test_wait_for_previous_lookups(self): # set off another wait. It should block because the first lookup # hasn't yet completed. wait_2_deferred = run_in_context( - kr.wait_for_previous_lookups, ["server1"], {"server1": lookup_2_deferred} + kr.wait_for_previous_lookups, {"server1": lookup_2_deferred} ) self.assertFalse(wait_2_deferred.called) From 56f07d980a9d3b3b8e2cc196e5d630abd98be122 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 16:32:21 -0500 Subject: [PATCH 030/231] Show correct error when logging out and access token is missing Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 430c692336a8..317f52cb564e 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -40,11 +40,11 @@ def on_OPTIONS(self, request): def on_POST(self, request): try: requester = yield self.auth.get_user_by_req(request) - except AuthError: + except AuthError as e: # this implies the access token has already been deleted. - defer.returnValue((401, { - "errcode": "M_UNKNOWN_TOKEN", - "error": "Access Token unknown or expired" + defer.returnValue((e.code, { + "errcode": e.errcode, + "error": e.msg })) else: if requester.device_id is None: From 2d4853039f37a16cc898c7dba1a4a34f67ca1062 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 17:13:10 -0500 Subject: [PATCH 031/231] Fix error code for invalid parameter Signed-off-by: Aaron Raimist --- synapse/http/servlet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 528125e73768..197c652850ed 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -55,7 +55,7 @@ def parse_integer_from_args(args, name, default=None, required=False): return int(args[name][0]) except Exception: message = "Query parameter %r must be an integer" % (name,) - raise SynapseError(400, message) + raise SynapseError(400, message, errcode=Codes.INVALID_PARAM) else: if required: message = "Missing integer query parameter %r" % (name,) From 6dac0e738c70ee67abb9b03cafbb07749b84309c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 17:15:24 -0500 Subject: [PATCH 032/231] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5257.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5257.bugfix diff --git a/changelog.d/5257.bugfix b/changelog.d/5257.bugfix new file mode 100644 index 000000000000..8334af9b9957 --- /dev/null +++ b/changelog.d/5257.bugfix @@ -0,0 +1 @@ +Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms From 0b4f4cb0b4bff42dd0f638d1d2891f35feaff1be Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 16:35:48 -0500 Subject: [PATCH 033/231] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5256.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5256.bugfix diff --git a/changelog.d/5256.bugfix b/changelog.d/5256.bugfix new file mode 100644 index 000000000000..86316ab5dd89 --- /dev/null +++ b/changelog.d/5256.bugfix @@ -0,0 +1 @@ +Show the correct error when logging out and access token is 
missing. From bc4b2ecf70bc3965cbbf1daee52bf7577e219d7b Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 25 May 2019 12:02:48 -0600 Subject: [PATCH 034/231] Fix logging for room stats background update --- synapse/storage/stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 71b80a891dd6..eb0ced5b5e46 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -169,7 +169,7 @@ def _get_next_batch(txn): logger.info( "Processing the next %d rooms of %d remaining", - (len(rooms_to_work_on), progress["remaining"]), + len(rooms_to_work_on), progress["remaining"], ) # Number of state events we've processed by going through each room From 4ccdbfcdb133ed10cd53f5a1b7f77b00c1ecdf97 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 25 May 2019 12:21:21 -0600 Subject: [PATCH 035/231] Changelog --- changelog.d/5260.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5260.feature diff --git a/changelog.d/5260.feature b/changelog.d/5260.feature new file mode 100644 index 000000000000..01285e965c84 --- /dev/null +++ b/changelog.d/5260.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. From 119c9c10b026b65affab5d5bcb6ae1d3d7e7229c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 27 May 2019 00:13:48 -0500 Subject: [PATCH 036/231] Get rid of try except Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 317f52cb564e..2cf373e83cf2 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -38,23 +38,16 @@ def on_OPTIONS(self, request): @defer.inlineCallbacks def on_POST(self, request): - try: - requester = yield self.auth.get_user_by_req(request) - except AuthError as e: - # this implies the access token has already been deleted. - defer.returnValue((e.code, { - "errcode": e.errcode, - "error": e.msg - })) + requester = yield self.auth.get_user_by_req(request) + + if requester.device_id is None: + # the acccess token wasn't associated with a device. + # Just delete the access token + access_token = self._auth.get_access_token_from_request(request) + yield self._auth_handler.delete_access_token(access_token) else: - if requester.device_id is None: - # the acccess token wasn't associated with a device. 
-                # Just delete the access token
-                access_token = self._auth.get_access_token_from_request(request)
-                yield self._auth_handler.delete_access_token(access_token)
-            else:
-                yield self._device_handler.delete_device(
-                    requester.user.to_string(), requester.device_id)
+            yield self._device_handler.delete_device(
+                requester.user.to_string(), requester.device_id)
 
         defer.returnValue((200, {}))
 

From ba17de7fbc29700163b23363ae0e03f8a01ef274 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 28 May 2019 10:11:38 +0100
Subject: [PATCH 037/231] Fix schema update for account validity

---
 ...{account_validity.sql => account_validity_with_renewal.sql} | 3 +++
 1 file changed, 3 insertions(+)
 rename synapse/storage/schema/delta/54/{account_validity.sql => account_validity_with_renewal.sql} (83%)

diff --git a/synapse/storage/schema/delta/54/account_validity.sql b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql
similarity index 83%
rename from synapse/storage/schema/delta/54/account_validity.sql
rename to synapse/storage/schema/delta/54/account_validity_with_renewal.sql
index 2357626000ff..0adb2ad55e35 100644
--- a/synapse/storage/schema/delta/54/account_validity.sql
+++ b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql
@@ -13,6 +13,9 @@
  * limitations under the License.
  */
 
+-- We previously changed the schema for this table without renaming the file, which means
+-- that some databases might still be using the old schema. This ensures Synapse uses the
+-- right schema for the table.
 DROP TABLE IF EXISTS account_validity;
 
 -- Track what users are in public rooms.

From ddd30f44a09a775d290c2d41b8db4d15b967dd43 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 28 May 2019 10:14:21 +0100
Subject: [PATCH 038/231] Changelog

---
 changelog.d/5268.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5268.bugfix

diff --git a/changelog.d/5268.bugfix b/changelog.d/5268.bugfix
new file mode 100644
index 000000000000..1a5a03bf0a0c
--- /dev/null
+++ b/changelog.d/5268.bugfix
@@ -0,0 +1 @@
+Fix schema update for account validity.

From 52839886d664576831462e033b88e5aba4c019e3 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Tue, 28 May 2019 16:47:42 +0100
Subject: [PATCH 039/231] Allow configuring a range for the account validity
 startup job

When enabling the account validity feature, Synapse will look at startup
for registered accounts without an expiration date, and will set one
equal to 'now + validity_period' for them.

On large servers, this can mean that a large number of users end up with
the same expiration date, so they will all be sent a renewal email at
the same time, which isn't ideal.

In order to mitigate this, this PR allows server admins to define a
'max_delta' so that the expiration date is a random value in the
[now + validity_period ; now + validity_period + max_delta] range. This
allows renewal emails to be progressively sent over a configured period
instead of being sent all in one big batch.
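
As a quick sketch of the randomized expiration date described above (purely
illustrative, not part of the patch: `pick_expiration_ts` is a made-up name,
and both durations are assumed to already be parsed into milliseconds):

    import random

    def pick_expiration_ts(now_ms, validity_period, startup_job_max_delta=0):
        expiration_ts = now_ms + validity_period
        if startup_job_max_delta:
            # Spread expiration dates over [period, period + max_delta) so
            # renewal emails go out progressively instead of in one batch.
            expiration_ts = random.SystemRandom().randrange(
                expiration_ts, expiration_ts + startup_job_max_delta
            )
        return expiration_ts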
--- synapse/config/registration.py | 11 ++++++++++ synapse/storage/_base.py | 23 +++++++++++++++++++-- tests/rest/client/v2_alpha/test_register.py | 21 +++++++++++++++++++ 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 693288f93894..b4fd4af368d7 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -39,6 +39,10 @@ def __init__(self, config, synapse_config): else: self.renew_email_subject = "Renew your %(app)s account" + self.startup_job_max_delta = self.parse_duration( + config.get("startup_job_max_delta", 0), + ) + if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: raise ConfigError("Can't send renewal emails without 'public_baseurl'") @@ -131,11 +135,18 @@ def default_config(self, generate_secrets=False, **kwargs): # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. # + # If set, the ``startup_job_max_delta`` optional setting will make the startup job + # described above set a random expiration date between t + period and + # t + period + startup_job_max_delta, t being the date and time at which the job + # sets the expiration date for a given user. This is useful for server admins that + # want to avoid Synapse sending a lot of renewal emails at once. + # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" + # startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839cecade..40802fd3dc43 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -16,6 +16,7 @@ # limitations under the License. import itertools import logging +import random import sys import threading import time @@ -247,6 +248,8 @@ def __init__(self, db_conn, hs): self._check_safe_to_upsert, ) + self.rand = random.SystemRandom() + if self._account_validity.enabled: self._clock.call_later( 0.0, @@ -308,21 +311,37 @@ def select_users_with_no_expiration_date_txn(txn): res = self.cursor_to_dict(txn) if res: for user in res: - self.set_expiration_date_for_user_txn(txn, user["name"]) + self.set_expiration_date_for_user_txn( + txn, + user["name"], + use_delta=True, + ) yield self.runInteraction( "get_users_with_no_expiration_date", select_users_with_no_expiration_date_txn, ) - def set_expiration_date_for_user_txn(self, txn, user_id): + def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): """Sets an expiration date to the account with the given user ID. Args: user_id (str): User ID to set an expiration date for. + use_delta (bool): If set to False, the expiration date for the user will be + now + validity period. If set to True, this expiration date will be a + random value in the [now + period; now + period + max_delta] range, + max_delta being the configured value for the size of the range, unless + delta is 0, in which case it sets it to now + period. 
""" now_ms = self._clock.time_msec() expiration_ts = now_ms + self._account_validity.period + + if use_delta and self._account_validity.startup_job_max_delta: + expiration_ts = self.rand.randrange( + expiration_ts, + expiration_ts + self._account_validity.startup_job_max_delta, + ) + self._simple_insert_txn( txn, "account_validity", diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d4a1d4d50c8d..7603440fd859 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -436,6 +436,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.validity_period = 10 + self.max_delta = 10 config = self.default_config() @@ -459,8 +460,28 @@ def test_background_job(self): """ user_id = self.register_user("kermit", "user") + self.hs.config.account_validity.startup_job_max_delta = 0 + now_ms = self.hs.clock.time_msec() self.get_success(self.store._set_expiration_date_when_missing()) res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) self.assertEqual(res, now_ms + self.validity_period) + + def test_background_job_with_max_delta(self): + """ + Tests the same thing as test_background_job, except that it sets the + startup_job_max_delta parameter and checks that the expiration date is within the + allowed range. + """ + user_id = self.register_user("kermit_delta", "user") + + self.hs.config.account_validity.startup_job_max_delta = self.max_delta + + now_ms = self.hs.clock.time_msec() + self.get_success(self.store._set_expiration_date_when_missing()) + + res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + + self.assertLessEqual(res, now_ms + self.validity_period + self.delta) + self.assertGreaterEqual(res, now_ms + self.validity_period) From 4aba561c65c842e640861035e3937e78ab950a21 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 16:55:10 +0100 Subject: [PATCH 040/231] Config and changelog --- changelog.d/5276.feature | 1 + docs/sample_config.yaml | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/5276.feature diff --git a/changelog.d/5276.feature b/changelog.d/5276.feature new file mode 100644 index 000000000000..403dee0862e4 --- /dev/null +++ b/changelog.d/5276.feature @@ -0,0 +1 @@ +Allow configuring a range for the account validity startup job. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f658ec8ecdb0..8ff53d5cb40c 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -755,11 +755,18 @@ uploads_path: "DATADIR/uploads" # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. # +# If set, the ``startup_job_max_delta`` optional setting will make the startup job +# described above set a random expiration date between t + period and +# t + period + startup_job_max_delta, t being the date and time at which the job +# sets the expiration date for a given user. This is useful for server admins that +# want to avoid Synapse sending a lot of renewal emails at once. +# #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" +# startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. 
# From 7e1c7cc2742f5eb9d6d37205a0c457b8a7bd015f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 17:13:26 +0100 Subject: [PATCH 041/231] Typo --- tests/rest/client/v2_alpha/test_register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 7603440fd859..68654e25ab2e 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -483,5 +483,5 @@ def test_background_job_with_max_delta(self): res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertLessEqual(res, now_ms + self.validity_period + self.delta) + self.assertLessEqual(res, now_ms + self.validity_period + self.max_delta) self.assertGreaterEqual(res, now_ms + self.validity_period) From 5726378eced1d032552318cb5fd603da8f364db2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 28 May 2019 21:20:11 +0100 Subject: [PATCH 042/231] Fix "db txn 'update_presence' from sentinel context" log messages (#5275) Fixes #4414. --- changelog.d/5275.bugfix | 1 + synapse/handlers/presence.py | 99 +++++++++++++++++------------------- 2 files changed, 48 insertions(+), 52 deletions(-) create mode 100644 changelog.d/5275.bugfix diff --git a/changelog.d/5275.bugfix b/changelog.d/5275.bugfix new file mode 100644 index 000000000000..45a554642a53 --- /dev/null +++ b/changelog.d/5275.bugfix @@ -0,0 +1 @@ +Fix "db txn 'update_presence' from sentinel context" log messages. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 59d53f105059..6209858bbb9a 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -182,17 +182,27 @@ def __init__(self, hs): # Start a LoopingCall in 30s that fires every 5s. # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. + def run_timeout_handler(): + return run_as_background_process( + "handle_presence_timeouts", self._handle_timeouts + ) + self.clock.call_later( 30, self.clock.looping_call, - self._handle_timeouts, + run_timeout_handler, 5000, ) + def run_persister(): + return run_as_background_process( + "persist_presence_changes", self._persist_unpersisted_changes + ) + self.clock.call_later( 60, self.clock.looping_call, - self._persist_unpersisted_changes, + run_persister, 60 * 1000, ) @@ -229,6 +239,7 @@ def _on_shutdown(self): ) if self.unpersisted_users_changes: + yield self.store.update_presence([ self.user_to_current_state[user_id] for user_id in self.unpersisted_users_changes @@ -240,30 +251,18 @@ def _persist_unpersisted_changes(self): """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. """ - logger.info( - "Performing _persist_unpersisted_changes. 
Persisting %d unpersisted changes", - len(self.unpersisted_users_changes) - ) - unpersisted = self.unpersisted_users_changes self.unpersisted_users_changes = set() if unpersisted: + logger.info( + "Persisting %d upersisted presence updates", len(unpersisted) + ) yield self.store.update_presence([ self.user_to_current_state[user_id] for user_id in unpersisted ]) - logger.info("Finished _persist_unpersisted_changes") - - @defer.inlineCallbacks - def _update_states_and_catch_exception(self, new_states): - try: - res = yield self._update_states(new_states) - defer.returnValue(res) - except Exception: - logger.exception("Error updating presence") - @defer.inlineCallbacks def _update_states(self, new_states): """Updates presence of users. Sets the appropriate timeouts. Pokes @@ -338,45 +337,41 @@ def _handle_timeouts(self): logger.info("Handling presence timeouts") now = self.clock.time_msec() - try: - with Measure(self.clock, "presence_handle_timeouts"): - # Fetch the list of users that *may* have timed out. Things may have - # changed since the timeout was set, so we won't necessarily have to - # take any action. - users_to_check = set(self.wheel_timer.fetch(now)) - - # Check whether the lists of syncing processes from an external - # process have expired. - expired_process_ids = [ - process_id for process_id, last_update - in self.external_process_last_updated_ms.items() - if now - last_update > EXTERNAL_PROCESS_EXPIRY - ] - for process_id in expired_process_ids: - users_to_check.update( - self.external_process_last_updated_ms.pop(process_id, ()) - ) - self.external_process_last_update.pop(process_id) + # Fetch the list of users that *may* have timed out. Things may have + # changed since the timeout was set, so we won't necessarily have to + # take any action. + users_to_check = set(self.wheel_timer.fetch(now)) + + # Check whether the lists of syncing processes from an external + # process have expired. 
+ expired_process_ids = [ + process_id for process_id, last_update + in self.external_process_last_updated_ms.items() + if now - last_update > EXTERNAL_PROCESS_EXPIRY + ] + for process_id in expired_process_ids: + users_to_check.update( + self.external_process_last_updated_ms.pop(process_id, ()) + ) + self.external_process_last_update.pop(process_id) - states = [ - self.user_to_current_state.get( - user_id, UserPresenceState.default(user_id) - ) - for user_id in users_to_check - ] + states = [ + self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) + ) + for user_id in users_to_check + ] - timers_fired_counter.inc(len(states)) + timers_fired_counter.inc(len(states)) - changes = handle_timeouts( - states, - is_mine_fn=self.is_mine_id, - syncing_user_ids=self.get_currently_syncing_users(), - now=now, - ) + changes = handle_timeouts( + states, + is_mine_fn=self.is_mine_id, + syncing_user_ids=self.get_currently_syncing_users(), + now=now, + ) - run_in_background(self._update_states_and_catch_exception, changes) - except Exception: - logger.exception("Exception in _handle_timeouts loop") + return self._update_states(changes) @defer.inlineCallbacks def bump_presence_active_time(self, user): From 9b6f72663e2eb8c2caf834da511c2617d8061e58 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 20:53:56 -0500 Subject: [PATCH 043/231] Fix docs on resetting the user directory (#5036) Signed-off-by: Aaron Raimist --- docs/user_directory.md | 10 +++------- synapse/config/user_directory.py | 6 +++--- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/docs/user_directory.md b/docs/user_directory.md index 4c8ee44f37c0..e64aa453cc0b 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -7,11 +7,7 @@ who are present in a publicly viewable room present on the server. The directory info is stored in various tables, which can (typically after DB corruption) get stale or out of sync. If this happens, for now the -quickest solution to fix it is: - -``` -UPDATE user_directory_stream_pos SET stream_id = NULL; -``` - -and restart the synapse, which should then start a background task to +solution to fix it is to execute the SQL here +https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql +and then restart synapse. This should then start a background task to flush the current tables and regenerate the directory. diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index 142754a7dc63..023997ccdeb2 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -43,9 +43,9 @@ def default_config(self, config_dir_path, server_name, **kwargs): # # 'search_all_users' defines whether to search all users visible to your HS # when searching the user directory, rather than limiting to users visible - # in public rooms. Defaults to false. If you set it True, you'll have to run - # UPDATE user_directory_stream_pos SET stream_id = NULL; - # on your database to tell it to rebuild the user_directory search indexes. + # in public rooms. Defaults to false. 
If you set it True, you'll have to + # rebuild the user_directory search indexes, see + # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md # #user_directory: # enabled: true From 878b00c39531d5200a3efab356766e4e2670e589 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 20:58:18 -0500 Subject: [PATCH 044/231] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5282.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5282.misc diff --git a/changelog.d/5282.misc b/changelog.d/5282.misc new file mode 100644 index 000000000000..350e15bc033e --- /dev/null +++ b/changelog.d/5282.misc @@ -0,0 +1 @@ +Fix docs on resetting the user directory. From f795595e956c4584ae280a59ca122057894e0c54 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 22:04:24 -0500 Subject: [PATCH 045/231] Specify the type of reCAPTCHA key to use (#5013) Signed-off-by: Aaron Raimist --- docs/CAPTCHA_SETUP.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/CAPTCHA_SETUP.rst b/docs/CAPTCHA_SETUP.rst index 19a204d9ce4d..0c22ee4ff612 100644 --- a/docs/CAPTCHA_SETUP.rst +++ b/docs/CAPTCHA_SETUP.rst @@ -7,6 +7,7 @@ Requires a public/private key pair from: https://developers.google.com/recaptcha/ +Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option Setting ReCaptcha Keys ---------------------- From 2ec28094606383a4e00fa6665b27a64bc00fd9fd Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 22:05:51 -0500 Subject: [PATCH 046/231] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5283.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5283.misc diff --git a/changelog.d/5283.misc b/changelog.d/5283.misc new file mode 100644 index 000000000000..002721e566ef --- /dev/null +++ b/changelog.d/5283.misc @@ -0,0 +1 @@ +Specify the type of reCAPTCHA key to use. From ecaa299cabe099449a1a05aef4ba3708c9d231cf Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 May 2019 16:32:30 +1000 Subject: [PATCH 047/231] Rename 5282.misc to 5282.doc --- changelog.d/{5282.misc => 5282.doc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5282.misc => 5282.doc} (100%) diff --git a/changelog.d/5282.misc b/changelog.d/5282.doc similarity index 100% rename from changelog.d/5282.misc rename to changelog.d/5282.doc From 0729ef01f80b8d6f2fcf1ab40a22587347b2c777 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 May 2019 16:41:25 +1000 Subject: [PATCH 048/231] regenerate sample config --- docs/sample_config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 421ae96f04f1..edfde05a2323 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1103,9 +1103,9 @@ password_config: # # 'search_all_users' defines whether to search all users visible to your HS # when searching the user directory, rather than limiting to users visible -# in public rooms. Defaults to false. If you set it True, you'll have to run -# UPDATE user_directory_stream_pos SET stream_id = NULL; -# on your database to tell it to rebuild the user_directory search indexes. +# in public rooms. Defaults to false. 
If you set it True, you'll have to +# rebuild the user_directory search indexes, see +# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md # #user_directory: # enabled: true From f76d407ef3d2f6c18a568eff965e12e794105a7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 09:17:33 +0100 Subject: [PATCH 049/231] Fix dropped logcontexts during high outbound traffic. (#5277) Fixes #5271. --- changelog.d/5277.bugfix | 1 + synapse/app/_base.py | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 changelog.d/5277.bugfix diff --git a/changelog.d/5277.bugfix b/changelog.d/5277.bugfix new file mode 100644 index 000000000000..371aa2e7fbbb --- /dev/null +++ b/changelog.d/5277.bugfix @@ -0,0 +1 @@ +Fix dropped logcontexts during high outbound traffic. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 08199a5e8df9..8cc990399f8f 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -344,15 +344,21 @@ def __init__(self, resolver, max_dns_requests_in_flight): def resolveHostName(self, resolutionReceiver, hostName, portNumber=0, addressTypes=None, transportSemantics='TCP'): - # Note this is happening deep within the reactor, so we don't need to - # worry about log contexts. - # We need this function to return `resolutionReceiver` so we do all the # actual logic involving deferreds in a separate function. - self._resolve( - resolutionReceiver, hostName, portNumber, - addressTypes, transportSemantics, - ) + + # even though this is happening within the depths of twisted, we need to drop + # our logcontext before starting _resolve, otherwise: (a) _resolve will drop + # the logcontext if it returns an incomplete deferred; (b) _resolve will + # call the resolutionReceiver *with* a logcontext, which it won't be expecting. + with PreserveLoggingContext(): + self._resolve( + resolutionReceiver, + hostName, + portNumber, + addressTypes, + transportSemantics, + ) return resolutionReceiver From 58c8ed5b0dbfe0556f11985a61c0e13bbe61d93c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:56:24 +0100 Subject: [PATCH 050/231] Correctly filter out extremities with soft failed prevs (#5274) When we receive a soft failed event we, correctly, *do not* update the forward extremity table with the event. However, if we later receive an event that references the soft failed event we then need to remove the soft failed events prev events from the forward extremities table, otherwise we just build up forward extremities. Fixes #5269 --- changelog.d/5274.bugfix | 1 + synapse/storage/events.py | 82 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5274.bugfix diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix new file mode 100644 index 000000000000..9e14d20289f8 --- /dev/null +++ b/changelog.d/5274.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2ffc27ff4188..6e9f3d1dc0b4 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -554,10 +554,18 @@ def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids): e_id for event in new_events for e_id in event.prev_event_ids() ) - # Finally, remove any events which are prev_events of any existing events. 
+ # Remove any events which are prev_events of any existing events. existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) + # Finally handle the case where the new events have soft-failed prev + # events. If they do we need to remove them and their prev events, + # otherwise we end up with dangling extremities. + existing_prevs = yield self._get_prevs_before_rejected( + e_id for event in new_events for e_id in event.prev_event_ids() + ) + result.difference_update(existing_prevs) + defer.returnValue(result) @defer.inlineCallbacks @@ -573,7 +581,7 @@ def _get_events_which_are_prevs(self, event_ids): """ results = [] - def _get_events(txn, batch): + def _get_events_which_are_prevs_txn(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges @@ -596,10 +604,78 @@ def _get_events(txn, batch): ) for chunk in batch_iter(event_ids, 100): - yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events_which_are_prevs_txn, + chunk, + ) defer.returnValue(results) + @defer.inlineCallbacks + def _get_prevs_before_rejected(self, event_ids): + """Get soft-failed ancestors to remove from the extremities. + + Given a set of events, find all those that have been soft-failed or + rejected. Returns those soft failed/rejected events and their prev + events (whether soft-failed/rejected or not), and recurses up the + prev-event graph until it finds no more soft-failed/rejected events. + + This is used to find extremities that are ancestors of new events, but + are separated by soft failed events. + + Args: + event_ids (Iterable[str]): Events to find prev events for. Note + that these must have already been persisted. + + Returns: + Deferred[set[str]] + """ + + # The set of event_ids to return. This includes all soft-failed events + # and their prev events. + existing_prevs = set() + + def _get_prevs_before_rejected_txn(txn, batch): + to_recursively_check = batch + + while to_recursively_check: + sql = """ + SELECT + event_id, prev_event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + LEFT JOIN event_json USING (event_id) + WHERE + event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" 
for _ in to_recursively_check),
+                )
+
+                txn.execute(sql, to_recursively_check)
+                to_recursively_check = []
+
+                for event_id, prev_event_id, metadata, rejected in txn:
+                    if prev_event_id in existing_prevs:
+                        continue
+
+                    soft_failed = json.loads(metadata).get("soft_failed")
+                    if soft_failed or rejected:
+                        to_recursively_check.append(prev_event_id)
+                        existing_prevs.add(prev_event_id)
+
+        for chunk in batch_iter(event_ids, 100):
+            yield self.runInteraction(
+                "_get_prevs_before_rejected",
+                _get_prevs_before_rejected_txn,
+                chunk,
+            )
+
+        defer.returnValue(existing_prevs)
+
     @defer.inlineCallbacks
     def _get_new_state_after_events(
         self, room_id, events_context, old_latest_event_ids, new_latest_event_ids

From 30858ff4617517916fc8973b16c6be6e13288bd0 Mon Sep 17 00:00:00 2001
From: Aaron Raimist 
Date: Wed, 29 May 2019 08:27:41 -0500
Subject: [PATCH 051/231] Fix error when downloading thumbnail with
 width/height param missing (#5258)

Fix error when downloading thumbnail with width/height param missing

Fixes #2748

Signed-off-by: Aaron Raimist 
---
 changelog.d/5258.bugfix                     | 1 +
 synapse/rest/media/v1/thumbnail_resource.py | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/5258.bugfix

diff --git a/changelog.d/5258.bugfix b/changelog.d/5258.bugfix
new file mode 100644
index 000000000000..fb5d44aedbf2
--- /dev/null
+++ b/changelog.d/5258.bugfix
@@ -0,0 +1 @@
+Fix error when downloading thumbnail with missing width/height parameter.

diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py
index 5305e9175f25..35a750923b6f 100644
--- a/synapse/rest/media/v1/thumbnail_resource.py
+++ b/synapse/rest/media/v1/thumbnail_resource.py
@@ -56,8 +56,8 @@ def render_GET(self, request):
     def _async_render_GET(self, request):
         set_cors_headers(request)
         server_name, media_id, _ = parse_media_id(request)
-        width = parse_integer(request, "width")
-        height = parse_integer(request, "height")
+        width = parse_integer(request, "width", required=True)
+        height = parse_integer(request, "height", required=True)
         method = parse_string(request, "method", "scale")
         m_type = parse_string(request, "type", "image/png")

From d79c9994f416ee5dab27a277fa729ffa5ee74ccc Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Tue, 28 May 2019 18:52:41 +0100
Subject: [PATCH 052/231] Add DB bg update to cleanup extremities.

Due to #5269 we may have extremities in our DB that we shouldn't have,
so let's add a cleanup task to remove those.
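
In outline, the new background update does the walk below; this is a toy
version over an in-memory graph (illustrative only: `children` maps an event
ID to the IDs of events naming it as a prev_event, while the real update
batches the same walk through the database tables):

    def extremities_to_delete(extremities, children, is_soft_failed_or_rejected):
        # An extremity should be dropped if an accepted event (neither
        # soft-failed nor rejected) descends from it, possibly via a chain
        # of soft-failed/rejected events.
        to_delete = set()
        for extremity in extremities:
            stack = list(children.get(extremity, ()))
            seen = set()
            while stack:
                event_id = stack.pop()
                if event_id in seen:
                    continue
                seen.add(event_id)
                if is_soft_failed_or_rejected(event_id):
                    # Keep walking: an accepted event may lie further down.
                    stack.extend(children.get(event_id, ()))
                else:
                    # Found an accepted descendant, so this event must not
                    # stay a forward extremity.
                    to_delete.add(extremity)
                    break
        return to_delete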
--- synapse/storage/events.py | 186 ++++++++++++++++++ .../delta/54/delete_forward_extremities.sql | 19 ++ 2 files changed, 205 insertions(+) create mode 100644 synapse/storage/schema/delta/54/delete_forward_extremities.sql diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6e9f3d1dc0b4..a9be143bd5c7 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,6 +221,7 @@ class EventsStore( ): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) @@ -252,6 +253,11 @@ def __init__(self, db_conn, hs): psql_only=True, ) + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -2341,6 +2347,186 @@ def get_all_updated_current_state_deltas_txn(txn): get_all_updated_current_state_deltas_txn, ) + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their prev events, if any, and their prev events rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. 
+ continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph.setdefault(event_id, set()).add(prev_event_id) + logger.info("Already handled") + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. + to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + logger.info("Deleting up to %d forward extremities", len(to_delete)) + + self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + if to_delete: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + for row in rows: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (row["room_id"],) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) + AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql new file mode 100644 index 000000000000..7056bd1d0019 --- /dev/null +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The 
Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('delete_soft_failed_extremities', '{}'); + +CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 7e8e683754cdc606a1440832d9b1eb47f930ddee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:58:32 +0100 Subject: [PATCH 053/231] Log actual number of entries deleted --- synapse/storage/_base.py | 12 +++++++++--- synapse/storage/events.py | 6 ++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839cecade..3fe827cd4371 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1261,7 +1261,8 @@ def _simple_delete_txn(txn, table, keyvalues): " AND ".join("%s = ?" % (k,) for k in keyvalues), ) - return txn.execute(sql, list(keyvalues.values())) + txn.execute(sql, list(keyvalues.values())) + return txn.rowcount def _simple_delete_many(self, table, column, iterable, keyvalues, desc): return self.runInteraction( @@ -1280,9 +1281,12 @@ def _simple_delete_many_txn(txn, table, column, iterable, keyvalues): column : column name to test for inclusion against `iterable` iterable : list keyvalues : dict of column names and values to select the rows with + + Returns: + int: Number rows deleted """ if not iterable: - return + return 0 sql = "DELETE FROM %s" % table @@ -1297,7 +1301,9 @@ def _simple_delete_many_txn(txn, table, column, iterable, keyvalues): if clauses: sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) - return txn.execute(sql, values) + txn.execute(sql, values) + + return txn.rowcount def _get_cache_dict( self, db_conn, table, entity_column, stream_column, max_value, limit=100000 diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9be143bd5c7..a9664928ca94 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2476,7 +2476,7 @@ def _cleanup_extremities_bg_update_txn(txn): logger.info("Deleting up to %d forward extremities", len(to_delete)) - self._simple_delete_many_txn( + deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", column="event_id", @@ -2484,7 +2484,9 @@ def _cleanup_extremities_bg_update_txn(txn): keyvalues={}, ) - if to_delete: + logger.info("Deleted %d forward extremities", deleted) + + if deleted: # We now need to invalidate the caches of these rooms rows = self._simple_select_many_txn( txn, From 532b825ed9dff2faec1360fa3ee3734e0d782bd3 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 30 May 2019 00:55:18 +1000 Subject: [PATCH 054/231] Serve CAS login over r0 (#5286) --- changelog.d/5286.feature | 1 + synapse/rest/client/v1/login.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5286.feature diff --git a/changelog.d/5286.feature b/changelog.d/5286.feature new file mode 100644 index 000000000000..81860279a32e --- /dev/null +++ b/changelog.d/5286.feature 
@@ -0,0 +1 @@ +CAS login will now hit the r0 API, not the deprecated v1 one. diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 5180e9eaf169..029039c162fd 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -386,7 +386,7 @@ def on_GET(self, request): b"redirectUrl": args[b"redirectUrl"][0] }).encode('ascii') hs_redirect_url = (self.cas_service_url + - b"/_matrix/client/api/v1/login/cas/ticket") + b"/_matrix/client/r0/login/cas/ticket") service_param = urllib.parse.urlencode({ b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) }).encode('ascii') @@ -395,7 +395,7 @@ def on_GET(self, request): class CasTicketServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login/cas/ticket", releases=()) + PATTERNS = client_path_patterns("/login/cas/ticket") def __init__(self, hs): super(CasTicketServlet, self).__init__(hs) From d7add713a8351024aec9a51c1744f78ac39f552e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 14:19:11 +0100 Subject: [PATCH 055/231] Add test --- tests/storage/test_cleanup_extrems.py | 248 ++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 tests/storage/test_cleanup_extrems.py diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py new file mode 100644 index 000000000000..6dda66ecd3b9 --- /dev/null +++ b/tests/storage/test_cleanup_extrems.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os.path + +from synapse.api.constants import EventTypes +from synapse.storage import prepare_database +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): + """Test the background update to clean forward extremities table. + """ + + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.event_creator = homeserver.get_event_creation_handler() + self.room_creator = homeserver.get_room_creation_handler() + + # Create a test user and room + self.user = UserID("alice", "test") + self.requester = Requester(self.user, None, False, None, None) + info = self.get_success(self.room_creator.create_room(self.requester, {})) + self.room_id = info["room_id"] + + def create_and_send_event(self, soft_failed=False, prev_event_ids=None): + """Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. 
+ """ + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + self.event_creator.create_event( + self.requester, + { + "type": EventTypes.Message, + "room_id": self.room_id, + "sender": self.user.to_string(), + "content": {"body": "", "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + self.event_creator.send_nonmember_event(self.requester, event, context) + ) + + return event.event_id + + def add_extremity(self, event_id): + """Add the given event as an extremity to the room. + """ + self.get_success( + self.store._simple_insert( + table="event_forward_extremities", + values={"room_id": self.room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) + + def run_background_update(self): + """Re run the background update to clean up the extremities. + """ + # Make sure we don't clash with in progress updates. + self.assertTrue(self.store._all_done, "Background updates are still ongoing") + + schema_path = os.path.join( + prepare_database.dir_path, + "schema", + "delta", + "54", + "delete_forward_extremities.sql", + ) + + def run_delta_file(txn): + prepare_database.executescript(txn, schema_path) + + self.get_success( + self.store.runInteraction("test_delete_forward_extremities", run_delta_file) + ) + + # Ugh, have to reset this flag + self.store._all_done = False + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + def test_soft_failed_extremities_handled_correctly(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed. + """ + + # Create the room graph + event_id_1 = self.create_and_send_event() + event_id_2 = self.create_and_send_event(True, [event_id_1]) + event_id_3 = self.create_and_send_event(True, [event_id_2]) + event_id_4 = self.create_and_send_event(False, [event_id_3]) + + # Check the latest events are as expected + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + + self.assertEqual(latest_event_ids, [event_id_4]) + + def test_basic_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_b = self.create_and_send_event(False, [event_id_sf1]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_chain_of_fail_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. 
+ + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) + event_id_b = self.create_and_send_event(False, [event_id_sf2]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_forked_graph_cleanup(self): + r"""Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like, where time flows down the page: + + A B + / \ / + / \ / + SF1 SF2 + | | + SF3 | + / \ | + | \ | + C SF4 + + Where SF* are soft failed, and with them A, B and C marked as + extremities. This should resolve to B and C being marked as extremity. + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_b = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) + event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) + self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 + event_id_c = self.create_and_send_event(False, [event_id_sf3]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual( + set(latest_event_ids), set((event_id_a, event_id_b, event_id_c)) + ) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c])) From 67e0631f8f8bfc2843d2c06ebf20fe2226810686 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:56:02 +0100 Subject: [PATCH 056/231] Newsfile --- changelog.d/5278.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5278.bugfix diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix new file mode 100644 index 000000000000..9e14d20289f8 --- /dev/null +++ b/changelog.d/5278.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
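
As an operational aside to the fix above: the cleanup runs as a regular
background update, and background updates delete their own row from the
`background_updates` table (seen in the delta file above) once they complete.
A hypothetical way to check on it, assuming a SQLite homeserver database at
`homeserver.db`:

    import sqlite3

    # The path is an assumption; point it at the homeserver's database file.
    conn = sqlite3.connect("homeserver.db")
    rows = conn.execute(
        "SELECT update_name, progress_json FROM background_updates"
    ).fetchall()
    # 'delete_soft_failed_extremities' stops appearing here once the cleanup
    # has finished.
    print(rows)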
From 46c8f7a5170d04dfa6ad02c69667d4aa48635231 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 30 May 2019 01:47:16 +1000 Subject: [PATCH 057/231] Implement the SHHS complexity API (#5216) --- changelog.d/5216.misc | 1 + synapse/api/urls.py | 1 + synapse/federation/transport/server.py | 31 ++++++++- synapse/rest/admin/__init__.py | 12 +++- synapse/storage/events_worker.py | 50 +++++++++++++- tests/federation/test_complexity.py | 90 ++++++++++++++++++++++++++ 6 files changed, 180 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5216.misc create mode 100644 tests/federation/test_complexity.py diff --git a/changelog.d/5216.misc b/changelog.d/5216.misc new file mode 100644 index 000000000000..dbfa29475f0a --- /dev/null +++ b/changelog.d/5216.misc @@ -0,0 +1 @@ +Synapse will now serve the experimental "room complexity" API endpoint. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 3c6bddff7a9d..e16c386a14d2 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -26,6 +26,7 @@ FEDERATION_PREFIX = "/_matrix/federation" FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1" FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2" +FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable" STATIC_PREFIX = "/_matrix/static" WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 385eda2dca91..d0efc4e0d325 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -23,7 +23,11 @@ import synapse from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.room_versions import RoomVersions -from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX +from synapse.api.urls import ( + FEDERATION_UNSTABLE_PREFIX, + FEDERATION_V1_PREFIX, + FEDERATION_V2_PREFIX, +) from synapse.http.endpoint import parse_and_validate_server_name from synapse.http.server import JsonResource from synapse.http.servlet import ( @@ -1304,6 +1308,30 @@ def on_PUT(self, origin, content, query, group_id): defer.returnValue((200, new_content)) +class RoomComplexityServlet(BaseFederationServlet): + """ + Indicates to other servers how complex (and therefore likely + resource-intensive) a public room this server knows about is. 
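+
+    The response maps complexity algorithm versions to scores, for
+    example {"v1": 2.46}.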
+    """
+    PATH = "/rooms/(?P<room_id>[^/]*)/complexity"
+    PREFIX = FEDERATION_UNSTABLE_PREFIX
+
+    @defer.inlineCallbacks
+    def on_GET(self, origin, content, query, room_id):
+
+        store = self.handler.hs.get_datastore()
+
+        is_public = yield store.is_room_world_readable_or_publicly_joinable(
+            room_id
+        )
+
+        if not is_public:
+            raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM)
+
+        complexity = yield store.get_room_complexity(room_id)
+        defer.returnValue((200, complexity))
+
+
 FEDERATION_SERVLET_CLASSES = (
     FederationSendServlet,
     FederationEventServlet,
@@ -1327,6 +1355,7 @@ def on_PUT(self, origin, content, query, group_id):
     FederationThirdPartyInviteExchangeServlet,
     On3pidBindServlet,
     FederationVersionServlet,
+    RoomComplexityServlet,
 )

 OPENID_SERVLET_CLASSES = (
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 744d85594fb5..d6c4dcdb1816 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -822,10 +822,16 @@ class AdminRestResource(JsonResource):

     def __init__(self, hs):
         JsonResource.__init__(self, hs, canonical_json=False)
+        register_servlets(hs, self)

-        register_servlets_for_client_rest_resource(hs, self)
-        SendServerNoticeServlet(hs).register(self)
-        VersionServlet(hs).register(self)
+
+def register_servlets(hs, http_server):
+    """
+    Register all the admin servlets.
+    """
+    register_servlets_for_client_rest_resource(hs, http_server)
+    SendServerNoticeServlet(hs).register(http_server)
+    VersionServlet(hs).register(http_server)


 def register_servlets_for_client_rest_resource(hs, http_server):
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py
index 21b353cad3fe..b56c83e4603f 100644
--- a/synapse/storage/events_worker.py
+++ b/synapse/storage/events_worker.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import division
+
 import itertools
 import logging
 from collections import namedtuple
@@ -614,7 +616,7 @@ def f(txn):

     def _get_total_state_event_counts_txn(self, txn, room_id):
         """
-        See get_state_event_counts.
+        See get_total_state_event_counts.
         """
         sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?"
         txn.execute(sql, (room_id,))
@@ -635,3 +637,49 @@ def get_total_state_event_counts(self, room_id):
             "get_total_state_event_counts",
             self._get_total_state_event_counts_txn, room_id
         )
+
+    def _get_current_state_event_counts_txn(self, txn, room_id):
+        """
+        See get_current_state_event_counts.
+        """
+        sql = "SELECT COUNT(*) FROM current_state_events WHERE room_id=?"
+        txn.execute(sql, (room_id,))
+        row = txn.fetchone()
+        return row[0] if row else 0
+
+    def get_current_state_event_counts(self, room_id):
+        """
+        Gets the current number of state events in a room.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[int]
+        """
+        return self.runInteraction(
+            "get_current_state_event_counts",
+            self._get_current_state_event_counts_txn, room_id
+        )
+
+    @defer.inlineCallbacks
+    def get_room_complexity(self, room_id):
+        """
+        Get a rough approximation of the complexity of the room. This is used by
+        remote servers to decide whether they wish to join the room or not.
+        Higher complexity value indicates that being in the room will consume
+        more resources.
+
+        Args:
+            room_id (str)
+
+        Returns:
+            Deferred[dict[str:int]] of complexity version to complexity.
+        """
+        state_events = yield self.get_current_state_event_counts(room_id)
+
+        # Call this one "v1", so we can introduce new ones as we want to develop
+        # it.
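+        # For example, a room with 1,000 current state events scores 2.0
+        # under "v1".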
+ complexity_v1 = round(state_events / 500, 2) + + defer.returnValue({"v1": complexity_v1}) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py new file mode 100644 index 000000000000..1e3e5aec6643 --- /dev/null +++ b/tests/federation/test_complexity.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 Matrix.org Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.federation.transport import server +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.util.ratelimitutils import FederationRateLimiter + +from tests import unittest + + +class RoomComplexityTests(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def default_config(self, name='test'): + config = super(RoomComplexityTests, self).default_config(name=name) + config["limit_large_remote_room_joins"] = True + config["limit_large_remote_room_complexity"] = 0.05 + return config + + def prepare(self, reactor, clock, homeserver): + class Authenticator(object): + def authenticate_request(self, request, content): + return defer.succeed("otherserver.nottld") + + ratelimiter = FederationRateLimiter( + clock, + FederationRateLimitConfig( + window_size=1, + sleep_limit=1, + sleep_msec=1, + reject_limit=1000, + concurrent_requests=1000, + ), + ) + server.register_servlets( + homeserver, self.resource, Authenticator(), ratelimiter + ) + + def test_complexity_simple(self): + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + self.helper.send_state( + room_1, event_type="m.room.topic", body={"topic": "foo"}, tok=u1_token + ) + + # Get the room complexity + request, channel = self.make_request( + "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code) + complexity = channel.json_body["v1"] + self.assertTrue(complexity > 0, complexity) + + # Artificially raise the complexity + store = self.hs.get_datastore() + store.get_current_state_event_counts = lambda x: defer.succeed(500 * 1.23) + + # Get the room complexity again -- make sure it's our artificial value + request, channel = self.make_request( + "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code) + complexity = channel.json_body["v1"] + self.assertEqual(complexity, 1.23) From 3dcf2feba8ee38c43c63c0f321fd379f843a5929 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 19:27:50 +0100 Subject: [PATCH 058/231] Improve logging for logcontext leaks. 
(#5288) --- changelog.d/5288.misc | 1 + synapse/util/logcontext.py | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5288.misc diff --git a/changelog.d/5288.misc b/changelog.d/5288.misc new file mode 100644 index 000000000000..fbf049ba6aad --- /dev/null +++ b/changelog.d/5288.misc @@ -0,0 +1 @@ +Improve logging for logcontext leaks. diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 311b49e18a35..fe412355d858 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -226,6 +226,8 @@ def __init__(self, name=None, parent_context=None, request=None): self.request = request def __str__(self): + if self.request: + return str(self.request) return "%s@%x" % (self.name, id(self)) @classmethod @@ -274,12 +276,10 @@ def __exit__(self, type, value, traceback): current = self.set_current_context(self.previous_context) if current is not self: if current is self.sentinel: - logger.warn("Expected logging context %s has been lost", self) + logger.warning("Expected logging context %s was lost", self) else: - logger.warn( - "Current logging context %s is not expected context %s", - current, - self + logger.warning( + "Expected logging context %s but found %s", self, current ) self.previous_context = None self.alive = False @@ -433,10 +433,14 @@ def __exit__(self, type, value, traceback): context = LoggingContext.set_current_context(self.current_context) if context != self.new_context: - logger.warn( - "Unexpected logging context: %s is not %s", - context, self.new_context, - ) + if context is LoggingContext.sentinel: + logger.warning("Expected logging context %s was lost", self.new_context) + else: + logger.warning( + "Expected logging context %s but found %s", + self.new_context, + context, + ) if self.current_context is not LoggingContext.sentinel: if not self.current_context.alive: From 8d92329214f92b0e4e4f2d6fa21e1197a691ba5b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 19:31:52 +0100 Subject: [PATCH 059/231] Remove spurious debug from MatrixFederationHttpClient.get_json (#5287) This is just unhelpful spam --- changelog.d/5287.misc | 1 + synapse/http/matrixfederationclient.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/5287.misc diff --git a/changelog.d/5287.misc b/changelog.d/5287.misc new file mode 100644 index 000000000000..1286f1dd08db --- /dev/null +++ b/changelog.d/5287.misc @@ -0,0 +1 @@ +Remove spurious debug from MatrixFederationHttpClient.get_json. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 7eefc7b1fc23..8197619a7806 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -711,10 +711,6 @@ def get_json(self, destination, path, args=None, retry_on_dns_fail=True, RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" - logger.debug("get_json args: %s", args) - - logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - request = MatrixFederationRequest( method="GET", destination=destination, From 123918b73938bdba89e6e0ce66482444590f2b4e Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 29 May 2019 14:44:25 -0500 Subject: [PATCH 060/231] Lint Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 2cf373e83cf2..ba20e75033af 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -17,8 +17,6 @@ from twisted.internet import defer -from synapse.api.errors import AuthError - from .base import ClientV1RestServlet, client_path_patterns logger = logging.getLogger(__name__) From 640fcbb07f8dc7d89465734f009d8e0a458c2b17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 10:55:55 +0100 Subject: [PATCH 061/231] Fixup comments and logging --- synapse/storage/events.py | 21 +++++++++++-------- .../delta/54/delete_forward_extremities.sql | 3 +++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9664928ca94..418d88b8dca8 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2387,7 +2387,8 @@ def _cleanup_extremities_bg_update_txn(txn): soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their prev events, if any, and their prev events rejection status. + # their successor events, if any, and their successor events + # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, rejections.event_id IS NOT NULL, events.outlier @@ -2450,11 +2451,10 @@ def _cleanup_extremities_bg_update_txn(txn): if event_id in graph: # Already handled this event previously, but we still # want to record the edge. 
- graph.setdefault(event_id, set()).add(prev_event_id) - logger.info("Already handled") + graph[event_id].add(prev_event_id) continue - graph.setdefault(event_id, set()).add(prev_event_id) + graph[event_id] = {prev_event_id} soft_failed = json.loads(metadata).get("soft_failed") if soft_failed or rejected: @@ -2474,8 +2474,6 @@ def _cleanup_extremities_bg_update_txn(txn): to_delete.intersection_update(original_set) - logger.info("Deleting up to %d forward extremities", len(to_delete)) - deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", @@ -2484,7 +2482,11 @@ def _cleanup_extremities_bg_update_txn(txn): keyvalues={}, ) - logger.info("Deleted %d forward extremities", deleted) + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) if deleted: # We now need to invalidate the caches of these rooms @@ -2496,10 +2498,11 @@ def _cleanup_extremities_bg_update_txn(txn): keyvalues={}, retcols=("room_id",) ) - for row in rows: + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, - (row["room_id"],) + (room_id,) ) self._simple_delete_many_txn( diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index 7056bd1d0019..aa40f13da7b0 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -13,7 +13,10 @@ * limitations under the License. */ +-- Start a background job to cleanup extremities that were incorrectly added +-- by bug #5269. INSERT INTO background_updates (update_name, progress_json) VALUES ('delete_soft_failed_extremities', '{}'); +DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 5c1ece0ffcd803eb4bf8e5748d3e2633426e00a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:22:59 +0100 Subject: [PATCH 062/231] Move event background updates to a separate file --- synapse/storage/__init__.py | 2 + synapse/storage/events.py | 371 +------------------------ synapse/storage/events_bg_updates.py | 401 +++++++++++++++++++++++++++ 3 files changed, 405 insertions(+), 369 deletions(-) create mode 100644 synapse/storage/events_bg_updates.py diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 66675d08aee5..71316f7d093e 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -36,6 +36,7 @@ from .event_federation import EventFederationStore from .event_push_actions import EventPushActionsStore from .events import EventsStore +from .events_bg_updates import EventsBackgroundUpdatesStore from .filtering import FilteringStore from .group_server import GroupServerStore from .keys import KeyStore @@ -66,6 +67,7 @@ class DataStore( + EventsBackgroundUpdatesStore, RoomMemberStore, RoomStore, RegistrationStore, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 418d88b8dca8..f9162be9b90a 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,47 +220,11 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) - self.register_background_update_handler( - self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts - ) - self.register_background_update_handler( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, - self._background_reindex_fields_sender, - ) - - self.register_background_index_update( - "event_contains_url_index", - index_name="event_contains_url_index", - table="events", - columns=["room_id", "topological_ordering", "stream_ordering"], - where_clause="contains_url = true AND outlier = false", - ) - - # an event_id index on event_search is useful for the purge_history - # api. Plus it means we get to enforce some integrity with a UNIQUE - # clause - self.register_background_index_update( - "event_search_event_id_idx", - index_name="event_search_event_id_idx", - table="event_search", - columns=["event_id"], - unique=True, - psql_only=True, - ) - - self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, - self._cleanup_extremities_bg_update, - ) self._event_persist_queue = _EventPeristenceQueue() - self._state_resolution_handler = hs.get_state_resolution_handler() @defer.inlineCallbacks @@ -1585,153 +1550,6 @@ def _count(txn): ret = yield self.runInteraction("count_daily_active_rooms", _count) defer.returnValue(ret) - @defer.inlineCallbacks - def _background_reindex_fields_sender(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_txn(txn): - sql = ( - "SELECT stream_ordering, event_id, json FROM events" - " INNER JOIN event_json USING (event_id)" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - - update_rows = [] - for row in rows: - try: - event_id = row[1] - event_json = json.loads(row[2]) - sender = event_json["sender"] - content = event_json["content"] - - contains_url = "url" in content - if contains_url: - contains_url &= isinstance(content["url"], text_type) - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - update_rows.append((sender, contains_url, event_id)) - - sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
- - for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): - clump = update_rows[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows), - } - - self._background_update_progress_txn( - txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress - ) - - return len(rows) - - result = yield self.runInteraction( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - - defer.returnValue(result) - - @defer.inlineCallbacks - def _background_reindex_origin_server_ts(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_search_txn(txn): - sql = ( - "SELECT stream_ordering, event_id FROM events" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - event_ids = [row[1] for row in rows] - - rows_to_update = [] - - chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] - for chunk in chunks: - ev_rows = self._simple_select_many_txn( - txn, - table="event_json", - column="event_id", - iterable=chunk, - retcols=["event_id", "json"], - keyvalues={}, - ) - - for row in ev_rows: - event_id = row["event_id"] - event_json = json.loads(row["json"]) - try: - origin_server_ts = event_json["origin_server_ts"] - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - rows_to_update.append((origin_server_ts, event_id)) - - sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" - - for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): - clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows_to_update), - } - - self._background_update_progress_txn( - txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress - ) - - return len(rows_to_update) - - result = yield self.runInteraction( - self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - - defer.returnValue(result) - def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" return -self._backfill_id_gen.get_current_token() @@ -2347,191 +2165,6 @@ def get_all_updated_current_state_deltas_txn(txn): get_all_updated_current_state_deltas_txn, ) - @defer.inlineCallbacks - def _cleanup_extremities_bg_update(self, progress, batch_size): - """Background update to clean out extremities that should have been - deleted previously. - - Mainly used to deal with the aftermath of #5269. - """ - - # This works by first copying all existing forward extremities into the - # `_extremities_to_check` table at start up, and then checking each - # event in that table whether we have any descendants that are not - # soft-failed/rejected. 
If that is the case then we delete that event - # from the forward extremities table. - # - # For efficiency, we do this in batches by recursively pulling out all - # descendants of a batch until we find the non soft-failed/rejected - # events, i.e. the set of descendants whose chain of prev events back - # to the batch of extremities are all soft-failed or rejected. - # Typically, we won't find any such events as extremities will rarely - # have any descendants, but if they do then we should delete those - # extremities. - - def _cleanup_extremities_bg_update_txn(txn): - # The set of extremity event IDs that we're checking this round - original_set = set() - - # A dict[str, set[str]] of event ID to their prev events. - graph = {} - - # The set of descendants of the original set that are not rejected - # nor soft-failed. Ancestors of these events should be removed - # from the forward extremities table. - non_rejected_leaves = set() - - # Set of event IDs that have been soft failed, and for which we - # should check if they have descendants which haven't been soft - # failed. - soft_failed_events_to_lookup = set() - - # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events - # rejection status. - txn.execute( - """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL, events.outlier - FROM ( - SELECT event_id AS prev_event_id - FROM _extremities_to_check - LIMIT ? - ) AS f - LEFT JOIN event_edges USING (prev_event_id) - LEFT JOIN events USING (event_id) - LEFT JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - """, (batch_size,) - ) - - for prev_event_id, event_id, metadata, rejected, outlier in txn: - original_set.add(prev_event_id) - - if not event_id or outlier: - # Common case where the forward extremity doesn't have any - # descendants. - continue - - graph.setdefault(event_id, set()).add(prev_event_id) - - soft_failed = False - if metadata: - soft_failed = json.loads(metadata).get("soft_failed") - - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # Now we recursively check all the soft-failed descendants we - # found above in the same way, until we have nothing left to - # check. - while soft_failed_events_to_lookup: - # We only want to do 100 at a time, so we split given list - # into two. - batch = list(soft_failed_events_to_lookup) - to_check, to_defer = batch[:100], batch[100:] - soft_failed_events_to_lookup = set(to_defer) - - sql = """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL - FROM event_edges - INNER JOIN events USING (event_id) - INNER JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - WHERE - prev_event_id IN (%s) - AND NOT events.outlier - """ % ( - ",".join("?" for _ in to_check), - ) - txn.execute(sql, to_check) - - for prev_event_id, event_id, metadata, rejected in txn: - if event_id in graph: - # Already handled this event previously, but we still - # want to record the edge. 
- graph[event_id].add(prev_event_id) - continue - - graph[event_id] = {prev_event_id} - - soft_failed = json.loads(metadata).get("soft_failed") - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # We have a set of non-soft-failed descendants, so we recurse up - # the graph to find all ancestors and add them to the set of event - # IDs that we can delete from forward extremities table. - to_delete = set() - while non_rejected_leaves: - event_id = non_rejected_leaves.pop() - prev_event_ids = graph.get(event_id, set()) - non_rejected_leaves.update(prev_event_ids) - to_delete.update(prev_event_ids) - - to_delete.intersection_update(original_set) - - deleted = self._simple_delete_many_txn( - txn=txn, - table="event_forward_extremities", - column="event_id", - iterable=to_delete, - keyvalues={}, - ) - - logger.info( - "Deleted %d forward extremities of %d checked, to clean up #5269", - deleted, - len(original_set), - ) - - if deleted: - # We now need to invalidate the caches of these rooms - rows = self._simple_select_many_txn( - txn, - table="events", - column="event_id", - iterable=to_delete, - keyvalues={}, - retcols=("room_id",) - ) - room_ids = set(row["room_id"] for row in rows) - for room_id in room_ids: - txn.call_after( - self.get_latest_event_ids_in_room.invalidate, - (room_id,) - ) - - self._simple_delete_many_txn( - txn=txn, - table="_extremities_to_check", - column="event_id", - iterable=original_set, - keyvalues={}, - ) - - return len(original_set) - - num_handled = yield self.runInteraction( - "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, - ) - - if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) - - def _drop_table_txn(txn): - txn.execute("DROP TABLE _extremities_to_check") - - yield self.runInteraction( - "_cleanup_extremities_bg_update_drop_table", - _drop_table_txn, - ) - - defer.returnValue(num_handled) - AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py new file mode 100644 index 000000000000..2eba106abfa1 --- /dev/null +++ b/synapse/storage/events_bg_updates.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
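+
+# Background updates for the events store, previously defined inline in
+# synapse/storage/events.py.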
+ +import logging + +from six import text_type + +from canonicaljson import json + +from twisted.internet import defer + +from synapse.storage.background_updates import BackgroundUpdateStore + +logger = logging.getLogger(__name__) + + +class EventsBackgroundUpdatesStore(BackgroundUpdateStore): + + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + + def __init__(self, db_conn, hs): + super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) + + self.register_background_update_handler( + self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts + ) + self.register_background_update_handler( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, + self._background_reindex_fields_sender, + ) + + self.register_background_index_update( + "event_contains_url_index", + index_name="event_contains_url_index", + table="events", + columns=["room_id", "topological_ordering", "stream_ordering"], + where_clause="contains_url = true AND outlier = false", + ) + + # an event_id index on event_search is useful for the purge_history + # api. Plus it means we get to enforce some integrity with a UNIQUE + # clause + self.register_background_index_update( + "event_search_event_id_idx", + index_name="event_search_event_id_idx", + table="event_search", + columns=["event_id"], + unique=True, + psql_only=True, + ) + + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + + @defer.inlineCallbacks + def _background_reindex_fields_sender(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_txn(txn): + sql = ( + "SELECT stream_ordering, event_id, json FROM events" + " INNER JOIN event_json USING (event_id)" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + + update_rows = [] + for row in rows: + try: + event_id = row[1] + event_json = json.loads(row[2]) + sender = event_json["sender"] + content = event_json["content"] + + contains_url = "url" in content + if contains_url: + contains_url &= isinstance(content["url"], text_type) + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + update_rows.append((sender, contains_url, event_id)) + + sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
+ + for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): + clump = update_rows[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows), + } + + self._background_update_progress_txn( + txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress + ) + + return len(rows) + + result = yield self.runInteraction( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _background_reindex_origin_server_ts(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_search_txn(txn): + sql = ( + "SELECT stream_ordering, event_id FROM events" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + event_ids = [row[1] for row in rows] + + rows_to_update = [] + + chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] + for chunk in chunks: + ev_rows = self._simple_select_many_txn( + txn, + table="event_json", + column="event_id", + iterable=chunk, + retcols=["event_id", "json"], + keyvalues={}, + ) + + for row in ev_rows: + event_id = row["event_id"] + event_json = json.loads(row["json"]) + try: + origin_server_ts = event_json["origin_server_ts"] + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + rows_to_update.append((origin_server_ts, event_id)) + + sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" + + for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): + clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows_to_update), + } + + self._background_update_progress_txn( + txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress + ) + + return len(rows_to_update) + + result = yield self.runInteraction( + self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. 
the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their successor events, if any, and their successor events + # rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph[event_id].add(prev_event_id) + continue + + graph[event_id] = {prev_event_id} + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
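+            # e.g. given  A <- SF1 <- B  with extremities A and B, where
+            # SF1 is soft failed: walking up from the accepted leaf B
+            # visits SF1 and then A, so A is deleted and B survives.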
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + deleted = self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) + + if deleted: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (room_id,) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) From 468bd090ff354f27597e08b54a969f22afbfad43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:24:42 +0100 Subject: [PATCH 063/231] Rename constant --- synapse/storage/events_bg_updates.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 2eba106abfa1..22aac1393dbd 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -30,7 +30,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) @@ -64,7 +64,7 @@ def __init__(self, db_conn, hs): ) self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update, ) @@ -388,7 +388,7 @@ def _cleanup_extremities_bg_update_txn(txn): ) if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES) def _drop_table_txn(txn): txn.execute("DROP TABLE _extremities_to_check") From cb967e2346096d7e647c757e3b57093549746f14 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:06:42 +0100 Subject: [PATCH 064/231] Update synapse/storage/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 22aac1393dbd..75c1935bf34a 100644 --- a/synapse/storage/events_bg_updates.py +++ 
b/synapse/storage/events_bg_updates.py @@ -255,7 +255,7 @@ def _cleanup_extremities_bg_update_txn(txn): soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events + # their successor events, if any, and the successor events' # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, From 6cdfb0207e2de72286a7a8d3b3c417c2808e90dc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:54:56 +0100 Subject: [PATCH 065/231] Add index to temp table --- synapse/storage/schema/delta/54/delete_forward_extremities.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index aa40f13da7b0..b062ec840ce9 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -20,3 +20,4 @@ INSERT INTO background_updates (update_name, progress_json) VALUES DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; +CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id); From 06675db684f06b5a369846aac896216bf4cc74ed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:05:26 +0100 Subject: [PATCH 066/231] Newsfile --- changelog.d/5291.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5291.bugfix diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix new file mode 100644 index 000000000000..9e14d20289f8 --- /dev/null +++ b/changelog.d/5291.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
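
Between them, patches 061 to 066 leave the cleanup with a simple lifecycle:
the delta snapshots the current forward extremities into an indexed scratch
table, and the background update drains that table in batches until it is
empty, then drops it. A minimal model of the drain loop, with illustrative
names (the real batches are SQL joins, not Python sets):

    def drain_scratch_table(scratch, batch_size, check_batch):
        # `scratch` stands in for _extremities_to_check; `check_batch`
        # stands in for one _cleanup_extremities_bg_update_txn run, which
        # deletes any extremity in the batch with a live descendant.
        handled = 0
        while scratch:
            batch = [scratch.pop() for _ in range(min(batch_size, len(scratch)))]
            check_batch(batch)
            handled += len(batch)
        # the real update now drops the scratch table and marks itself done
        return handled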
From 54d50fbfdf8c39d92c36291a572419cee6b9b916 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:15:13 +0100 Subject: [PATCH 067/231] Get events all at once --- synapse/storage/stats.py | 59 ++++++++++++++++++---------------------- 1 file changed, 26 insertions(+), 33 deletions(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index eb0ced5b5e46..99b4af555509 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -179,46 +179,39 @@ def _get_next_batch(txn): current_state_ids = yield self.get_current_state_ids(room_id) - join_rules = yield self.get_event( - current_state_ids.get((EventTypes.JoinRules, "")), allow_none=True + join_rules_id = current_state_ids.get((EventTypes.JoinRules, "")) + history_visibility_id = current_state_ids.get( + (EventTypes.RoomHistoryVisibility, "") ) - history_visibility = yield self.get_event( - current_state_ids.get((EventTypes.RoomHistoryVisibility, "")), - allow_none=True, - ) - encryption = yield self.get_event( - current_state_ids.get((EventTypes.RoomEncryption, "")), allow_none=True - ) - name = yield self.get_event( - current_state_ids.get((EventTypes.Name, "")), allow_none=True - ) - topic = yield self.get_event( - current_state_ids.get((EventTypes.Topic, "")), allow_none=True - ) - avatar = yield self.get_event( - current_state_ids.get((EventTypes.RoomAvatar, "")), allow_none=True - ) - canonical_alias = yield self.get_event( - current_state_ids.get((EventTypes.CanonicalAlias, "")), allow_none=True - ) - - def _or_none(x, arg): - if x: - return x.content.get(arg) + encryption_id = current_state_ids.get((EventTypes.RoomEncryption, "")) + name_id = current_state_ids.get((EventTypes.Name, "")) + topic_id = current_state_ids.get((EventTypes.Topic, "")) + avatar_id = current_state_ids.get((EventTypes.RoomAvatar, "")) + canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, "")) + + state_events = yield self.get_events([ + join_rules_id, history_visibility_id, encryption_id, name_id, + topic_id, avatar_id, canonical_alias_id, + ]) + + def _get_or_none(event_id, arg): + event = state_events.get(event_id) + if event: + return event.content.get(arg) return None yield self.update_room_state( room_id, { - "join_rules": _or_none(join_rules, "join_rule"), - "history_visibility": _or_none( - history_visibility, "history_visibility" + "join_rules": _get_or_none(join_rules_id, "join_rule"), + "history_visibility": _get_or_none( + history_visibility_id, "history_visibility" ), - "encryption": _or_none(encryption, "algorithm"), - "name": _or_none(name, "name"), - "topic": _or_none(topic, "topic"), - "avatar": _or_none(avatar, "url"), - "canonical_alias": _or_none(canonical_alias, "alias"), + "encryption": _get_or_none(encryption_id, "algorithm"), + "name": _get_or_none(name_id, "name"), + "topic": _get_or_none(topic_id, "topic"), + "avatar": _get_or_none(avatar_id, "url"), + "canonical_alias": _get_or_none(canonical_alias_id, "alias"), }, ) From 04710cc2d71127b1f416e87f7a4aea3ce6d93410 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:22:32 +0100 Subject: [PATCH 068/231] Fetch membership counts all at once --- synapse/storage/roommember.py | 33 +++++++++++---------------------- synapse/storage/stats.py | 23 +++++++---------------- 2 files changed, 18 insertions(+), 38 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 4bd166945827..761791332674 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -142,26 
+142,9 @@ def _get_room_summary_txn(txn): return self.runInteraction("get_room_summary", _get_room_summary_txn) - def _get_user_count_in_room_txn(self, txn, room_id, membership): + def _get_user_counts_in_room_txn(self, txn, room_id): """ - See get_user_count_in_room. - """ - sql = ( - "SELECT count(*) FROM room_memberships as m" - " INNER JOIN current_state_events as c" - " ON m.event_id = c.event_id " - " AND m.room_id = c.room_id " - " AND m.user_id = c.state_key" - " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?" - ) - - txn.execute(sql, (room_id, membership)) - row = txn.fetchone() - return row[0] - - def get_user_count_in_room(self, room_id, membership): - """ - Get the user count in a room with a particular membership. + Get the user count in a room by membership. Args: room_id (str) @@ -170,9 +153,15 @@ def get_user_count_in_room(self, room_id, membership): Returns: Deferred[int] """ - return self.runInteraction( - "get_users_in_room", self._get_user_count_in_room_txn, room_id, membership - ) + sql = """ + SELECT m.membership, count(*) FROM room_memberships as m + INNER JOIN current_state_events as c USING(event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? + GROUP BY m.membership + """ + + txn.execute(sql, (room_id,)) + return {row[0]: row[1] for row in txn} @cached() def get_invited_rooms_for_user(self, user_id): diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 99b4af555509..727f60b3bd65 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -226,18 +226,9 @@ def _fetch_data(txn): current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) current_state_events = len(current_state_ids) - joined_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.JOIN - ) - invited_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.INVITE - ) - left_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.LEAVE - ) - banned_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.BAN - ) + + membership_counts = self._get_user_counts_in_room_txn(txn, room_id) + total_state_events = self._get_total_state_event_counts_txn( txn, room_id ) @@ -250,10 +241,10 @@ def _fetch_data(txn): { "bucket_size": self.stats_bucket_size, "current_state_events": current_state_events, - "joined_members": joined_members, - "invited_members": invited_members, - "left_members": left_members, - "banned_members": banned_members, + "joined_members": membership_counts.get(Membership.JOIN, 0), + "invited_members": membership_counts.get(Membership.INVITE, 0), + "left_members": membership_counts.get(Membership.LEAVE, 0), + "banned_members": membership_counts.get(Membership.BAN, 0), "state_events": total_state_events, }, ) From e2c46ed851599dc08cc8a822e07c0d4f9a050ee2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:26:38 +0100 Subject: [PATCH 069/231] Move deletion from table inside txn --- synapse/storage/stats.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 727f60b3bd65..a99637d4b40b 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -254,10 +254,13 @@ def _fetch_data(txn): {"room_id": room_id, "token": current_token}, ) + # We've finished a room. Delete it from the table. + self._simple_delete_one_txn( + txn, TEMP_TABLE + "_rooms", {"room_id": room_id}, + ) + yield self.runInteraction("update_room_stats", _fetch_data) - # We've finished a room. 
Delete it from the table. - yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id}) # Update the remaining counter. progress["remaining"] -= 1 yield self.runInteraction( From 5ac75fc9a2d80ddf2974d281c381f82515606403 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:26:55 +0100 Subject: [PATCH 070/231] Join against events to use its room_id index --- synapse/storage/events_worker.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index b56c83e4603f..17824280485a 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -618,7 +618,12 @@ def _get_total_state_event_counts_txn(self, txn, room_id): """ See get_total_state_event_counts. """ - sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?" + # We join against the events table as that has an index on room_id + sql = """ + SELECT COUNT(*) FROM state_events + INNER JOIN events USING (room_id, event_id) + WHERE room_id=? + """ txn.execute(sql, (room_id,)) row = txn.fetchone() return row[0] if row else 0 From 8824325b829baa5262242a50d0ea2c9b738feb78 Mon Sep 17 00:00:00 2001 From: Eisha Chen-yen-su Date: Thu, 30 May 2019 16:58:53 +0200 Subject: [PATCH 071/231] Fix ignored filter field in `/messages` endpoint This fixes a bug which were causing the "event_format" field to be ignored in the filter of requests to the `/messages` endpoint of the CS API. Signed-off-by: Eisha Chen-yen-su --- synapse/rest/client/v1/room.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 255a85c5888a..b92c6a9a9cd5 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -475,6 +475,8 @@ def on_GET(self, request, room_id): if filter_bytes: filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) event_filter = Filter(json.loads(filter_json)) + if event_filter.filter_json.get("event_format", "client") == "federation": + as_client_event = False else: event_filter = None msgs = yield self.pagination_handler.get_messages( From 0b6bc36402b747a6c1bad119aaffdcd326990346 Mon Sep 17 00:00:00 2001 From: Eisha Chen-yen-su Date: Thu, 30 May 2019 17:07:21 +0200 Subject: [PATCH 072/231] Add changelog --- changelog.d/5293.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5293.bugfix diff --git a/changelog.d/5293.bugfix b/changelog.d/5293.bugfix new file mode 100644 index 000000000000..aa519a8433ee --- /dev/null +++ b/changelog.d/5293.bugfix @@ -0,0 +1 @@ +Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. From 099829d5a95b913c47634d13391d6c9f200f0bde Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 1 Apr 2019 12:28:40 +0100 Subject: [PATCH 073/231] use attr.s for VerifyKeyRequest because namedtuple is awful --- changelog.d/5296.misc | 1 + synapse/crypto/keyring.py | 38 +++++++++++++++++++++----------------- 2 files changed, 22 insertions(+), 17 deletions(-) create mode 100644 changelog.d/5296.misc diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc new file mode 100644 index 000000000000..a038a6f7f64b --- /dev/null +++ b/changelog.d/5296.misc @@ -0,0 +1 @@ +Refactor keyring.VerifyKeyRequest to use attr.s. 
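
The trade-off behind this commit message shows up in a small standalone
sketch (an illustrative class, not the Synapse one): a namedtuple is
iterable, indexable and compares by value, none of which suits an object
that tracks an in-flight request, while the attrs declaration keeps named
fields but compares by identity.

    import attr

    @attr.s(slots=True, cmp=False)
    class Request(object):
        server_name = attr.ib()
        key_ids = attr.ib()

    rq1 = Request(server_name="example.org", key_ids={"ed25519:akey"})
    rq2 = Request(server_name="example.org", key_ids={"ed25519:akey"})

    # cmp=False leaves the default identity comparison, so two requests
    # with identical fields stay distinct; a namedtuple would compare
    # them equal and would also happily unpack positionally.
    assert rq1 != rq2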
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index c63f106cf366..e1e026214f01 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -15,12 +15,12 @@ # limitations under the License. import logging -from collections import namedtuple import six from six import raise_from from six.moves import urllib +import attr from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -57,22 +57,26 @@ logger = logging.getLogger(__name__) -VerifyKeyRequest = namedtuple( - "VerifyRequest", ("server_name", "key_ids", "json_object", "deferred") -) -""" -A request for a verify key to verify a JSON object. - -Attributes: - server_name(str): The name of the server to verify against. - key_ids(set(str)): The set of key_ids to that could be used to verify the - JSON object - json_object(dict): The JSON object to verify. - deferred(Deferred[str, str, nacl.signing.VerifyKey]): - A deferred (server_name, key_id, verify_key) tuple that resolves when - a verify key has been fetched. The deferreds' callbacks are run with no - logcontext. -""" +@attr.s(slots=True, cmp=False) +class VerifyKeyRequest(object): + """ + A request for a verify key to verify a JSON object. + + Attributes: + server_name(str): The name of the server to verify against. + key_ids(set[str]): The set of key_ids to that could be used to verify the + JSON object + json_object(dict): The JSON object to verify. + deferred(Deferred[str, str, nacl.signing.VerifyKey]): + A deferred (server_name, key_id, verify_key) tuple that resolves when + a verify key has been fetched. The deferreds' callbacks are run with no + logcontext. + """ + + server_name = attr.ib() + key_ids = attr.ib() + json_object = attr.ib() + deferred = attr.ib() class KeyLookupError(ValueError): From a82c96b87fd6fb8b8c71cc34e6a712a12ff4222f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 18:30:13 +0100 Subject: [PATCH 074/231] Rewrite get_server_verify_keys, again. Attempt to simplify the logic in get_server_verify_keys by splitting it into two methods. --- changelog.d/5299.misc | 1 + synapse/crypto/keyring.py | 101 ++++++++++++++++++++------------------ 2 files changed, 54 insertions(+), 48 deletions(-) create mode 100644 changelog.d/5299.misc diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc new file mode 100644 index 000000000000..53297c768b95 --- /dev/null +++ b/changelog.d/5299.misc @@ -0,0 +1 @@ +Rewrite get_server_verify_keys, again. 
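
The rewrite below is easier to follow with the intended control flow in
mind. Roughly, as an illustrative model that elides deferreds, logcontexts
and the Measure block:

    def satisfy_requests(requests, fetchers):
        remaining = set(requests)
        for fetcher in fetchers:
            if not remaining:
                break
            # each attempt removes whichever requests this fetcher satisfied
            remaining -= set(rq for rq in remaining if fetcher(rq))
        # anything left over has failed; the real code errbacks each such
        # request with a 401 SynapseError
        return remaining

For instance, satisfy_requests({"a", "b"}, [lambda rq: rq == "a"]) leaves
{"b"} to be failed.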
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index c63f106cf366..194867db0369 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -270,59 +270,21 @@ def _get_server_verify_keys(self, verify_requests):
             verify_requests (list[VerifyKeyRequest]): list of verify requests
         """
+        remaining_requests = set(
+            (rq for rq in verify_requests if not rq.deferred.called)
+        )
+
         @defer.inlineCallbacks
         def do_iterations():
             with Measure(self.clock, "get_server_verify_keys"):
-                # dict[str, set(str)]: keys to fetch for each server
-                missing_keys = {}
-                for verify_request in verify_requests:
-                    missing_keys.setdefault(verify_request.server_name, set()).update(
-                        verify_request.key_ids
-                    )
-
                 for f in self._key_fetchers:
-                    results = yield f.get_keys(missing_keys.items())
-
-                    # We now need to figure out which verify requests we have keys
-                    # for and which we don't
-                    missing_keys = {}
-                    requests_missing_keys = []
-                    for verify_request in verify_requests:
-                        if verify_request.deferred.called:
-                            # We've already called this deferred, which probably
-                            # means that we've already found a key for it.
-                            continue
-
-                        server_name = verify_request.server_name
-
-                        # see if any of the keys we got this time are sufficient to
-                        # complete this VerifyKeyRequest.
-                        result_keys = results.get(server_name, {})
-                        for key_id in verify_request.key_ids:
-                            fetch_key_result = result_keys.get(key_id)
-                            if fetch_key_result:
-                                with PreserveLoggingContext():
-                                    verify_request.deferred.callback(
-                                        (
-                                            server_name,
-                                            key_id,
-                                            fetch_key_result.verify_key,
-                                        )
-                                    )
-                                break
-                        else:
-                            # The else block is only reached if the loop above
-                            # doesn't break.
-                            missing_keys.setdefault(server_name, set()).update(
-                                verify_request.key_ids
-                            )
-                            requests_missing_keys.append(verify_request)
-
-                    if not missing_keys:
-                        break
+                    if not remaining_requests:
+                        return
+                    yield self._attempt_key_fetches_with_fetcher(f, remaining_requests)

+                # look for any requests which weren't satisfied
                 with PreserveLoggingContext():
-                    for verify_request in requests_missing_keys:
+                    for verify_request in remaining_requests:
                         verify_request.deferred.errback(
                             SynapseError(
                                 401,
@@ -333,13 +295,56 @@ def do_iterations():
             )

         def on_err(err):
+            # we don't really expect to get here, because any errors should already
+            # have been caught and logged. But if we do, let's log the error and make
+            # sure that all of the deferreds are resolved.
+            logger.error("Unexpected error in _get_server_verify_keys: %s", err)
             with PreserveLoggingContext():
-                for verify_request in verify_requests:
+                for verify_request in remaining_requests:
                     if not verify_request.deferred.called:
                         verify_request.deferred.errback(err)

         run_in_background(do_iterations).addErrback(on_err)

+    @defer.inlineCallbacks
+    def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests):
+        """Use a key fetcher to attempt to satisfy some key requests
+
+        Args:
+            fetcher (KeyFetcher): fetcher to use to fetch the keys
+            remaining_requests (set[VerifyKeyRequest]): outstanding key requests.
+                Any successfully-completed requests will be removed from the list.
+ """ + # dict[str, set(str)]: keys to fetch for each server + missing_keys = {} + for verify_request in remaining_requests: + # any completed requests should already have been removed + assert not verify_request.deferred.called + missing_keys.setdefault(verify_request.server_name, set()).update( + verify_request.key_ids + ) + + results = yield fetcher.get_keys(missing_keys.items()) + + completed = list() + for verify_request in remaining_requests: + server_name = verify_request.server_name + + # see if any of the keys we got this time are sufficient to + # complete this VerifyKeyRequest. + result_keys = results.get(server_name, {}) + for key_id in verify_request.key_ids: + key = result_keys.get(key_id) + if key: + with PreserveLoggingContext(): + verify_request.deferred.callback( + (server_name, key_id, key.verify_key) + ) + completed.append(verify_request) + break + + remaining_requests.difference_update(completed) + class KeyFetcher(object): def get_keys(self, server_name_and_key_ids): From 8ea2f756a947d668afc9a6b22707c12a29af6be4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 May 2019 17:21:39 +0100 Subject: [PATCH 075/231] Remove some pointless exception handling The verify_request deferred already returns a suitable SynapseError, so I don't really know what we expect to achieve by doing more wrapping, other than log spam. Fixes #4278. --- changelog.d/5300.bugfix | 1 + synapse/crypto/keyring.py | 33 ++++++++------------------------- 2 files changed, 9 insertions(+), 25 deletions(-) create mode 100644 changelog.d/5300.bugfix diff --git a/changelog.d/5300.bugfix b/changelog.d/5300.bugfix new file mode 100644 index 000000000000..049e93cd5a41 --- /dev/null +++ b/changelog.d/5300.bugfix @@ -0,0 +1 @@ +Fix noisy 'no key for server' logs. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e1e026214f01..5756478ad7af 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -64,13 +64,19 @@ class VerifyKeyRequest(object): Attributes: server_name(str): The name of the server to verify against. + key_ids(set[str]): The set of key_ids to that could be used to verify the JSON object + json_object(dict): The JSON object to verify. + deferred(Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no logcontext. + + If we are unable to find a key which satisfies the request, the deferred + errbacks with an M_UNAUTHORIZED SynapseError. 
""" server_name = attr.ib() @@ -771,31 +777,8 @@ def _handle_key_deferred(verify_request): SynapseError if there was a problem performing the verification """ server_name = verify_request.server_name - try: - with PreserveLoggingContext(): - _, key_id, verify_key = yield verify_request.deferred - except KeyLookupError as e: - logger.warn( - "Failed to download keys for %s: %s %s", - server_name, - type(e).__name__, - str(e), - ) - raise SynapseError( - 502, "Error downloading keys for %s" % (server_name,), Codes.UNAUTHORIZED - ) - except Exception as e: - logger.exception( - "Got Exception when downloading keys for %s: %s %s", - server_name, - type(e).__name__, - str(e), - ) - raise SynapseError( - 401, - "No key for %s with id %s" % (server_name, verify_request.key_ids), - Codes.UNAUTHORIZED, - ) + with PreserveLoggingContext(): + _, key_id, verify_key = yield verify_request.deferred json_object = verify_request.json_object From 3e1af5109cb91b8e22f0e14aee875f86bd9fcd92 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 31 May 2019 02:45:46 -0600 Subject: [PATCH 076/231] Clarify that the admin change password endpoint logs them out (#5303) --- changelog.d/5303.misc | 1 + docs/admin_api/user_admin_api.rst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5303.misc diff --git a/changelog.d/5303.misc b/changelog.d/5303.misc new file mode 100644 index 000000000000..f6a7f1f8e37f --- /dev/null +++ b/changelog.d/5303.misc @@ -0,0 +1 @@ +Clarify that the admin change password API logs the user out. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 8aca4f158de0..213359d0c053 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -69,7 +69,7 @@ An empty body may be passed for backwards compatibility. Reset password ============== -Changes the password of another user. +Changes the password of another user. This will automatically log the user out of all their devices. The api is:: From 847b9dcd1c9d7d7a43333e85f69dc78471095475 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:54:46 +0100 Subject: [PATCH 077/231] Make max_delta equal to period * 10% --- synapse/config/registration.py | 15 ++++----------- synapse/storage/_base.py | 7 +++---- tests/rest/client/v2_alpha/test_register.py | 18 +----------------- 3 files changed, 8 insertions(+), 32 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index b4fd4af368d7..1835b4b1f376 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -39,9 +39,7 @@ def __init__(self, config, synapse_config): else: self.renew_email_subject = "Renew your %(app)s account" - self.startup_job_max_delta = self.parse_duration( - config.get("startup_job_max_delta", 0), - ) + self.startup_job_max_delta = self.period * 10. / 100. if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: raise ConfigError("Can't send renewal emails without 'public_baseurl'") @@ -133,20 +131,15 @@ def default_config(self, generate_secrets=False, **kwargs): # This means that, if a validity period is set, and Synapse is restarted (it will # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' - # expiration dates won't be updated unless their account is manually renewed. 
- # - # If set, the ``startup_job_max_delta`` optional setting will make the startup job - # described above set a random expiration date between t + period and - # t + period + startup_job_max_delta, t being the date and time at which the job - # sets the expiration date for a given user. This is useful for server admins that - # want to avoid Synapse sending a lot of renewal emails at once. + # expiration dates won't be updated unless their account is manually renewed. This + # date will be randomly selected within a range [now + period ; now + period + d], + # where d is equal to 10% of the validity period. # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" - # startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 40802fd3dc43..7f944ec71753 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -329,14 +329,13 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): user_id (str): User ID to set an expiration date for. use_delta (bool): If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a - random value in the [now + period; now + period + max_delta] range, - max_delta being the configured value for the size of the range, unless - delta is 0, in which case it sets it to now + period. + random value in the [now + period; now + period + d] range, d being a + delta equal to 10% of the validity period. """ now_ms = self._clock.time_msec() expiration_ts = now_ms + self._account_validity.period - if use_delta and self._account_validity.startup_job_max_delta: + if use_delta: expiration_ts = self.rand.randrange( expiration_ts, expiration_ts + self._account_validity.startup_job_max_delta, diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 68654e25ab2e..711628ded114 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -436,7 +436,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.validity_period = 10 - self.max_delta = 10 + self.max_delta = self.validity_period * 10. / 100. config = self.default_config() @@ -453,22 +453,6 @@ def make_homeserver(self, reactor, clock): return self.hs def test_background_job(self): - """ - Tests whether the account validity startup background job does the right thing, - which is sticking an expiration date to every account that doesn't already have - one. 
- """ - user_id = self.register_user("kermit", "user") - - self.hs.config.account_validity.startup_job_max_delta = 0 - - now_ms = self.hs.clock.time_msec() - self.get_success(self.store._set_expiration_date_when_missing()) - - res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertEqual(res, now_ms + self.validity_period) - - def test_background_job_with_max_delta(self): """ Tests the same thing as test_background_job, except that it sets the startup_job_max_delta parameter and checks that the expiration date is within the From 0c2362861e3fad44ede5e9c23dbef8e1a9113f36 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:56:52 +0100 Subject: [PATCH 078/231] Gah python --- synapse/config/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 1835b4b1f376..4af825a2ab41 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -133,7 +133,7 @@ def default_config(self, generate_secrets=False, **kwargs): # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This # date will be randomly selected within a range [now + period ; now + period + d], - # where d is equal to 10% of the validity period. + # where d is equal to 10%% of the validity period. # #account_validity: # enabled: True From 6bfc5ad3a189acd993a1ef9db36d28b963be345d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:56:57 +0100 Subject: [PATCH 079/231] Sample config --- docs/sample_config.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8ff53d5cb40c..13c0ddc7c5cf 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -753,20 +753,15 @@ uploads_path: "DATADIR/uploads" # This means that, if a validity period is set, and Synapse is restarted (it will # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' -# expiration dates won't be updated unless their account is manually renewed. -# -# If set, the ``startup_job_max_delta`` optional setting will make the startup job -# described above set a random expiration date between t + period and -# t + period + startup_job_max_delta, t being the date and time at which the job -# sets the expiration date for a given user. This is useful for server admins that -# want to avoid Synapse sending a lot of renewal emails at once. +# expiration dates won't be updated unless their account is manually renewed. This +# date will be randomly selected within a range [now + period ; now + period + d], +# where d is equal to 10% of the validity period. # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" -# startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # From 5037326d6624d1d1780a0536d19ff79e275f8735 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:37:57 +0100 Subject: [PATCH 080/231] Add indices. Remove room_ids accidentally added We have to do this by re-inserting a background update and recreating tables, as the tables only get created during a background update and will later be deleted. 
We also make sure that we remove any entries that should have been removed but weren't due to a race that has been fixed in a previous commit. --- synapse/storage/schema/delta/54/stats2.sql | 28 +++++++++++++++ synapse/storage/stats.py | 41 +++++++++++++++------- 2 files changed, 56 insertions(+), 13 deletions(-) create mode 100644 synapse/storage/schema/delta/54/stats2.sql diff --git a/synapse/storage/schema/delta/54/stats2.sql b/synapse/storage/schema/delta/54/stats2.sql new file mode 100644 index 000000000000..3b2d48447f42 --- /dev/null +++ b/synapse/storage/schema/delta/54/stats2.sql @@ -0,0 +1,28 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This delta file gets run after `54/stats.sql` delta. + +-- We want to add some indices to the temporary stats table, so we re-insert +-- 'populate_stats_createtables' if we are still processing the rooms update. +INSERT INTO background_updates (update_name, progress_json) + SELECT 'populate_stats_createtables', '{}' + WHERE + 'populate_stats_process_rooms' IN ( + SELECT update_name FROM background_updates + ) + AND 'populate_stats_createtables' NOT IN ( -- don't insert if already exists + SELECT update_name FROM background_updates + ); diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index a99637d4b40b..1c0b183a5681 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -18,6 +18,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.storage.prepare_database import get_statements from synapse.storage.state_deltas import StateDeltasStore from synapse.util.caches.descriptors import cached @@ -69,12 +70,25 @@ def _populate_stats_createtables(self, progress, batch_size): # Get all the rooms that we want to process. def _make_staging_area(txn): - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)" - ) - txn.execute(sql) + # Create the temporary tables + stmts = get_statements(""" + -- We just recreate the table, we'll be reinserting the + -- correct entries again later anyway. + DROP TABLE IF EXISTS {temp}_rooms; + + CREATE TABLE IF NOT EXISTS {temp}_rooms( + room_id TEXT NOT NULL, + events BIGINT NOT NULL + ); + + CREATE INDEX {temp}_rooms_events + ON {temp}_rooms(events); + CREATE INDEX {temp}_rooms_id + ON {temp}_rooms(room_id); + """.format(temp=TEMP_TABLE).splitlines()) + + for statement in stmts: + txn.execute(statement) sql = ( "CREATE TABLE IF NOT EXISTS " @@ -83,15 +97,16 @@ def _make_staging_area(txn): ) txn.execute(sql) - # Get rooms we want to process from the database + # Get rooms we want to process from the database, only adding + # those that we haven't (i.e. 
those not in room_stats_earliest_token) sql = """ - SELECT room_id, count(*) FROM current_state_events - GROUP BY room_id - """ + INSERT INTO %s_rooms (room_id, events) + SELECT c.room_id, count(*) FROM current_state_events AS c + LEFT JOIN room_stats_earliest_token AS t USING (room_id) + WHERE t.room_id IS NULL + GROUP BY c.room_id + """ % (TEMP_TABLE,) txn.execute(sql) - rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()] - self._simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms) - del rooms new_pos = yield self.get_max_stream_id_in_current_state_deltas() yield self.runInteraction("populate_stats_temp_build", _make_staging_area) From 39bbf6a4a5b954de56865a2aa0877587acbd9552 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 16:07:23 +0100 Subject: [PATCH 081/231] Newsfile --- changelog.d/5294.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5294.bugfix diff --git a/changelog.d/5294.bugfix b/changelog.d/5294.bugfix new file mode 100644 index 000000000000..5924bda31971 --- /dev/null +++ b/changelog.d/5294.bugfix @@ -0,0 +1 @@ +Fix performance problems with the rooms stats background update. From 4d794dae210ce30e87d8a6b9ee2f9b481cadf539 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 11:09:34 +0100 Subject: [PATCH 082/231] Move delta from +10% to -10% --- synapse/config/registration.py | 2 +- synapse/storage/_base.py | 4 ++-- tests/rest/client/v2_alpha/test_register.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4af825a2ab41..aad3400819ca 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -132,7 +132,7 @@ def default_config(self, generate_secrets=False, **kwargs): # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This - # date will be randomly selected within a range [now + period ; now + period + d], + # date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10%% of the validity period. # #account_validity: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 7f944ec71753..086318a5305f 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -329,7 +329,7 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): user_id (str): User ID to set an expiration date for. use_delta (bool): If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a - random value in the [now + period; now + period + d] range, d being a + random value in the [now + period - d ; now + period] range, d being a delta equal to 10% of the validity period. 
""" now_ms = self._clock.time_msec() @@ -337,8 +337,8 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): if use_delta: expiration_ts = self.rand.randrange( + expiration_ts - self._account_validity.startup_job_max_delta, expiration_ts, - expiration_ts + self._account_validity.startup_job_max_delta, ) self._simple_insert_txn( diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 711628ded114..0cb6a363d64c 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -467,5 +467,5 @@ def test_background_job(self): res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertLessEqual(res, now_ms + self.validity_period + self.max_delta) - self.assertGreaterEqual(res, now_ms + self.validity_period) + self.assertGreaterEqual(res, now_ms + self.validity_period - self.max_delta) + self.assertLessEqual(res, now_ms + self.validity_period) From e975b15101c08299218bd15963a9dc5ea6f990ff Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 11:14:21 +0100 Subject: [PATCH 083/231] Sample config --- docs/sample_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 13c0ddc7c5cf..9536681068af 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -754,7 +754,7 @@ uploads_path: "DATADIR/uploads" # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This -# date will be randomly selected within a range [now + period ; now + period + d], +# date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10% of the validity period. # #account_validity: From 3600f5568b5f8c6902a0dbeeb349c1891f8114b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 May 2019 15:48:36 +0100 Subject: [PATCH 084/231] Stop overwriting server keys with other keys Fix a bug where we would discard a key result which the origin server is no longer returning. Fixes #5305. --- changelog.d/5307.bugfix | 1 + synapse/crypto/keyring.py | 14 ++------------ 2 files changed, 3 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5307.bugfix diff --git a/changelog.d/5307.bugfix b/changelog.d/5307.bugfix new file mode 100644 index 000000000000..6b152f48544a --- /dev/null +++ b/changelog.d/5307.bugfix @@ -0,0 +1 @@ +Fix bug where a notary server would sometimes forget old keys. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5756478ad7af..8f47469a1c3f 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -394,7 +394,7 @@ def __init__(self, hs): @defer.inlineCallbacks def process_v2_response( - self, from_server, response_json, time_added_ms, requested_ids=[] + self, from_server, response_json, time_added_ms ): """Parse a 'Server Keys' structure from the result of a /key request @@ -417,10 +417,6 @@ def process_v2_response( time_added_ms (int): the timestamp to record in server_keys_json - requested_ids (iterable[str]): a list of the key IDs that were requested. 
- We will store the json for these key ids as well as any that are - actually in the response - Returns: Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ @@ -476,11 +472,6 @@ def process_v2_response( signed_key_json_bytes = encode_canonical_json(signed_key_json) - # for reasons I don't quite understand, we store this json for the key ids we - # requested, as well as those we got. - updated_key_ids = set(requested_ids) - updated_key_ids.update(verify_keys) - yield logcontext.make_deferred_yieldable( defer.gatherResults( [ @@ -493,7 +484,7 @@ def process_v2_response( ts_expires_ms=ts_valid_until_ms, key_json_bytes=signed_key_json_bytes, ) - for key_id in updated_key_ids + for key_id in verify_keys ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -749,7 +740,6 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): response_keys = yield self.process_v2_response( from_server=server_name, - requested_ids=[requested_key_id], response_json=response, time_added_ms=time_now_ms, ) From d16c6375fe39deaafd70b151e496f5e15fd7b29c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 1 Jun 2019 10:42:33 +0100 Subject: [PATCH 085/231] Limit displaynames and avatar URLs These end up in join events everywhere, so let's limit them. Fixes #5079 --- changelog.d/5309.bugfix | 1 + synapse/handlers/profile.py | 13 +++++++++++++ synapse/handlers/register.py | 2 ++ 3 files changed, 16 insertions(+) create mode 100644 changelog.d/5309.bugfix diff --git a/changelog.d/5309.bugfix b/changelog.d/5309.bugfix new file mode 100644 index 000000000000..97b35272665c --- /dev/null +++ b/changelog.d/5309.bugfix @@ -0,0 +1 @@ +Prevent users from setting huge displaynames and avatar URLs. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 91fc718ff833..a5fc6c5dbf82 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -31,6 +31,9 @@ logger = logging.getLogger(__name__) +MAX_DISPLAYNAME_LEN = 100 +MAX_AVATAR_URL_LEN = 1000 + class BaseProfileHandler(BaseHandler): """Handles fetching and updating user profile information. @@ -162,6 +165,11 @@ def set_displayname(self, target_user, requester, new_displayname, by_admin=Fals if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") + if len(new_displayname) > MAX_DISPLAYNAME_LEN: + raise SynapseError( + 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ), + ) + if new_displayname == '': new_displayname = None @@ -217,6 +225,11 @@ def set_avatar_url(self, target_user, requester, new_avatar_url, by_admin=False) if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") + if len(new_avatar_url) > MAX_AVATAR_URL_LEN: + raise SynapseError( + 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ), + ) + yield self.store.set_profile_avatar_url( target_user.localpart, new_avatar_url ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e83ee24f103b..9a388ea013f6 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -531,6 +531,8 @@ def get_or_create_user(self, requester, localpart, displayname, A tuple of (user_id, access_token). Raises: RegistrationError if there was a problem registering. + + NB this is only used in tests. TODO: move it to the test package! 
""" if localpart is None: raise SynapseError(400, "Request must include user id") From 93003aa1720af846f238bd0c6fd2f2a0df3c20ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 1 Jun 2019 11:13:49 +0100 Subject: [PATCH 086/231] add some tests --- tests/rest/client/v1/test_profile.py | 62 +++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 769c37ce5247..f4d0d48dad89 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -14,6 +14,8 @@ # limitations under the License. """Tests REST events for /profile paths.""" +import json + from mock import Mock from twisted.internet import defer @@ -31,8 +33,11 @@ PATH_PREFIX = "/_matrix/client/api/v1" -class ProfileTestCase(unittest.TestCase): - """ Tests profile management. """ +class MockHandlerProfileTestCase(unittest.TestCase): + """ Tests rest layer of profile management. + + Todo: move these into ProfileTestCase + """ @defer.inlineCallbacks def setUp(self): @@ -159,6 +164,59 @@ def test_set_my_avatar(self): self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") +class ProfileTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + profile.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.hs = self.setup_test_homeserver() + return self.hs + + def prepare(self, reactor, clock, hs): + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + + def test_set_displayname(self): + request, channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner, ), + content=json.dumps({"displayname": "test"}), + access_token=self.owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + res = self.get_displayname() + self.assertEqual(res, "test") + + def test_set_displayname_too_long(self): + """Attempts to set a stupid displayname should get a 400""" + request, channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner, ), + content=json.dumps({"displayname": "test" * 100}), + access_token=self.owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, 400, channel.result) + + res = self.get_displayname() + self.assertEqual(res, "owner") + + def get_displayname(self): + request, channel = self.make_request( + "GET", + "/profile/%s/displayname" % (self.owner, ), + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + return channel.json_body["displayname"] + + class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): servlets = [ From 220a733d7379be88514f7681ec37388755d4e612 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 09:56:45 +0100 Subject: [PATCH 087/231] Fix handling of failures when calling /event_auth. When processing an incoming event over federation, we may try and resolve any unexpected differences in auth events. This is a non-essential process and so should not stop the processing of the event if it fails (e.g. due to the remote disappearing or not implementing the necessary endpoints). 
Fixes #3330

---
 synapse/handlers/federation.py | 50 ++++++++++++++++++++++++++--------
 1 file changed, 38 insertions(+), 12 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index cf4fad7de0c9..fa735efedd00 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -35,6 +35,7 @@
     CodeMessageException,
     FederationDeniedError,
     FederationError,
+    RequestSendFailed,
     StoreError,
     SynapseError,
 )
@@ -2027,9 +2028,15 @@ def do_auth(self, origin, event, context, auth_events):
         """
         room_version = yield self.store.get_room_version(event.room_id)

-        yield self._update_auth_events_and_context_for_auth(
-            origin, event, context, auth_events
-        )
+        try:
+            yield self._update_auth_events_and_context_for_auth(
+                origin, event, context, auth_events
+            )
+        except Exception:
+            # We don't really mind if the above fails, so let's not fail
+            # processing if it does.
+            logger.exception("Failed to call _update_auth_events_and_context_for_auth")
+
         try:
             self.auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
@@ -2042,6 +2049,15 @@ def _update_auth_events_and_context_for_auth(
     ):
         """Helper for do_auth. See there for docs.

+        Checks whether a given event has the expected auth events. If it
+        doesn't then we talk to the remote server to compare state to see if
+        we can come to a consensus (e.g. if one server missed some valid
+        state).
+
+        This attempts to resolve any potential divergence of state between
+        servers, but is not essential and so failures should not block further
+        processing of the event.
+
         Args:
             origin (str):
             event (synapse.events.EventBase):
@@ -2088,9 +2104,14 @@
                 missing_auth,
             )
             try:
-                remote_auth_chain = yield self.federation_client.get_event_auth(
-                    origin, event.room_id, event.event_id
-                )
+                try:
+                    remote_auth_chain = yield self.federation_client.get_event_auth(
+                        origin, event.room_id, event.event_id
+                    )
+                except RequestSendFailed:
+                    # The other side isn't around or doesn't implement the
+                    # endpoint, so let's just bail out.
+                    return

                 seen_remotes = yield self.store.have_seen_events(
                     [e.event_id for e in remote_auth_chain]
@@ -2236,12 +2257,17 @@
             try:
                 # 2. Get remote difference.
-                result = yield self.federation_client.query_auth(
-                    origin,
-                    event.room_id,
-                    event.event_id,
-                    local_auth_chain,
-                )
+                try:
+                    result = yield self.federation_client.query_auth(
+                        origin,
+                        event.room_id,
+                        event.event_id,
+                        local_auth_chain,
+                    )
+                except RequestSendFailed:
+                    # The other side isn't around or doesn't implement the
+                    # endpoint, so let's just bail out.
+                    return

                 seen_remotes = yield self.store.have_seen_events(
                     [e.event_id for e in result["auth_chain"]]

From fde37e4e98163c269a2b82e4892a70b2e37c619c Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 3 Jun 2019 10:22:03 +0100
Subject: [PATCH 088/231] Newsfile

---
 changelog.d/5317.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5317.bugfix

diff --git a/changelog.d/5317.bugfix b/changelog.d/5317.bugfix
new file mode 100644
index 000000000000..270937521493
--- /dev/null
+++ b/changelog.d/5317.bugfix
@@ -0,0 +1 @@
+Fix handling of failures when processing incoming events where calling `/event_auth` on the remote server fails.
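In isolation, the "best effort" shape that patch 087 gives both remote calls looks like the hedged sketch below (hypothetical names; the real code lives in `synapse/handlers/federation.py` and is Deferred-based). Only the network call sits inside the narrow `except`, so genuine programming errors still propagate:

```python
import logging

logger = logging.getLogger(__name__)


class RequestSendFailed(Exception):
    """Stand-in for synapse.api.errors.RequestSendFailed."""


def best_effort_remote_auth_chain(federation_client, origin, room_id, event_id):
    """Fetch a remote auth chain, treating transport failure as "no answer"."""
    try:
        return federation_client.get_event_auth(origin, room_id, event_id)
    except RequestSendFailed:
        # The remote is unreachable or doesn't implement the endpoint; this
        # step is optional, so callers treat None as "skip it" rather than
        # failing the whole event.
        logger.info("Could not fetch auth chain from %s, skipping", origin)
        return None
```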
From 2889b055543c8db6bf93eaad7035d0eca1ec2874 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 21:28:59 +1000 Subject: [PATCH 089/231] Unify v1 and v2 REST client APIs (#5226) --- changelog.d/5226.misc | 1 + scripts-dev/list_url_patterns.py | 4 +- synapse/app/frontend_proxy.py | 11 +- synapse/rest/client/v1/base.py | 65 --------- synapse/rest/client/v1/directory.py | 28 ++-- synapse/rest/client/v1/events.py | 17 ++- synapse/rest/client/v1/initial_sync.py | 12 +- synapse/rest/client/v1/login.py | 22 +-- synapse/rest/client/v1/logout.py | 19 +-- synapse/rest/client/v1/presence.py | 13 +- synapse/rest/client/v1/profile.py | 29 ++-- synapse/rest/client/v1/push_rule.py | 12 +- synapse/rest/client/v1/pusher.py | 21 +-- synapse/rest/client/v1/room.py | 137 +++++++++++------- synapse/rest/client/v1/voip.py | 11 +- synapse/rest/client/v2_alpha/_base.py | 6 +- synapse/rest/client/v2_alpha/account.py | 20 +-- synapse/rest/client/v2_alpha/account_data.py | 6 +- .../rest/client/v2_alpha/account_validity.py | 6 +- synapse/rest/client/v2_alpha/auth.py | 4 +- synapse/rest/client/v2_alpha/capabilities.py | 4 +- synapse/rest/client/v2_alpha/devices.py | 8 +- synapse/rest/client/v2_alpha/filter.py | 6 +- synapse/rest/client/v2_alpha/groups.py | 50 +++---- synapse/rest/client/v2_alpha/keys.py | 10 +- synapse/rest/client/v2_alpha/notifications.py | 4 +- synapse/rest/client/v2_alpha/openid.py | 4 +- synapse/rest/client/v2_alpha/read_marker.py | 4 +- synapse/rest/client/v2_alpha/receipts.py | 4 +- synapse/rest/client/v2_alpha/register.py | 10 +- synapse/rest/client/v2_alpha/relations.py | 12 +- synapse/rest/client/v2_alpha/report_event.py | 4 +- synapse/rest/client/v2_alpha/room_keys.py | 8 +- .../v2_alpha/room_upgrade_rest_servlet.py | 4 +- synapse/rest/client/v2_alpha/sendtodevice.py | 4 +- synapse/rest/client/v2_alpha/sync.py | 4 +- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 +- synapse/rest/client/v2_alpha/tokenrefresh.py | 4 +- .../rest/client/v2_alpha/user_directory.py | 4 +- tests/__init__.py | 2 +- tests/rest/admin/test_admin.py | 1 - tests/rest/client/v1/test_profile.py | 2 +- 43 files changed, 296 insertions(+), 317 deletions(-) create mode 100644 changelog.d/5226.misc delete mode 100644 synapse/rest/client/v1/base.py diff --git a/changelog.d/5226.misc b/changelog.d/5226.misc new file mode 100644 index 000000000000..e1b9dc58a3d9 --- /dev/null +++ b/changelog.d/5226.misc @@ -0,0 +1 @@ +The base classes for the v1 and v2_alpha REST APIs have been unified. 
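The unified `client_patterns()` helper that replaces the two old pattern builders can be inferred from its call sites in this patch: `v1=True` adds the legacy `/api/v1` prefix alongside the `/r0` and `/unstable` ones. A hedged sketch of such a helper follows; it is not the verbatim Synapse implementation, which lives in `synapse/rest/client/v2_alpha/_base.py`:

```python
import re

CLIENT_API_PREFIX = "/_matrix/client"  # assumed value of the synapse.api.urls constant


def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
    """Compile URL patterns for one client-API path across API versions."""
    patterns = []
    if unstable:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
    if v1:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
    for release in releases:
        patterns.append(re.compile("^%s/r%d%s" % (CLIENT_API_PREFIX, release, path_regex)))
    return patterns


# e.g. PATTERNS = client_patterns("/logout$", v1=True)
```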
diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index da027be26e9e..62e5a07472b1 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -20,9 +20,7 @@ def visit_Call(self, node): else: return - if name == "client_path_patterns": - PATTERNS_V1.append(node.args[0].s) - elif name == "client_v2_patterns": + if name == "client_patterns": PATTERNS_V2.append(node.args[0].s) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8479fee73813..6504da527825 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -37,8 +37,7 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns -from synapse.rest.client.v2_alpha._base import client_v2_patterns +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree @@ -49,11 +48,11 @@ logger = logging.getLogger("synapse.app.frontend_proxy") -class PresenceStatusStubServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") +class PresenceStatusStubServlet(RestServlet): + PATTERNS = client_patterns("/presence/(?P[^/]*)/status") def __init__(self, hs): - super(PresenceStatusStubServlet, self).__init__(hs) + super(PresenceStatusStubServlet, self).__init__() self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() self.main_uri = hs.config.worker_main_http_uri @@ -84,7 +83,7 @@ def on_PUT(self, request, user_id): class KeyUploadServlet(RestServlet): - PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$") + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py deleted file mode 100644 index dc63b661c02d..000000000000 --- a/synapse/rest/client/v1/base.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This module contains base REST classes for constructing client v1 servlets. -""" - -import logging -import re - -from synapse.api.urls import CLIENT_API_PREFIX -from synapse.http.servlet import RestServlet -from synapse.rest.client.transactions import HttpTransactionCache - -logger = logging.getLogger(__name__) - - -def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True): - """Creates a regex compiled client path with the correct client path - prefix. - - Args: - path_regex (str): The regex string to match. This should NOT have a ^ - as this will be prefixed. 
- Returns: - SRE_Pattern - """ - patterns = [re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex)] - if include_in_unstable: - unstable_prefix = CLIENT_API_PREFIX + "/unstable" - patterns.append(re.compile("^" + unstable_prefix + path_regex)) - for release in releases: - new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) - patterns.append(re.compile("^" + new_prefix + path_regex)) - return patterns - - -class ClientV1RestServlet(RestServlet): - """A base Synapse REST Servlet for the client version 1 API. - """ - - # This subclass was presumably created to allow the auth for the v1 - # protocol version to be different, however this behaviour was removed. - # it may no longer be necessary - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): - """ - self.hs = hs - self.builder_factory = hs.get_event_builder_factory() - self.auth = hs.get_auth() - self.txns = HttpTransactionCache(hs) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 0220acf644c3..0035182bb91d 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -19,11 +19,10 @@ from twisted.internet import defer from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import RoomAlias -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) @@ -33,13 +32,14 @@ def register_servlets(hs, http_server): ClientAppserviceDirectoryListServer(hs).register(http_server) -class ClientDirectoryServer(ClientV1RestServlet): - PATTERNS = client_path_patterns("/directory/room/(?P[^/]*)$") +class ClientDirectoryServer(RestServlet): + PATTERNS = client_patterns("/directory/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryServer, self).__init__(hs) + super(ClientDirectoryServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_alias): @@ -120,13 +120,14 @@ def on_DELETE(self, request, room_alias): defer.returnValue((200, {})) -class ClientDirectoryListServer(ClientV1RestServlet): - PATTERNS = client_path_patterns("/directory/list/room/(?P[^/]*)$") +class ClientDirectoryListServer(RestServlet): + PATTERNS = client_patterns("/directory/list/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryListServer, self).__init__(hs) + super(ClientDirectoryListServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -162,15 +163,16 @@ def on_DELETE(self, request, room_id): defer.returnValue((200, {})) -class ClientAppserviceDirectoryListServer(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/directory/list/appservice/(?P[^/]*)/(?P[^/]*)$" +class ClientAppserviceDirectoryListServer(RestServlet): + PATTERNS = client_patterns( + "/directory/list/appservice/(?P[^/]*)/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(ClientAppserviceDirectoryListServer, self).__init__(hs) + super(ClientAppserviceDirectoryListServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() def on_PUT(self, request, network_id, room_id): content = 
parse_json_object_from_request(request) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index c3b0a39ab704..84ca36270bf2 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -19,21 +19,22 @@ from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.streams.config import PaginationConfig -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class EventStreamRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/events$") +class EventStreamRestServlet(RestServlet): + PATTERNS = client_patterns("/events$", v1=True) DEFAULT_LONGPOLL_TIME_MS = 30000 def __init__(self, hs): - super(EventStreamRestServlet, self).__init__(hs) + super(EventStreamRestServlet, self).__init__() self.event_stream_handler = hs.get_event_stream_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -76,11 +77,11 @@ def on_OPTIONS(self, request): # TODO: Unit test gets, with and without auth, with different kinds of events. -class EventRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/events/(?P[^/]*)$") +class EventRestServlet(RestServlet): + PATTERNS = client_patterns("/events/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(EventRestServlet, self).__init__(hs) + super(EventRestServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 3ead75cb77ce..0fe5f2d79bd6 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -15,19 +15,19 @@ from twisted.internet import defer -from synapse.http.servlet import parse_boolean +from synapse.http.servlet import RestServlet, parse_boolean +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.streams.config import PaginationConfig -from .base import ClientV1RestServlet, client_path_patterns - # TODO: Needs unit testing -class InitialSyncRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/initialSync$") +class InitialSyncRestServlet(RestServlet): + PATTERNS = client_patterns("/initialSync$", v1=True) def __init__(self, hs): - super(InitialSyncRestServlet, self).__init__(hs) + super(InitialSyncRestServlet, self).__init__() self.initial_sync_handler = hs.get_initial_sync_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 029039c162fd..3b60728628bb 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -29,12 +29,11 @@ parse_json_object_from_request, parse_string, ) +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import UserID, map_username_to_mxid_localpart from synapse.util.msisdn import phone_number_to_msisdn -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) @@ -81,15 +80,16 @@ def login_id_thirdparty_from_phone(identifier): } -class LoginRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login$") +class LoginRestServlet(RestServlet): + PATTERNS = client_patterns("/login$", 
v1=True) CAS_TYPE = "m.login.cas" SSO_TYPE = "m.login.sso" TOKEN_TYPE = "m.login.token" JWT_TYPE = "m.login.jwt" def __init__(self, hs): - super(LoginRestServlet, self).__init__(hs) + super(LoginRestServlet, self).__init__() + self.hs = hs self.jwt_enabled = hs.config.jwt_enabled self.jwt_secret = hs.config.jwt_secret self.jwt_algorithm = hs.config.jwt_algorithm @@ -371,7 +371,7 @@ def do_jwt_login(self, login_submission): class CasRedirectServlet(RestServlet): - PATTERNS = client_path_patterns("/login/(cas|sso)/redirect") + PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True) def __init__(self, hs): super(CasRedirectServlet, self).__init__() @@ -394,27 +394,27 @@ def on_GET(self, request): finish_request(request) -class CasTicketServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login/cas/ticket") +class CasTicketServlet(RestServlet): + PATTERNS = client_patterns("/login/cas/ticket", v1=True) def __init__(self, hs): - super(CasTicketServlet, self).__init__(hs) + super(CasTicketServlet, self).__init__() self.cas_server_url = hs.config.cas_server_url self.cas_service_url = hs.config.cas_service_url self.cas_required_attributes = hs.config.cas_required_attributes self._sso_auth_handler = SSOAuthHandler(hs) + self._http_client = hs.get_simple_http_client() @defer.inlineCallbacks def on_GET(self, request): client_redirect_url = parse_string(request, "redirectUrl", required=True) - http_client = self.hs.get_simple_http_client() uri = self.cas_server_url + "/proxyValidate" args = { "ticket": parse_string(request, "ticket", required=True), "service": self.cas_service_url } try: - body = yield http_client.get_raw(uri, args) + body = yield self._http_client.get_raw(uri, args) except PartialDownloadError as pde: # Twisted raises this error if the connection is closed, # even if that's being used old-http style to signal end-of-data diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index ba20e75033af..b8064f261e11 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -17,17 +17,18 @@ from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns logger = logging.getLogger(__name__) -class LogoutRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/logout$") +class LogoutRestServlet(RestServlet): + PATTERNS = client_patterns("/logout$", v1=True) def __init__(self, hs): - super(LogoutRestServlet, self).__init__(hs) - self._auth = hs.get_auth() + super(LogoutRestServlet, self).__init__() + self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() @@ -41,7 +42,7 @@ def on_POST(self, request): if requester.device_id is None: # the acccess token wasn't associated with a device. 
# Just delete the access token - access_token = self._auth.get_access_token_from_request(request) + access_token = self.auth.get_access_token_from_request(request) yield self._auth_handler.delete_access_token(access_token) else: yield self._device_handler.delete_device( @@ -50,11 +51,11 @@ def on_POST(self, request): defer.returnValue((200, {})) -class LogoutAllRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/logout/all$") +class LogoutAllRestServlet(RestServlet): + PATTERNS = client_patterns("/logout/all$", v1=True) def __init__(self, hs): - super(LogoutAllRestServlet, self).__init__(hs) + super(LogoutAllRestServlet, self).__init__() self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 045d5a20aca0..e263da3cb7e7 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -23,21 +23,22 @@ from synapse.api.errors import AuthError, SynapseError from synapse.handlers.presence import format_user_presence_state -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import UserID -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class PresenceStatusRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") +class PresenceStatusRestServlet(RestServlet): + PATTERNS = client_patterns("/presence/(?P[^/]*)/status", v1=True) def __init__(self, hs): - super(PresenceStatusRestServlet, self).__init__(hs) + super(PresenceStatusRestServlet, self).__init__() + self.hs = hs self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index eac1966c5eb6..e15d9d82a6b6 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -16,18 +16,19 @@ """ This module contains REST servlets to do with profile: /profile/ """ from twisted.internet import defer -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import UserID -from .base import ClientV1RestServlet, client_path_patterns - -class ProfileDisplaynameRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname") +class ProfileDisplaynameRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)/displayname", v1=True) def __init__(self, hs): - super(ProfileDisplaynameRestServlet, self).__init__(hs) + super(ProfileDisplaynameRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -71,12 +72,14 @@ def on_OPTIONS(self, request, user_id): return (200, {}) -class ProfileAvatarURLRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url") +class ProfileAvatarURLRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)/avatar_url", v1=True) def __init__(self, hs): - 
super(ProfileAvatarURLRestServlet, self).__init__(hs) + super(ProfileAvatarURLRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -119,12 +122,14 @@ def on_OPTIONS(self, request, user_id): return (200, {}) -class ProfileRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)") +class ProfileRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)", v1=True) def __init__(self, hs): - super(ProfileRestServlet, self).__init__(hs) + super(ProfileRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 506ec95ddd4e..3d6326fe2fc3 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -21,22 +21,22 @@ SynapseError, UnrecognizedRequestError, ) -from synapse.http.servlet import parse_json_value_from_request, parse_string +from synapse.http.servlet import RestServlet, parse_json_value_from_request, parse_string from synapse.push.baserules import BASE_RULE_IDS from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException -from .base import ClientV1RestServlet, client_path_patterns - -class PushRuleRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/(?Ppushrules/.*)$") +class PushRuleRestServlet(RestServlet): + PATTERNS = client_patterns("/(?Ppushrules/.*)$", v1=True) SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( "Unrecognised request: You probably wanted a trailing slash") def __init__(self, hs): - super(PushRuleRestServlet, self).__init__(hs) + super(PushRuleRestServlet, self).__init__() + self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() self._is_worker = hs.config.worker_app is not None diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 4c07ae7f45b4..15d860db3762 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -26,17 +26,18 @@ parse_string, ) from synapse.push import PusherConfigException - -from .base import ClientV1RestServlet, client_path_patterns +from synapse.rest.client.v2_alpha._base import client_patterns logger = logging.getLogger(__name__) -class PushersRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushers$") +class PushersRestServlet(RestServlet): + PATTERNS = client_patterns("/pushers$", v1=True) def __init__(self, hs): - super(PushersRestServlet, self).__init__(hs) + super(PushersRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -69,11 +70,13 @@ def on_OPTIONS(self, _): return 200, {} -class PushersSetRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushers/set$") +class PushersSetRestServlet(RestServlet): + PATTERNS = client_patterns("/pushers/set$", v1=True) def __init__(self, hs): - super(PushersSetRestServlet, self).__init__(hs) + super(PushersSetRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() self.notifier = hs.get_notifier() self.pusher_pool = self.hs.get_pusherpool() @@ 
-141,7 +144,7 @@ class PushersRemoveRestServlet(RestServlet): """ To allow pusher to be delete by clicking a link (ie. GET request) """ - PATTERNS = client_path_patterns("/pushers/remove$") + PATTERNS = client_patterns("/pushers/remove$", v1=True) SUCCESS_HTML = b"<html><body>You have been unsubscribed</body></html>" def __init__(self, hs):
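The room.py diff that follows introduces a small TransactionRestServlet base class whose only job is to own an HttpTransactionCache; that cache is what makes the PUT-with-transaction-ID endpoints idempotent, since the first response for a given txn_id is cached and replayed on retries. A rough sketch of the usage pattern, assuming fetch_or_execute_request keeps its existing signature (EchoServlet is hypothetical):

    from synapse.http.servlet import RestServlet
    from synapse.rest.client.transactions import HttpTransactionCache

    class EchoServlet(RestServlet):
        def __init__(self, hs):
            super(EchoServlet, self).__init__()
            self.txns = HttpTransactionCache(hs)

        def on_PUT(self, request, txn_id):
            # A retry carrying the same txn_id gets the cached (code, body)
            # pair replayed instead of re-running the handler.
            return self.txns.fetch_or_execute_request(
                request, self.do_echo, request
            )

        def do_echo(self, request):
            return 200, {}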
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index b92c6a9a9cd5..e8f672c4ba42 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -28,37 +28,45 @@ from synapse.api.filtering import Filter from synapse.events.utils import format_event_for_client_v2 from synapse.http.servlet import ( + RestServlet, assert_params_in_dict, parse_integer, parse_json_object_from_request, parse_string, ) +from synapse.rest.client.transactions import HttpTransactionCache +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class RoomCreateRestServlet(ClientV1RestServlet): +class TransactionRestServlet(RestServlet): + def __init__(self, hs): + super(TransactionRestServlet, self).__init__() + self.txns = HttpTransactionCache(hs) + + +class RoomCreateRestServlet(TransactionRestServlet): # No PATTERN; we have custom dispatch rules here def __init__(self, hs): super(RoomCreateRestServlet, self).__init__(hs) self._room_creation_handler = hs.get_room_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = "/createRoom" register_txn_path(self, PATTERNS, http_server) # define CORS for all of /rooms in RoomCreateRestServlet for simplicity http_server.register_paths("OPTIONS", - client_path_patterns("/rooms(?:/.*)?$"), + client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS) # define CORS for /createRoom[/txnid] http_server.register_paths("OPTIONS", - client_path_patterns("/createRoom(?:/.*)?$"), + client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS) def on_PUT(self, request, txn_id): @@ -85,13 +93,14 @@ def on_OPTIONS(self, request): # TODO: Needs unit testing for generic events -class RoomStateEventRestServlet(ClientV1RestServlet): +class RoomStateEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomStateEventRestServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() def register(self, http_server): # /room/$roomid/state/$eventtype @@ -102,16 +111,16 @@ def register(self, http_server): "(?P<event_type>[^/]*)/(?P<state_key>[^/]*)$") http_server.register_paths("GET", - client_path_patterns(state_key), + client_patterns(state_key, v1=True), self.on_GET) http_server.register_paths("PUT", - client_path_patterns(state_key), + client_patterns(state_key, v1=True), self.on_PUT) http_server.register_paths("GET", - client_path_patterns(no_state_key), + client_patterns(no_state_key, v1=True), self.on_GET_no_state_key) http_server.register_paths("PUT", - client_path_patterns(no_state_key), + client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key) def on_GET_no_state_key(self, request, room_id, event_type): @@ -185,11 +194,12 @@ def on_PUT(self, request, room_id, event_type, state_key, txn_id=None): # TODO: Needs unit testing for generic events + feedback -class RoomSendEventRestServlet(ClientV1RestServlet): +class RoomSendEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomSendEventRestServlet, self).__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): # /rooms/$roomid/send/$event_type[/$txn_id] @@ -229,10 +239,11 @@ def on_PUT(self, request, room_id, event_type, txn_id): # TODO: Needs unit testing for room ID + alias joins -class JoinRoomAliasServlet(ClientV1RestServlet): +class JoinRoomAliasServlet(TransactionRestServlet): def __init__(self, hs): super(JoinRoomAliasServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): # /join/$room_identifier[/$txn_id] @@ -291,8 +302,13 @@ def on_PUT(self, request, room_identifier, txn_id): # TODO: Needs unit testing -class PublicRoomListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/publicRooms$") +class PublicRoomListRestServlet(TransactionRestServlet): + PATTERNS = client_patterns("/publicRooms$", v1=True) + + def __init__(self, hs): + super(PublicRoomListRestServlet, self).__init__(hs) + self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -382,12 +398,13 @@ def on_POST(self, request): # TODO: Needs unit testing -class RoomMemberListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/members$") +class RoomMemberListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/members$", v1=True) def __init__(self, hs): - super(RoomMemberListRestServlet, self).__init__(hs) + super(RoomMemberListRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -436,12 +453,13 @@ def on_GET(self, request, room_id): # deprecated in favour of /members?membership=join? 
# except it does custom AS logic and has a simpler return format -class JoinedRoomMemberListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$") +class JoinedRoomMemberListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/joined_members$", v1=True) def __init__(self, hs): - super(JoinedRoomMemberListRestServlet, self).__init__(hs) + super(JoinedRoomMemberListRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -457,12 +475,13 @@ def on_GET(self, request, room_id): # TODO: Needs better unit testing -class RoomMessageListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/messages$") +class RoomMessageListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/messages$", v1=True) def __init__(self, hs): - super(RoomMessageListRestServlet, self).__init__(hs) + super(RoomMessageListRestServlet, self).__init__() self.pagination_handler = hs.get_pagination_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -491,12 +510,13 @@ def on_GET(self, request, room_id): # TODO: Needs unit testing -class RoomStateRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/state$") +class RoomStateRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/state$", v1=True) def __init__(self, hs): - super(RoomStateRestServlet, self).__init__(hs) + super(RoomStateRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -511,12 +531,13 @@ def on_GET(self, request, room_id): # TODO: Needs unit testing -class RoomInitialSyncRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$") +class RoomInitialSyncRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/initialSync$", v1=True) def __init__(self, hs): - super(RoomInitialSyncRestServlet, self).__init__(hs) + super(RoomInitialSyncRestServlet, self).__init__() self.initial_sync_handler = hs.get_initial_sync_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -530,16 +551,17 @@ def on_GET(self, request, room_id): defer.returnValue((200, content)) -class RoomEventServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$" +class RoomEventServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P<room_id>[^/]*)/event/(?P<event_id>[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomEventServlet, self).__init__(hs) + super(RoomEventServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -554,16 +576,17 @@ def on_GET(self, request, room_id, event_id): defer.returnValue((404, "Event not found.")) -class RoomEventContextServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$" +class RoomEventContextServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P<room_id>[^/]*)/context/(?P<event_id>[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomEventContextServlet, self).__init__(hs) + super(RoomEventContextServlet, self).__init__() self.clock = hs.get_clock() 
self.room_context_handler = hs.get_room_context_handler() self._event_serializer = hs.get_event_client_serializer() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -609,10 +632,11 @@ def on_GET(self, request, room_id, event_id): defer.returnValue((200, results)) -class RoomForgetRestServlet(ClientV1RestServlet): +class RoomForgetRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomForgetRestServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = ("/rooms/(?P<room_id>[^/]*)/forget") @@ -639,11 +663,12 @@ def on_PUT(self, request, room_id, txn_id): # TODO: Needs unit testing -class RoomMembershipRestServlet(ClientV1RestServlet): +class RoomMembershipRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomMembershipRestServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): # /rooms/$roomid/[invite|join|leave] @@ -722,11 +747,12 @@ def on_PUT(self, request, room_id, membership_action, txn_id): ) -class RoomRedactEventRestServlet(ClientV1RestServlet): +class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomRedactEventRestServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = ("/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)") @@ -757,15 +783,16 @@ def on_PUT(self, request, room_id, event_id, txn_id): ) -class RoomTypingRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$" +class RoomTypingRestServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P<room_id>[^/]*)/typing/(?P<user_id>[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomTypingRestServlet, self).__init__(hs) + super(RoomTypingRestServlet, self).__init__() self.presence_handler = hs.get_presence_handler() self.typing_handler = hs.get_typing_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_PUT(self, request, room_id, user_id): @@ -798,14 +825,13 @@ def on_PUT(self, request, room_id, user_id): defer.returnValue((200, {})) -class SearchRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/search$" - ) +class SearchRestServlet(RestServlet): + PATTERNS = client_patterns("/search$", v1=True) def __init__(self, hs): - super(SearchRestServlet, self).__init__(hs) + super(SearchRestServlet, self).__init__() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request): @@ -823,12 +849,13 @@ def on_POST(self, request): defer.returnValue((200, results)) -class JoinedRoomsRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/joined_rooms$") +class JoinedRoomsRestServlet(RestServlet): + PATTERNS = client_patterns("/joined_rooms$", v1=True) def __init__(self, hs): - super(JoinedRoomsRestServlet, self).__init__(hs) + super(JoinedRoomsRestServlet, self).__init__() self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -853,18 +880,18 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False): """ http_server.register_paths( "POST", - client_path_patterns(regex_string + "$"), + client_patterns(regex_string + "$", v1=True), servlet.on_POST ) http_server.register_paths( "PUT", - 
client_path_patterns(regex_string + "/(?P<txn_id>[^/]*)$"), + client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True), servlet.on_PUT ) if with_get: http_server.register_paths( "GET", - client_path_patterns(regex_string + "/(?P<txn_id>[^/]*)$"), + client_patterns(regex_string + "/(?P<txn_id>[^/]*)$", v1=True), servlet.on_GET )
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 53da905eeadc..0975df84cf66 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -19,11 +19,16 @@ from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns -class VoipRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/voip/turnServer$") +class VoipRestServlet(RestServlet): + PATTERNS = client_patterns("/voip/turnServer$", v1=True) + + def __init__(self, hs): + super(VoipRestServlet, self).__init__() + self.hs = hs @defer.inlineCallbacks def on_GET(self, request):
diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index 24ac26bf0332..5236d5d566e7 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -26,8 +26,7 @@ logger = logging.getLogger(__name__) -def client_v2_patterns(path_regex, releases=(0,), - unstable=True): +def client_patterns(path_regex, releases=(0,), unstable=True, v1=False): """Creates a regex compiled client path with the correct client path prefix. @@ -41,6 +40,9 @@ def client_v2_patterns(path_regex, releases=(0,), if unstable: unstable_prefix = CLIENT_API_PREFIX + "/unstable" patterns.append(re.compile("^" + unstable_prefix + path_regex)) + if v1: + v1_prefix = CLIENT_API_PREFIX + "/api/v1" + patterns.append(re.compile("^" + v1_prefix + path_regex)) for release in releases: new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) patterns.append(re.compile("^" + new_prefix + path_regex))
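For reference, the renamed helper above reduces to the following self-contained sketch; CLIENT_API_PREFIX is assumed to be "/_matrix/client" (it is defined in synapse.api.urls, not in this hunk):

    import re

    CLIENT_API_PREFIX = "/_matrix/client"  # assumed value; see synapse.api.urls

    def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
        # One compiled regex per URL prefix that should serve the endpoint.
        patterns = []
        if unstable:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
        if v1:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
        for release in releases:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
        return patterns

    # A migrated v1 endpoint keeps its old prefix while gaining the stable one:
    for p in client_patterns("/pushers$", v1=True):
        print(p.pattern)
    # ^/_matrix/client/unstable/pushers$
    # ^/_matrix/client/api/v1/pushers$
    # ^/_matrix/client/r0/pushers$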
client_patterns("/account/deactivate$") def __init__(self, hs): super(DeactivateAccountRestServlet, self).__init__() @@ -228,7 +228,7 @@ def on_POST(self, request): class EmailThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") + PATTERNS = client_patterns("/account/3pid/email/requestToken$") def __init__(self, hs): self.hs = hs @@ -263,7 +263,7 @@ def on_POST(self, request): class MsisdnThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/msisdn/requestToken$") + PATTERNS = client_patterns("/account/3pid/msisdn/requestToken$") def __init__(self, hs): self.hs = hs @@ -300,7 +300,7 @@ def on_POST(self, request): class ThreepidRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid$") + PATTERNS = client_patterns("/account/3pid$") def __init__(self, hs): super(ThreepidRestServlet, self).__init__() @@ -364,7 +364,7 @@ def on_POST(self, request): class ThreepidDeleteRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/delete$") + PATTERNS = client_patterns("/account/3pid/delete$") def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() @@ -401,7 +401,7 @@ def on_POST(self, request): class WhoamiRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/whoami$") + PATTERNS = client_patterns("/account/whoami$") def __init__(self, hs): super(WhoamiRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index f171b8d6269e..574a6298cea2 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -20,7 +20,7 @@ from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -30,7 +30,7 @@ class AccountDataServlet(RestServlet): PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1 GET /user/{user_id}/account_data/{account_dataType} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/account_data/(?P[^/]*)" ) @@ -79,7 +79,7 @@ class RoomAccountDataServlet(RestServlet): PUT /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1 GET /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)" "/rooms/(?P[^/]*)" "/account_data/(?P[^/]*)" diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index fc8dbeb617f0..55c4ed56607a 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -21,13 +21,13 @@ from synapse.http.server import finish_request from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class AccountValidityRenewServlet(RestServlet): - PATTERNS = client_v2_patterns("/account_validity/renew$") + PATTERNS = client_patterns("/account_validity/renew$") SUCCESS_HTML = b"Your account has been successfully renewed." 
def __init__(self, hs): @@ -60,7 +60,7 @@ def on_GET(self, request): class AccountValiditySendMailServlet(RestServlet): - PATTERNS = client_v2_patterns("/account_validity/send_mail$") + PATTERNS = client_patterns("/account_validity/send_mail$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 4c380ab84da3..8dfe5cba0298 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -23,7 +23,7 @@ from synapse.http.server import finish_request from synapse.http.servlet import RestServlet, parse_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -122,7 +122,7 @@ class AuthRestServlet(RestServlet): cannot be handled in the normal flow (with requests to the same endpoint). Current use is for web fallback auth. """ - PATTERNS = client_v2_patterns(r"/auth/(?P[\w\.]*)/fallback/web") + PATTERNS = client_patterns(r"/auth/(?P[\w\.]*)/fallback/web") def __init__(self, hs): super(AuthRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index 2b4892330c4d..fc7e2f4dd56b 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -19,7 +19,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -27,7 +27,7 @@ class CapabilitiesRestServlet(RestServlet): """End point to expose the capabilities of the server.""" - PATTERNS = client_v2_patterns("/capabilities$") + PATTERNS = client_patterns("/capabilities$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 5a5be7c3904e..78665304a50d 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -24,13 +24,13 @@ parse_json_object_from_request, ) -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler logger = logging.getLogger(__name__) class DevicesRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices$") + PATTERNS = client_patterns("/devices$") def __init__(self, hs): """ @@ -56,7 +56,7 @@ class DeleteDevicesRestServlet(RestServlet): API for bulk deletion of devices. Accepts a JSON object with a devices key which lists the device_ids to delete. Requires user interactive auth. 
""" - PATTERNS = client_v2_patterns("/delete_devices") + PATTERNS = client_patterns("/delete_devices") def __init__(self, hs): super(DeleteDevicesRestServlet, self).__init__() @@ -95,7 +95,7 @@ def on_POST(self, request): class DeviceRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices/(?P[^/]*)$") + PATTERNS = client_patterns("/devices/(?P[^/]*)$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index ae867288793d..65db48c3cc56 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -21,13 +21,13 @@ from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID -from ._base import client_v2_patterns, set_timeline_upper_limit +from ._base import client_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) class GetFilterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") + PATTERNS = client_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") def __init__(self, hs): super(GetFilterRestServlet, self).__init__() @@ -63,7 +63,7 @@ def on_GET(self, request, user_id, filter_id): class CreateFilterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user/(?P[^/]*)/filter") + PATTERNS = client_patterns("/user/(?P[^/]*)/filter") def __init__(self, hs): super(CreateFilterRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 21e02c07c0c6..d082385ec704 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -21,7 +21,7 @@ from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import GroupID -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ class GroupServlet(RestServlet): """Get the group profile """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/profile$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/profile$") def __init__(self, hs): super(GroupServlet, self).__init__() @@ -65,7 +65,7 @@ def on_POST(self, request, group_id): class GroupSummaryServlet(RestServlet): """Get the full group summary """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/summary$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/summary$") def __init__(self, hs): super(GroupSummaryServlet, self).__init__() @@ -93,7 +93,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): - /groups/:group/summary/rooms/:room_id - /groups/:group/summary/categories/:category/rooms/:room_id """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/summary" "(/categories/(?P[^/]+))?" 
"/rooms/(?P[^/]*)$" @@ -137,7 +137,7 @@ def on_DELETE(self, request, group_id, category_id, room_id): class GroupCategoryServlet(RestServlet): """Get/add/update/delete a group category """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/categories/(?P[^/]+)$" ) @@ -189,7 +189,7 @@ def on_DELETE(self, request, group_id, category_id): class GroupCategoriesServlet(RestServlet): """Get all group categories """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/categories/$" ) @@ -214,7 +214,7 @@ def on_GET(self, request, group_id): class GroupRoleServlet(RestServlet): """Get/add/update/delete a group role """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/roles/(?P[^/]+)$" ) @@ -266,7 +266,7 @@ def on_DELETE(self, request, group_id, role_id): class GroupRolesServlet(RestServlet): """Get all group roles """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/roles/$" ) @@ -295,7 +295,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): - /groups/:group/summary/users/:room_id - /groups/:group/summary/roles/:role/users/:user_id """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/summary" "(/roles/(?P[^/]+))?" "/users/(?P[^/]*)$" @@ -339,7 +339,7 @@ def on_DELETE(self, request, group_id, role_id, user_id): class GroupRoomServlet(RestServlet): """Get all rooms in a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/rooms$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/rooms$") def __init__(self, hs): super(GroupRoomServlet, self).__init__() @@ -360,7 +360,7 @@ def on_GET(self, request, group_id): class GroupUsersServlet(RestServlet): """Get all users in a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/users$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/users$") def __init__(self, hs): super(GroupUsersServlet, self).__init__() @@ -381,7 +381,7 @@ def on_GET(self, request, group_id): class GroupInvitedUsersServlet(RestServlet): """Get users invited to a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/invited_users$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/invited_users$") def __init__(self, hs): super(GroupInvitedUsersServlet, self).__init__() @@ -405,7 +405,7 @@ def on_GET(self, request, group_id): class GroupSettingJoinPolicyServlet(RestServlet): """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") def __init__(self, hs): super(GroupSettingJoinPolicyServlet, self).__init__() @@ -431,7 +431,7 @@ def on_PUT(self, request, group_id): class GroupCreateServlet(RestServlet): """Create a group """ - PATTERNS = client_v2_patterns("/create_group$") + PATTERNS = client_patterns("/create_group$") def __init__(self, hs): super(GroupCreateServlet, self).__init__() @@ -462,7 +462,7 @@ def on_POST(self, request): class GroupAdminRoomsServlet(RestServlet): """Add a room to the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)$" ) @@ -499,7 +499,7 @@ def on_DELETE(self, request, group_id, room_id): class GroupAdminRoomsConfigServlet(RestServlet): """Update the config of a room in a group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)" "/config/(?P[^/]*)$" ) @@ -526,7 +526,7 @@ def on_PUT(self, request, group_id, room_id, config_key): class 
GroupAdminUsersInviteServlet(RestServlet): """Invite a user to the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/users/invite/(?P[^/]*)$" ) @@ -555,7 +555,7 @@ def on_PUT(self, request, group_id, user_id): class GroupAdminUsersKickServlet(RestServlet): """Kick a user from the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/users/remove/(?P[^/]*)$" ) @@ -581,7 +581,7 @@ def on_PUT(self, request, group_id, user_id): class GroupSelfLeaveServlet(RestServlet): """Leave a joined group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/leave$" ) @@ -607,7 +607,7 @@ def on_PUT(self, request, group_id): class GroupSelfJoinServlet(RestServlet): """Attempt to join a group, or knock """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/join$" ) @@ -633,7 +633,7 @@ def on_PUT(self, request, group_id): class GroupSelfAcceptInviteServlet(RestServlet): """Accept a group invite """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/accept_invite$" ) @@ -659,7 +659,7 @@ def on_PUT(self, request, group_id): class GroupSelfUpdatePublicityServlet(RestServlet): """Update whether we publicise a users membership of a group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/update_publicity$" ) @@ -686,7 +686,7 @@ def on_PUT(self, request, group_id): class PublicisedGroupsForUserServlet(RestServlet): """Get the list of groups a user is advertising """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/publicised_groups/(?P[^/]*)$" ) @@ -711,7 +711,7 @@ def on_GET(self, request, user_id): class PublicisedGroupsForUsersServlet(RestServlet): """Get the list of groups a user is advertising """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/publicised_groups$" ) @@ -739,7 +739,7 @@ def on_POST(self, request): class GroupsForUserServlet(RestServlet): """Get all groups the logged in user is joined to """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/joined_groups$" ) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 8486086b510a..4cbfbf5631b7 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -26,7 +26,7 @@ ) from synapse.types import StreamToken -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class KeyUploadServlet(RestServlet): }, } """ - PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$") + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") def __init__(self, hs): """ @@ -130,7 +130,7 @@ class KeyQueryServlet(RestServlet): } } } } } } """ - PATTERNS = client_v2_patterns("/keys/query$") + PATTERNS = client_patterns("/keys/query$") def __init__(self, hs): """ @@ -159,7 +159,7 @@ class KeyChangesServlet(RestServlet): 200 OK { "changed": ["@foo:example.com"] } """ - PATTERNS = client_v2_patterns("/keys/changes$") + PATTERNS = client_patterns("/keys/changes$") def __init__(self, hs): """ @@ -209,7 +209,7 @@ class OneTimeKeyServlet(RestServlet): } } } } """ - PATTERNS = client_v2_patterns("/keys/claim$") + PATTERNS = client_patterns("/keys/claim$") def __init__(self, hs): super(OneTimeKeyServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 
0a1eb0ae45f2..53e666989bb0 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -20,13 +20,13 @@ from synapse.events.utils import format_event_for_client_v2_without_room_id from synapse.http.servlet import RestServlet, parse_integer, parse_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class NotificationsServlet(RestServlet): - PATTERNS = client_v2_patterns("/notifications$") + PATTERNS = client_patterns("/notifications$") def __init__(self, hs): super(NotificationsServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 01c90aa2a3a7..bb927d9f9d86 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -22,7 +22,7 @@ from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.util.stringutils import random_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class IdTokenServlet(RestServlet): "expires_in": 3600, } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/openid/request_token" ) diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index a6e582a5aecb..f4bd0d077f76 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -19,13 +19,13 @@ from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReadMarkerRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/rooms/(?P[^/]*)/read_markers$") + PATTERNS = client_patterns("/rooms/(?P[^/]*)/read_markers$") def __init__(self, hs): super(ReadMarkerRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index de370cac4503..fa12ac3e4d11 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -20,13 +20,13 @@ from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReceiptRestServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)" "/receipt/(?P[^/]*)" "/(?P[^/]*)$" diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 042f6361357e..79c085408bf3 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -43,7 +43,7 @@ from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.threepids import check_3pid_allowed -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler # We ought to be using hmac.compare_digest() but on older pythons it doesn't # exist. 
It's a _really minor_ security flaw to use plain string comparison @@ -60,7 +60,7 @@ def compare_digest(a, b): class EmailRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/email/requestToken$") + PATTERNS = client_patterns("/register/email/requestToken$") def __init__(self, hs): """ @@ -98,7 +98,7 @@ def on_POST(self, request): class MsisdnRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/msisdn/requestToken$") + PATTERNS = client_patterns("/register/msisdn/requestToken$") def __init__(self, hs): """ @@ -142,7 +142,7 @@ def on_POST(self, request): class UsernameAvailabilityRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/available") + PATTERNS = client_patterns("/register/available") def __init__(self, hs): """ @@ -182,7 +182,7 @@ def on_GET(self, request): class RegisterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register$") + PATTERNS = client_patterns("/register$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 41e0a4493667..f8f8742bdc24 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -34,7 +34,7 @@ from synapse.rest.client.transactions import HttpTransactionCache from synapse.storage.relations import AggregationPaginationToken, RelationPaginationToken -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -66,12 +66,12 @@ def __init__(self, hs): def register(self, http_server): http_server.register_paths( "POST", - client_v2_patterns(self.PATTERN + "$", releases=()), + client_patterns(self.PATTERN + "$", releases=()), self.on_PUT_or_POST, ) http_server.register_paths( "PUT", - client_v2_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), + client_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), self.on_PUT, ) @@ -120,7 +120,7 @@ class RelationPaginationServlet(RestServlet): filtered by relation type and event type. 
""" - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/relations/(?P[^/]*)" "(/(?P[^/]*)(/(?P[^/]*))?)?$", releases=(), @@ -197,7 +197,7 @@ class RelationAggregationPaginationServlet(RestServlet): } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" "(/(?P[^/]*)(/(?P[^/]*))?)?$", releases=(), @@ -269,7 +269,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)$", releases=(), diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 95d2a71ec2f1..10198662a9eb 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -27,13 +27,13 @@ parse_json_object_from_request, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReportEventRestServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/report/(?P[^/]*)$" ) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 220a0de30bf6..87779645f971 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -24,13 +24,13 @@ parse_string, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class RoomKeysServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$" ) @@ -256,7 +256,7 @@ def on_DELETE(self, request, room_id, session_id): class RoomKeysNewVersionServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/version$" ) @@ -314,7 +314,7 @@ def on_POST(self, request): class RoomKeysVersionServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/version(/(?P[^/]+))?$" ) diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 62b8de71fa03..c621a90fbaa2 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -25,7 +25,7 @@ parse_json_object_from_request, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -47,7 +47,7 @@ class RoomUpgradeRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( # /rooms/$roomid/upgrade "/rooms/(?P[^/]*)/upgrade$", ) diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 21e9cef2d0aa..120a71336198 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -21,13 +21,13 @@ from synapse.http.servlet import parse_json_object_from_request from synapse.rest.client.transactions import HttpTransactionCache -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class SendToDeviceRestServlet(servlet.RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/sendToDevice/(?P[^/]*)/(?P[^/]*)$", ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 
d3025025e337..148fc6c985b5 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -32,7 +32,7 @@ from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.types import StreamToken -from ._base import client_v2_patterns, set_timeline_upper_limit +from ._base import client_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) @@ -73,7 +73,7 @@ class SyncRestServlet(RestServlet): } """ - PATTERNS = client_v2_patterns("/sync$") + PATTERNS = client_patterns("/sync$") ALLOWED_PRESENCE = set(["online", "offline", "unavailable"]) def __init__(self, hs): diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 4fea614e9561..ebff7cff4516 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -20,7 +20,7 @@ from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ class TagListServlet(RestServlet): """ GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags" ) @@ -54,7 +54,7 @@ class TagServlet(RestServlet): PUT /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1 DELETE /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags/(?P[^/]*)" ) diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index b9b5d0767733..e7a987466ab5 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -21,13 +21,13 @@ from synapse.api.constants import ThirdPartyEntityKind from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ThirdPartyProtocolsServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocols") + PATTERNS = client_patterns("/thirdparty/protocols") def __init__(self, hs): super(ThirdPartyProtocolsServlet, self).__init__() @@ -44,7 +44,7 @@ def on_GET(self, request): class ThirdPartyProtocolServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P[^/]+)$") + PATTERNS = client_patterns("/thirdparty/protocol/(?P[^/]+)$") def __init__(self, hs): super(ThirdPartyProtocolServlet, self).__init__() @@ -66,7 +66,7 @@ def on_GET(self, request, protocol): class ThirdPartyUserServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/user(/(?P[^/]+))?$") + PATTERNS = client_patterns("/thirdparty/user(/(?P[^/]+))?$") def __init__(self, hs): super(ThirdPartyUserServlet, self).__init__() @@ -89,7 +89,7 @@ def on_GET(self, request, protocol): class ThirdPartyLocationServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/location(/(?P[^/]+))?$") + PATTERNS = client_patterns("/thirdparty/location(/(?P[^/]+))?$") def __init__(self, hs): super(ThirdPartyLocationServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 6e76b9e9c2f6..6c366142e1dc 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -18,7 +18,7 @@ from synapse.api.errors import AuthError from synapse.http.servlet 
import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns class TokenRefreshRestServlet(RestServlet): @@ -26,7 +26,7 @@ class TokenRefreshRestServlet(RestServlet): Exchanges refresh tokens for a pair of an access token and a new refresh token. """ - PATTERNS = client_v2_patterns("/tokenrefresh") + PATTERNS = client_patterns("/tokenrefresh") def __init__(self, hs): super(TokenRefreshRestServlet, self).__init__()
diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 36b02de37f84..69e4efc47a85 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -20,13 +20,13 @@ from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class UserDirectorySearchRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user_directory/search$") + PATTERNS = client_patterns("/user_directory/search$") def __init__(self, hs): """
diff --git a/tests/__init__.py b/tests/__init__.py index d3181f9403a8..f7fc502f018f 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -21,4 +21,4 @@ # attempt to do the patch before we load any synapse code tests.patch_inline_callbacks.do_patch() -util.DEFAULT_TIMEOUT_DURATION = 10 +util.DEFAULT_TIMEOUT_DURATION = 20
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index ee5f09041f27..e5fc2fcd159e 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -408,7 +408,6 @@ def test_shutdown_room_consent(self): users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertEqual([], users_in_room) - @unittest.DEBUG def test_shutdown_room_block_peek(self): """Test that a world_readable room can no longer be peeked into after it has been shut down.
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index f4d0d48dad89..72c7ed93cb3d 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -30,7 +30,7 @@ from ....utils import MockHttpResource, setup_test_homeserver myid = "@1234ABCD:test" -PATH_PREFIX = "/_matrix/client/api/v1" +PATH_PREFIX = "/_matrix/client/r0" class MockHandlerProfileTestCase(unittest.TestCase):
From 37057d5d6047a7f984fc9f1db094b9169a4e4c73 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:02:47 +1000 Subject: [PATCH 090/231] prepare --- synapse/storage/prepare_database.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c1711bc8bd5f..07478b66728c 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -20,6 +20,8 @@ import os import re +from synapse.storage.engines.postgres import PostgresEngine + logger = logging.getLogger(__name__) @@ -115,8 +117,16 @@ def _setup_new_database(cur, database_engine): valid_dirs = [] pattern = re.compile(r"^\d+(\.sql)?$") + + if isinstance(database_engine, PostgresEngine): + specific = "postgres" + else: + specific = "sqlite" + + specific_pattern = re.compile(r"^\d+(\.sql\." 
+ specific + r")?$") + for filename in directory_entries: - match = pattern.match(filename) + match = pattern.match(filename) or specific_pattern.match(filename) abs_path = os.path.join(current_dir, filename) if match and os.path.isdir(abs_path): ver = int(match.group(0)) @@ -136,7 +146,9 @@ def _setup_new_database(cur, database_engine): directory_entries = os.listdir(sql_dir) - for filename in fnmatch.filter(directory_entries, "*.sql"): + for filename in fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter( + directory_entries, "*.sql." + specific + ): sql_loc = os.path.join(sql_dir, filename) logger.debug("Applying schema %s", sql_loc) executescript(cur, sql_loc) From dc72b90cd674f69fea1d27a1c1dab60a60d5ab9d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:03:28 +1000 Subject: [PATCH 091/231] full schema --- .../schema/full_schemas/54/full.sql.postgres | 2040 +++++++++++++++++ .../schema/full_schemas/54/full.sql.sqlite | 261 +++ .../storage/schema/full_schemas/README.txt | 14 + 3 files changed, 2315 insertions(+) create mode 100644 synapse/storage/schema/full_schemas/54/full.sql.postgres create mode 100644 synapse/storage/schema/full_schemas/54/full.sql.sqlite create mode 100644 synapse/storage/schema/full_schemas/README.txt diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres new file mode 100644 index 000000000000..5fb54cfe7751 --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -0,0 +1,2040 @@ + + + + + +CREATE TABLE _extremities_to_check ( + event_id text +); + + + +CREATE TABLE access_tokens ( + id bigint NOT NULL, + user_id text NOT NULL, + device_id text, + token text NOT NULL, + last_used bigint +); + + + +CREATE TABLE account_data ( + user_id text NOT NULL, + account_data_type text NOT NULL, + stream_id bigint NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE account_data_max_stream_id ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint NOT NULL, + CONSTRAINT private_user_data_max_stream_id_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE account_validity ( + user_id text NOT NULL, + expiration_ts_ms bigint NOT NULL, + email_sent boolean NOT NULL, + renewal_token text +); + + + +CREATE TABLE application_services_state ( + as_id text NOT NULL, + state character varying(5), + last_txn integer +); + + + +CREATE TABLE application_services_txns ( + as_id text NOT NULL, + txn_id integer NOT NULL, + event_ids text NOT NULL +); + + + +CREATE TABLE applied_module_schemas ( + module_name text NOT NULL, + file text NOT NULL +); + + + +CREATE TABLE applied_schema_deltas ( + version integer NOT NULL, + file text NOT NULL +); + + + +CREATE TABLE appservice_room_list ( + appservice_id text NOT NULL, + network_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE appservice_stream_position ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_ordering bigint, + CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE background_updates ( + update_name text NOT NULL, + progress_json text NOT NULL, + depends_on text +); + + + +CREATE TABLE blocked_rooms ( + room_id text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE cache_invalidation_stream ( + stream_id bigint, + cache_func text, + keys text[], + invalidation_ts bigint +); + + + +CREATE TABLE current_state_delta_stream ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + 
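Before the schema dump in the next patch, a quick illustration of what the new matching logic accepts. This script is not part of the patch; it simply exercises the two regexes and the fnmatch filters added above, assuming a PostgreSQL engine (so specific == "postgres"):

    import fnmatch
    import re

    specific = "postgres"  # picked when database_engine is a PostgresEngine
    pattern = re.compile(r"^\d+(\.sql)?$")
    specific_pattern = re.compile(r"^\d+(\.sql\." + specific + r")?$")

    # Schema version directory names: "54" matched before; "54.sql.postgres"
    # is newly accepted, while another engine's variant is skipped.
    for name in ["54", "54.sql", "54.sql.postgres", "54.sql.sqlite"]:
        print(name, bool(pattern.match(name) or specific_pattern.match(name)))
    # 54 True / 54.sql True / 54.sql.postgres True / 54.sql.sqlite False

    # Script selection inside a schema directory now also picks up
    # engine-specific files such as full.sql.postgres:
    files = ["full.sql", "full.sql.postgres", "full.sql.sqlite", "README.txt"]
    print(fnmatch.filter(files, "*.sql") + fnmatch.filter(files, "*.sql." + specific))
    # ['full.sql', 'full.sql.postgres']

From dc72b90cd674f69fea1d27a1c1dab60a60d5ab9d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:03:28 +1000 Subject: [PATCH 091/231] full schema --- .../schema/full_schemas/54/full.sql.postgres | 2040 +++++++++++++++++ .../schema/full_schemas/54/full.sql.sqlite | 261 +++ .../storage/schema/full_schemas/README.txt | 14 + 3 files changed, 2315 insertions(+) create mode 100644 synapse/storage/schema/full_schemas/54/full.sql.postgres create mode 100644 synapse/storage/schema/full_schemas/54/full.sql.sqlite create mode 100644 synapse/storage/schema/full_schemas/README.txt
diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres new file mode 100644 index 000000000000..5fb54cfe7751 --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -0,0 +1,2040 @@ + + + + + +CREATE TABLE _extremities_to_check ( + event_id text +); + + + +CREATE TABLE access_tokens ( + id bigint NOT NULL, + user_id text NOT NULL, + device_id text, + token text NOT NULL, + last_used bigint +); + + + +CREATE TABLE account_data ( + user_id text NOT NULL, + account_data_type text NOT NULL, + stream_id bigint NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE account_data_max_stream_id ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint NOT NULL, + CONSTRAINT private_user_data_max_stream_id_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE account_validity ( + user_id text NOT NULL, + expiration_ts_ms bigint NOT NULL, + email_sent boolean NOT NULL, + renewal_token text +); + + + +CREATE TABLE application_services_state ( + as_id text NOT NULL, + state character varying(5), + last_txn integer +); + + + +CREATE TABLE application_services_txns ( + as_id text NOT NULL, + txn_id integer NOT NULL, + event_ids text NOT NULL +); + + + +CREATE TABLE applied_module_schemas ( + module_name text NOT NULL, + file text NOT NULL +); + + + +CREATE TABLE applied_schema_deltas ( + version integer NOT NULL, + file text NOT NULL +); + + + +CREATE TABLE appservice_room_list ( + appservice_id text NOT NULL, + network_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE appservice_stream_position ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_ordering bigint, + CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE background_updates ( + update_name text NOT NULL, + progress_json text NOT NULL, + depends_on text +); + + + +CREATE TABLE blocked_rooms ( + room_id text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE cache_invalidation_stream ( + stream_id bigint, + cache_func text, + keys text[], + invalidation_ts bigint +); + + + +CREATE TABLE current_state_delta_stream ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + 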
state_key text NOT NULL, + event_id text, + prev_event_id text +); + + + +CREATE TABLE current_state_events ( + event_id text NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL +); + + + +CREATE TABLE deleted_pushers ( + stream_id bigint NOT NULL, + app_id text NOT NULL, + pushkey text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE destinations ( + destination text NOT NULL, + retry_last_ts bigint, + retry_interval integer +); + + + +CREATE TABLE device_federation_inbox ( + origin text NOT NULL, + message_id text NOT NULL, + received_ts bigint NOT NULL +); + + + +CREATE TABLE device_federation_outbox ( + destination text NOT NULL, + stream_id bigint NOT NULL, + queued_ts bigint NOT NULL, + messages_json text NOT NULL +); + + + +CREATE TABLE device_inbox ( + user_id text NOT NULL, + device_id text NOT NULL, + stream_id bigint NOT NULL, + message_json text NOT NULL +); + + + +CREATE TABLE device_lists_outbound_last_success ( + destination text NOT NULL, + user_id text NOT NULL, + stream_id bigint NOT NULL +); + + + +CREATE TABLE device_lists_outbound_pokes ( + destination text NOT NULL, + stream_id bigint NOT NULL, + user_id text NOT NULL, + device_id text NOT NULL, + sent boolean NOT NULL, + ts bigint NOT NULL +); + + + +CREATE TABLE device_lists_remote_cache ( + user_id text NOT NULL, + device_id text NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE device_lists_remote_extremeties ( + user_id text NOT NULL, + stream_id text NOT NULL +); + + + +CREATE TABLE device_lists_stream ( + stream_id bigint NOT NULL, + user_id text NOT NULL, + device_id text NOT NULL +); + + + +CREATE TABLE device_max_stream_id ( + stream_id bigint NOT NULL +); + + + +CREATE TABLE devices ( + user_id text NOT NULL, + device_id text NOT NULL, + display_name text +); + + + +CREATE TABLE e2e_device_keys_json ( + user_id text NOT NULL, + device_id text NOT NULL, + ts_added_ms bigint NOT NULL, + key_json text NOT NULL +); + + + +CREATE TABLE e2e_one_time_keys_json ( + user_id text NOT NULL, + device_id text NOT NULL, + algorithm text NOT NULL, + key_id text NOT NULL, + ts_added_ms bigint NOT NULL, + key_json text NOT NULL +); + + + +CREATE TABLE e2e_room_keys ( + user_id text NOT NULL, + room_id text NOT NULL, + session_id text NOT NULL, + version bigint NOT NULL, + first_message_index integer, + forwarded_count integer, + is_verified boolean, + session_data text NOT NULL +); + + + +CREATE TABLE e2e_room_keys_versions ( + user_id text NOT NULL, + version bigint NOT NULL, + algorithm text NOT NULL, + auth_data text NOT NULL, + deleted smallint DEFAULT 0 NOT NULL +); + + + +CREATE TABLE erased_users ( + user_id text NOT NULL +); + + + +CREATE TABLE event_auth ( + event_id text NOT NULL, + auth_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE event_backward_extremities ( + event_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE event_edges ( + event_id text NOT NULL, + prev_event_id text NOT NULL, + room_id text NOT NULL, + is_state boolean NOT NULL +); + + + +CREATE TABLE event_forward_extremities ( + event_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE event_json ( + event_id text NOT NULL, + room_id text NOT NULL, + internal_metadata text NOT NULL, + json text NOT NULL, + format_version integer +); + + + +CREATE TABLE event_push_actions ( + room_id text NOT NULL, + event_id text NOT NULL, + user_id text NOT NULL, + profile_tag character varying(32), + actions text NOT NULL, + topological_ordering bigint, + 
stream_ordering bigint, + notif smallint, + highlight smallint +); + + + +CREATE TABLE event_push_actions_staging ( + event_id text NOT NULL, + user_id text NOT NULL, + actions text NOT NULL, + notif smallint NOT NULL, + highlight smallint NOT NULL +); + + + +CREATE TABLE event_push_summary ( + user_id text NOT NULL, + room_id text NOT NULL, + notif_count bigint NOT NULL, + stream_ordering bigint NOT NULL +); + + + +CREATE TABLE event_push_summary_stream_ordering ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_ordering bigint NOT NULL, + CONSTRAINT event_push_summary_stream_ordering_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE event_reference_hashes ( + event_id text, + algorithm text, + hash bytea +); + + + +CREATE TABLE event_relations ( + event_id text NOT NULL, + relates_to_id text NOT NULL, + relation_type text NOT NULL, + aggregation_key text +); + + + +CREATE TABLE event_reports ( + id bigint NOT NULL, + received_ts bigint NOT NULL, + room_id text NOT NULL, + event_id text NOT NULL, + user_id text NOT NULL, + reason text, + content text +); + + + +CREATE TABLE event_search ( + event_id text, + room_id text, + sender text, + key text, + vector tsvector, + origin_server_ts bigint, + stream_ordering bigint +); + + + +CREATE TABLE event_to_state_groups ( + event_id text NOT NULL, + state_group bigint NOT NULL +); + + + +CREATE TABLE events ( + stream_ordering integer NOT NULL, + topological_ordering bigint NOT NULL, + event_id text NOT NULL, + type text NOT NULL, + room_id text NOT NULL, + content text, + unrecognized_keys text, + processed boolean NOT NULL, + outlier boolean NOT NULL, + depth bigint DEFAULT 0 NOT NULL, + origin_server_ts bigint, + received_ts bigint, + sender text, + contains_url boolean +); + + + +CREATE TABLE ex_outlier_stream ( + event_stream_ordering bigint NOT NULL, + event_id text NOT NULL, + state_group bigint NOT NULL +); + + + +CREATE TABLE federation_stream_position ( + type text NOT NULL, + stream_id integer NOT NULL +); + + + +CREATE TABLE group_attestations_remote ( + group_id text NOT NULL, + user_id text NOT NULL, + valid_until_ms bigint NOT NULL, + attestation_json text NOT NULL +); + + + +CREATE TABLE group_attestations_renewals ( + group_id text NOT NULL, + user_id text NOT NULL, + valid_until_ms bigint NOT NULL +); + + + +CREATE TABLE group_invites ( + group_id text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE group_roles ( + group_id text NOT NULL, + role_id text NOT NULL, + profile text NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_room_categories ( + group_id text NOT NULL, + category_id text NOT NULL, + profile text NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_rooms ( + group_id text NOT NULL, + room_id text NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_summary_roles ( + group_id text NOT NULL, + role_id text NOT NULL, + role_order bigint NOT NULL, + CONSTRAINT group_summary_roles_role_order_check CHECK ((role_order > 0)) +); + + + +CREATE TABLE group_summary_room_categories ( + group_id text NOT NULL, + category_id text NOT NULL, + cat_order bigint NOT NULL, + CONSTRAINT group_summary_room_categories_cat_order_check CHECK ((cat_order > 0)) +); + + + +CREATE TABLE group_summary_rooms ( + group_id text NOT NULL, + room_id text NOT NULL, + category_id text NOT NULL, + room_order bigint NOT NULL, + is_boolean NOT NULL, + CONSTRAINT group_summary_rooms_room_order_check CHECK ((room_order > 0)) +); + + + +CREATE TABLE group_summary_users ( + group_id text NOT 
NULL, + user_id text NOT NULL, + role_id text NOT NULL, + user_order bigint NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_users ( + group_id text NOT NULL, + user_id text NOT NULL, + is_admin boolean NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE groups ( + group_id text NOT NULL, + name text, + avatar_url text, + short_description text, + long_description text, + is_boolean NOT NULL, + join_policy text DEFAULT 'invite'::text NOT NULL +); + + + +CREATE TABLE guest_access ( + event_id text NOT NULL, + room_id text NOT NULL, + guest_access text NOT NULL +); + + + +CREATE TABLE history_visibility ( + event_id text NOT NULL, + room_id text NOT NULL, + history_visibility text NOT NULL +); + + + +CREATE TABLE local_group_membership ( + group_id text NOT NULL, + user_id text NOT NULL, + is_admin boolean NOT NULL, + membership text NOT NULL, + is_sed boolean NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE local_group_updates ( + stream_id bigint NOT NULL, + group_id text NOT NULL, + user_id text NOT NULL, + type text NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE local_invites ( + stream_id bigint NOT NULL, + inviter text NOT NULL, + invitee text NOT NULL, + event_id text NOT NULL, + room_id text NOT NULL, + locally_rejected text, + replaced_by text +); + + + +CREATE TABLE local_media_repository ( + media_id text, + media_type text, + media_length integer, + created_ts bigint, + upload_name text, + user_id text, + quarantined_by text, + url_cache text, + last_access_ts bigint +); + + + +CREATE TABLE local_media_repository_thumbnails ( + media_id text, + thumbnail_width integer, + thumbnail_height integer, + thumbnail_type text, + thumbnail_method text, + thumbnail_length integer +); + + + +CREATE TABLE local_media_repository_url_cache ( + url text, + response_code integer, + etag text, + expires_ts bigint, + og text, + media_id text, + download_ts bigint +); + + + +CREATE TABLE monthly_active_users ( + user_id text NOT NULL, + "timestamp" bigint NOT NULL +); + + + +CREATE TABLE open_id_tokens ( + token text NOT NULL, + ts_valid_until_ms bigint NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE presence ( + user_id text NOT NULL, + state character varying(20), + status_msg text, + mtime bigint +); + + + +CREATE TABLE presence_allow_inbound ( + observed_user_id text NOT NULL, + observer_user_id text NOT NULL +); + + + +CREATE TABLE presence_stream ( + stream_id bigint, + user_id text, + state text, + last_active_ts bigint, + last_federation_update_ts bigint, + last_user_sync_ts bigint, + status_msg text, + currently_active boolean +); + + + +CREATE TABLE profiles ( + user_id text NOT NULL, + displayname text, + avatar_url text +); + + + +CREATE TABLE room_list_stream ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + visibility boolean NOT NULL, + appservice_id text, + network_id text +); + + + +CREATE TABLE push_rules ( + id bigint NOT NULL, + user_name text NOT NULL, + rule_id text NOT NULL, + priority_class smallint NOT NULL, + priority integer DEFAULT 0 NOT NULL, + conditions text NOT NULL, + actions text NOT NULL +); + + + +CREATE TABLE push_rules_enable ( + id bigint NOT NULL, + user_name text NOT NULL, + rule_id text NOT NULL, + enabled smallint +); + + + +CREATE TABLE push_rules_stream ( + stream_id bigint NOT NULL, + event_stream_ordering bigint NOT NULL, + user_id text NOT NULL, + rule_id text NOT NULL, + op text NOT NULL, + priority_class smallint, + priority integer, + conditions text, + actions text +); + + + +CREATE TABLE 
pusher_throttle ( + pusher bigint NOT NULL, + room_id text NOT NULL, + last_sent_ts bigint, + throttle_ms bigint +); + + + +CREATE TABLE pushers ( + id bigint NOT NULL, + user_name text NOT NULL, + access_token bigint, + profile_tag text NOT NULL, + kind text NOT NULL, + app_id text NOT NULL, + app_display_name text NOT NULL, + device_display_name text NOT NULL, + pushkey text NOT NULL, + ts bigint NOT NULL, + lang text, + data text, + last_stream_ordering integer, + last_success bigint, + failing_since bigint +); + + + +CREATE TABLE ratelimit_override ( + user_id text NOT NULL, + messages_per_second bigint, + burst_count bigint +); + + + +CREATE TABLE receipts_graph ( + room_id text NOT NULL, + receipt_type text NOT NULL, + user_id text NOT NULL, + event_ids text NOT NULL, + data text NOT NULL +); + + + +CREATE TABLE receipts_linearized ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + receipt_type text NOT NULL, + user_id text NOT NULL, + event_id text NOT NULL, + data text NOT NULL +); + + + +CREATE TABLE received_transactions ( + transaction_id text, + origin text, + ts bigint, + response_code integer, + response_json bytea, + has_been_referenced smallint DEFAULT 0 +); + + + +CREATE TABLE redactions ( + event_id text NOT NULL, + redacts text NOT NULL +); + + + +CREATE TABLE rejections ( + event_id text NOT NULL, + reason text NOT NULL, + last_check text NOT NULL +); + + + +CREATE TABLE remote_media_cache ( + media_origin text, + media_id text, + media_type text, + created_ts bigint, + upload_name text, + media_length integer, + filesystem_id text, + last_access_ts bigint, + quarantined_by text +); + + + +CREATE TABLE remote_media_cache_thumbnails ( + media_origin text, + media_id text, + thumbnail_width integer, + thumbnail_height integer, + thumbnail_method text, + thumbnail_type text, + thumbnail_length integer, + filesystem_id text +); + + + +CREATE TABLE remote_profile_cache ( + user_id text NOT NULL, + displayname text, + avatar_url text, + last_check bigint NOT NULL +); + + + +CREATE TABLE room_account_data ( + user_id text NOT NULL, + room_id text NOT NULL, + account_data_type text NOT NULL, + stream_id bigint NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE room_alias_servers ( + room_alias text NOT NULL, + server text NOT NULL +); + + + +CREATE TABLE room_aliases ( + room_alias text NOT NULL, + room_id text NOT NULL, + creator text +); + + + +CREATE TABLE room_depth ( + room_id text NOT NULL, + min_depth integer NOT NULL +); + + + +CREATE TABLE room_memberships ( + event_id text NOT NULL, + user_id text NOT NULL, + sender text NOT NULL, + room_id text NOT NULL, + membership text NOT NULL, + forgotten integer DEFAULT 0, + display_name text, + avatar_url text +); + + + +CREATE TABLE room_names ( + event_id text NOT NULL, + room_id text NOT NULL, + name text NOT NULL +); + + + +CREATE TABLE room_state ( + room_id text NOT NULL, + join_rules text, + history_visibility text, + encryption text, + name text, + topic text, + avatar text, + canonical_alias text +); + + + +CREATE TABLE room_stats ( + room_id text NOT NULL, + ts bigint NOT NULL, + bucket_size integer NOT NULL, + current_state_events integer NOT NULL, + joined_members integer NOT NULL, + invited_members integer NOT NULL, + left_members integer NOT NULL, + banned_members integer NOT NULL, + state_events integer NOT NULL +); + + + +CREATE TABLE room_stats_earliest_token ( + room_id text NOT NULL, + token bigint NOT NULL +); + + + +CREATE TABLE room_tags ( + user_id text NOT NULL, + room_id text NOT NULL, + 
tag text NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE room_tags_revisions ( + user_id text NOT NULL, + room_id text NOT NULL, + stream_id bigint NOT NULL +); + + + +CREATE TABLE rooms ( + room_id text NOT NULL, + is_boolean, + creator text +); + + + +CREATE TABLE schema_version ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + version integer NOT NULL, + upgraded boolean NOT NULL, + CONSTRAINT schema_version_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE server_keys_json ( + server_name text NOT NULL, + key_id text NOT NULL, + from_server text NOT NULL, + ts_added_ms bigint NOT NULL, + ts_valid_until_ms bigint NOT NULL, + key_json bytea NOT NULL +); + + + +CREATE TABLE server_signature_keys ( + server_name text, + key_id text, + from_server text, + ts_added_ms bigint, + verify_key bytea, + ts_valid_until_ms bigint +); + + + +CREATE TABLE state_events ( + event_id text NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL, + prev_state text +); + + + +CREATE TABLE state_group_edges ( + state_group bigint NOT NULL, + prev_state_group bigint NOT NULL +); + + + +CREATE SEQUENCE state_group_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + +CREATE TABLE state_groups ( + id bigint NOT NULL, + room_id text NOT NULL, + event_id text NOT NULL +); + + + +CREATE TABLE state_groups_state ( + state_group bigint NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL, + event_id text NOT NULL +); + + + +CREATE TABLE stats_stream_pos ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint, + CONSTRAINT stats_stream_pos_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE stream_ordering_to_exterm ( + stream_ordering bigint NOT NULL, + room_id text NOT NULL, + event_id text NOT NULL +); + + + +CREATE TABLE threepid_guest_access_tokens ( + medium text, + address text, + guest_access_token text, + first_inviter text +); + + + +CREATE TABLE topics ( + event_id text NOT NULL, + room_id text NOT NULL, + topic text NOT NULL +); + + + +CREATE TABLE user_daily_visits ( + user_id text NOT NULL, + device_id text, + "timestamp" bigint NOT NULL +); + + + +CREATE TABLE user_directory ( + user_id text NOT NULL, + room_id text, + display_name text, + avatar_url text +); + + + +CREATE TABLE user_directory_search ( + user_id text NOT NULL, + vector tsvector +); + + + +CREATE TABLE user_directory_stream_pos ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint, + CONSTRAINT user_directory_stream_pos_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE user_filters ( + user_id text, + filter_id bigint, + filter_json bytea +); + + + +CREATE TABLE user_ips ( + user_id text NOT NULL, + access_token text NOT NULL, + device_id text, + ip text NOT NULL, + user_agent text NOT NULL, + last_seen bigint NOT NULL +); + + + +CREATE TABLE user_stats ( + user_id text NOT NULL, + ts bigint NOT NULL, + bucket_size integer NOT NULL, + rooms integer NOT NULL, + private_rooms integer NOT NULL +); + + + +CREATE TABLE user_threepid_id_server ( + user_id text NOT NULL, + medium text NOT NULL, + address text NOT NULL, + id_server text NOT NULL +); + + + +CREATE TABLE user_threepids ( + user_id text NOT NULL, + medium text NOT NULL, + address text NOT NULL, + validated_at bigint NOT NULL, + added_at bigint NOT NULL +); + + + +CREATE TABLE users ( + name text, + password_hash text, + creation_ts bigint, + admin smallint DEFAULT 0 NOT NULL, + upgrade_ts bigint, + is_guest 
smallint DEFAULT 0 NOT NULL, + appservice_id text, + consent_version text, + consent_server_notice_sent text, + user_type text +); + + + +CREATE TABLE users_in_rooms ( + user_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE users_pending_deactivation ( + user_id text NOT NULL +); + + + +CREATE TABLE users_who_share_private_rooms ( + user_id text NOT NULL, + other_user_id text NOT NULL, + room_id text NOT NULL +); + + + +ALTER TABLE ONLY access_tokens + ADD CONSTRAINT access_tokens_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY access_tokens + ADD CONSTRAINT access_tokens_token_key UNIQUE (token); + + + +ALTER TABLE ONLY account_data + ADD CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type); + + + +ALTER TABLE ONLY account_validity + ADD CONSTRAINT account_validity_pkey PRIMARY KEY (user_id); + + + +ALTER TABLE ONLY application_services_state + ADD CONSTRAINT application_services_state_pkey PRIMARY KEY (as_id); + + + +ALTER TABLE ONLY application_services_txns + ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); + + + +ALTER TABLE ONLY applied_module_schemas + ADD CONSTRAINT applied_module_schemas_module_name_file_key UNIQUE (module_name, file); + + + +ALTER TABLE ONLY applied_schema_deltas + ADD CONSTRAINT applied_schema_deltas_version_file_key UNIQUE (version, file); + + + +ALTER TABLE ONLY appservice_stream_position + ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY background_updates + ADD CONSTRAINT background_updates_uniqueness UNIQUE (update_name); + + + +ALTER TABLE ONLY current_state_events + ADD CONSTRAINT current_state_events_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY current_state_events + ADD CONSTRAINT current_state_events_room_id_type_state_key_key UNIQUE (room_id, type, state_key); + + + +ALTER TABLE ONLY destinations + ADD CONSTRAINT destinations_pkey PRIMARY KEY (destination); + + + +ALTER TABLE ONLY devices + ADD CONSTRAINT device_uniqueness UNIQUE (user_id, device_id); + + + +ALTER TABLE ONLY e2e_device_keys_json + ADD CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id); + + + +ALTER TABLE ONLY e2e_one_time_keys_json + ADD CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id); + + + +ALTER TABLE ONLY event_backward_extremities + ADD CONSTRAINT event_backward_extremities_event_id_room_id_key UNIQUE (event_id, room_id); + + + +ALTER TABLE ONLY event_edges + ADD CONSTRAINT event_edges_event_id_prev_event_id_room_id_is_state_key UNIQUE (event_id, prev_event_id, room_id, is_state); + + + +ALTER TABLE ONLY event_forward_extremities + ADD CONSTRAINT event_forward_extremities_event_id_room_id_key UNIQUE (event_id, room_id); + + + +ALTER TABLE ONLY event_push_actions + ADD CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag); + + + +ALTER TABLE ONLY event_json + ADD CONSTRAINT event_json_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY event_push_summary_stream_ordering + ADD CONSTRAINT event_push_summary_stream_ordering_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY event_reference_hashes + ADD CONSTRAINT event_reference_hashes_event_id_algorithm_key UNIQUE (event_id, algorithm); + + + +ALTER TABLE ONLY event_reports + ADD CONSTRAINT event_reports_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY event_to_state_groups + ADD CONSTRAINT event_to_state_groups_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY events + ADD CONSTRAINT 
events_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY events + ADD CONSTRAINT events_pkey PRIMARY KEY (stream_ordering); + + + +ALTER TABLE ONLY ex_outlier_stream + ADD CONSTRAINT ex_outlier_stream_pkey PRIMARY KEY (event_stream_ordering); + + + +ALTER TABLE ONLY group_roles + ADD CONSTRAINT group_roles_group_id_role_id_key UNIQUE (group_id, role_id); + + + +ALTER TABLE ONLY group_room_categories + ADD CONSTRAINT group_room_categories_group_id_category_id_key UNIQUE (group_id, category_id); + + + +ALTER TABLE ONLY group_summary_roles + ADD CONSTRAINT group_summary_roles_group_id_role_id_role_order_key UNIQUE (group_id, role_id, role_order); + + + +ALTER TABLE ONLY group_summary_room_categories + ADD CONSTRAINT group_summary_room_categories_group_id_category_id_cat_orde_key UNIQUE (group_id, category_id, cat_order); + + + +ALTER TABLE ONLY group_summary_rooms + ADD CONSTRAINT group_summary_rooms_group_id_category_id_room_id_room_order_key UNIQUE (group_id, category_id, room_id, room_order); + + + +ALTER TABLE ONLY guest_access + ADD CONSTRAINT guest_access_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY history_visibility + ADD CONSTRAINT history_visibility_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY local_media_repository + ADD CONSTRAINT local_media_repository_media_id_key UNIQUE (media_id); + + + +ALTER TABLE ONLY local_media_repository_thumbnails + ADD CONSTRAINT local_media_repository_thumbn_media_id_thumbnail_width_thum_key UNIQUE (media_id, thumbnail_width, thumbnail_height, thumbnail_type); + + + +ALTER TABLE ONLY user_threepids + ADD CONSTRAINT medium_address UNIQUE (medium, address); + + + +ALTER TABLE ONLY open_id_tokens + ADD CONSTRAINT open_id_tokens_pkey PRIMARY KEY (token); + + + +ALTER TABLE ONLY presence_allow_inbound + ADD CONSTRAINT presence_allow_inbound_observed_user_id_observer_user_id_key UNIQUE (observed_user_id, observer_user_id); + + + +ALTER TABLE ONLY presence + ADD CONSTRAINT presence_user_id_key UNIQUE (user_id); + + + +ALTER TABLE ONLY account_data_max_stream_id + ADD CONSTRAINT private_user_data_max_stream_id_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY profiles + ADD CONSTRAINT profiles_user_id_key UNIQUE (user_id); + + + +ALTER TABLE ONLY push_rules_enable + ADD CONSTRAINT push_rules_enable_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY push_rules_enable + ADD CONSTRAINT push_rules_enable_user_name_rule_id_key UNIQUE (user_name, rule_id); + + + +ALTER TABLE ONLY push_rules + ADD CONSTRAINT push_rules_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY push_rules + ADD CONSTRAINT push_rules_user_name_rule_id_key UNIQUE (user_name, rule_id); + + + +ALTER TABLE ONLY pusher_throttle + ADD CONSTRAINT pusher_throttle_pkey PRIMARY KEY (pusher, room_id); + + + +ALTER TABLE ONLY pushers + ADD CONSTRAINT pushers2_app_id_pushkey_user_name_key UNIQUE (app_id, pushkey, user_name); + + + +ALTER TABLE ONLY pushers + ADD CONSTRAINT pushers2_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY receipts_graph + ADD CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id); + + + +ALTER TABLE ONLY receipts_linearized + ADD CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id); + + + +ALTER TABLE ONLY received_transactions + ADD CONSTRAINT received_transactions_transaction_id_origin_key UNIQUE (transaction_id, origin); + + + +ALTER TABLE ONLY redactions + ADD CONSTRAINT redactions_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY rejections + ADD CONSTRAINT rejections_event_id_key UNIQUE (event_id); + + 
+ +ALTER TABLE ONLY remote_media_cache + ADD CONSTRAINT remote_media_cache_media_origin_media_id_key UNIQUE (media_origin, media_id); + + + +ALTER TABLE ONLY remote_media_cache_thumbnails + ADD CONSTRAINT remote_media_cache_thumbnails_media_origin_media_id_thumbna_key UNIQUE (media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type); + + + +ALTER TABLE ONLY room_account_data + ADD CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type); + + + +ALTER TABLE ONLY room_aliases + ADD CONSTRAINT room_aliases_room_alias_key UNIQUE (room_alias); + + + +ALTER TABLE ONLY room_depth + ADD CONSTRAINT room_depth_room_id_key UNIQUE (room_id); + + + +ALTER TABLE ONLY room_memberships + ADD CONSTRAINT room_memberships_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY room_names + ADD CONSTRAINT room_names_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY room_tags_revisions + ADD CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id); + + + +ALTER TABLE ONLY room_tags + ADD CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag); + + + +ALTER TABLE ONLY rooms + ADD CONSTRAINT rooms_pkey PRIMARY KEY (room_id); + + + +ALTER TABLE ONLY schema_version + ADD CONSTRAINT schema_version_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY server_keys_json + ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server); + + + +ALTER TABLE ONLY server_signature_keys + ADD CONSTRAINT server_signature_keys_server_name_key_id_key UNIQUE (server_name, key_id); + + + +ALTER TABLE ONLY state_events + ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY state_groups + ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY stats_stream_pos + ADD CONSTRAINT stats_stream_pos_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY topics + ADD CONSTRAINT topics_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY user_directory_stream_pos + ADD CONSTRAINT user_directory_stream_pos_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY users + ADD CONSTRAINT users_name_key UNIQUE (name); + + + +CREATE INDEX _extremities_to_check_id ON _extremities_to_check USING btree (event_id); + + + +CREATE INDEX account_data_stream_id ON account_data USING btree (user_id, stream_id); + + + +CREATE INDEX application_services_txns_id ON application_services_txns USING btree (as_id); + + + +CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list USING btree (appservice_id, network_id, room_id); + + + +CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms USING btree (room_id); + + + +CREATE INDEX cache_invalidation_stream_id ON cache_invalidation_stream USING btree (stream_id); + + + +CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING btree (stream_id); + + + +CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id); + + + +CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox USING btree (origin, message_id); + + + +CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox USING btree (destination, stream_id); + + + +CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree (stream_id); + + + +CREATE INDEX device_inbox_stream_id ON device_inbox USING btree (stream_id); + + + +CREATE INDEX device_inbox_user_stream_id ON device_inbox USING btree (user_id, device_id, stream_id); + + + +CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success USING 
btree (destination, user_id, stream_id); + + + +CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes USING btree (destination, stream_id); + + + +CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes USING btree (stream_id); + + + +CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USING btree (destination, user_id); + + + +CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id); + + + +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys USING btree (user_id, room_id, session_id); + + + +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions USING btree (user_id, version); + + + +CREATE UNIQUE INDEX erased_users_user ON erased_users USING btree (user_id); + + + +CREATE INDEX ev_b_extrem_id ON event_backward_extremities USING btree (event_id); + + + +CREATE INDEX ev_b_extrem_room ON event_backward_extremities USING btree (room_id); + + + +CREATE INDEX ev_edges_id ON event_edges USING btree (event_id); + + + +CREATE INDEX ev_edges_prev_id ON event_edges USING btree (prev_event_id); + + + +CREATE INDEX ev_extrem_id ON event_forward_extremities USING btree (event_id); + + + +CREATE INDEX ev_extrem_room ON event_forward_extremities USING btree (room_id); + + + +CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id); + + + +CREATE INDEX event_json_room_id ON event_json USING btree (room_id); + + + +CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering); + + + +CREATE INDEX event_push_actions_room_id_user_id ON event_push_actions USING btree (room_id, user_id); + + + +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging USING btree (event_id); + + + +CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btree (stream_ordering, user_id); + + + +CREATE INDEX event_push_summary_user_rm ON event_push_summary USING btree (user_id, room_id); + + + +CREATE INDEX event_reference_hashes_id ON event_reference_hashes USING btree (event_id); + + + +CREATE UNIQUE INDEX event_relations_id ON event_relations USING btree (event_id); + + + +CREATE INDEX event_relations_relates ON event_relations USING btree (relates_to_id, relation_type, aggregation_key); + + + +CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id); + + + +CREATE INDEX event_search_fts_idx ON event_search USING gin (vector); + + + +CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering); + + + +CREATE INDEX events_room_stream ON events USING btree (room_id, stream_ordering); + + + +CREATE INDEX events_ts ON events USING btree (origin_server_ts, stream_ordering); + + + +CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote USING btree (group_id, user_id); + + + +CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote USING btree (user_id); + + + +CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote USING btree (valid_until_ms); + + + +CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals USING btree (group_id, user_id); + + + +CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals USING btree (user_id); + + + +CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals USING btree (valid_until_ms); + + + +CREATE UNIQUE INDEX group_invites_g_idx ON group_invites USING btree (group_id, user_id); + + + 
+CREATE INDEX group_invites_u_idx ON group_invites USING btree (user_id);
+
+
+
+CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms USING btree (group_id, room_id);
+
+
+
+CREATE INDEX group_rooms_r_idx ON group_rooms USING btree (room_id);
+
+
+
+CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms USING btree (group_id, room_id, category_id);
+
+
+
+CREATE INDEX group_summary_users_g_idx ON group_summary_users USING btree (group_id);
+
+
+
+CREATE UNIQUE INDEX group_users_g_idx ON group_users USING btree (group_id, user_id);
+
+
+
+CREATE INDEX group_users_u_idx ON group_users USING btree (user_id);
+
+
+
+CREATE UNIQUE INDEX groups_idx ON groups USING btree (group_id);
+
+
+
+CREATE INDEX local_group_membership_g_idx ON local_group_membership USING btree (group_id);
+
+
+
+CREATE INDEX local_group_membership_u_idx ON local_group_membership USING btree (user_id, group_id);
+
+
+
+CREATE INDEX local_invites_for_user_idx ON local_invites USING btree (invitee, locally_rejected, replaced_by, room_id);
+
+
+
+CREATE INDEX local_invites_id ON local_invites USING btree (stream_id);
+
+
+
+CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails USING btree (media_id);
+
+
+
+CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache USING btree (url, download_ts);
+
+
+
+CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache USING btree (expires_ts);
+
+
+
+CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache USING btree (media_id);
+
+
+
+CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp");
+
+
+
+CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users USING btree (user_id);
+
+
+
+CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens USING btree (ts_valid_until_ms);
+
+
+
+CREATE INDEX presence_stream_id ON presence_stream USING btree (stream_id, user_id);
+
+
+
+CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id);
+
+
+
+CREATE INDEX room_index ON rooms USING btree (is_;
+
+
+
+CREATE INDEX room_list_stream_idx ON room_list_stream USING btree (stream_id);
+
+
+
+CREATE INDEX room_list_stream_rm_idx ON room_list_stream USING btree (room_id, stream_id);
+
+
+
+CREATE INDEX push_rules_enable_user_name ON push_rules_enable USING btree (user_name);
+
+
+
+CREATE INDEX push_rules_stream_id ON push_rules_stream USING btree (stream_id);
+
+
+
+CREATE INDEX push_rules_stream_user_stream_id ON push_rules_stream USING btree (user_id, stream_id);
+
+
+
+CREATE INDEX push_rules_user_name ON push_rules USING btree (user_name);
+
+
+
+CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override USING btree (user_id);
+
+
+
+CREATE INDEX receipts_linearized_id ON receipts_linearized USING btree (stream_id);
+
+
+
+CREATE INDEX receipts_linearized_room_stream ON receipts_linearized USING btree (room_id, stream_id);
+
+
+
+CREATE INDEX receipts_linearized_user ON receipts_linearized USING btree (user_id);
+
+
+
+CREATE INDEX received_transactions_ts ON received_transactions USING btree (ts);
+
+
+
+CREATE INDEX redactions_redacts ON redactions USING btree (redacts);
+
+
+
+CREATE INDEX remote_profile_cache_time ON remote_profile_cache USING btree (last_check);
+
+
+
+CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache USING btree (user_id);
+
+
+
+CREATE INDEX room_account_data_stream_id ON room_account_data USING btree
(user_id, stream_id); + + + +CREATE INDEX room_alias_servers_alias ON room_alias_servers USING btree (room_alias); + + + +CREATE INDEX room_aliases_id ON room_aliases USING btree (room_id); + + + +CREATE INDEX room_depth_room ON room_depth USING btree (room_id); + + + +CREATE INDEX room_memberships_room_id ON room_memberships USING btree (room_id); + + + +CREATE INDEX room_memberships_user_id ON room_memberships USING btree (user_id); + + + +CREATE INDEX room_names_room_id ON room_names USING btree (room_id); + + + +CREATE UNIQUE INDEX room_state_room ON room_state USING btree (room_id); + + + +CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token USING btree (room_id); + + + +CREATE UNIQUE INDEX room_stats_room_ts ON room_stats USING btree (room_id, ts); + + + +CREATE INDEX state_group_edges_idx ON state_group_edges USING btree (state_group); + + + +CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group); + + + +CREATE INDEX state_groups_state_id ON state_groups_state USING btree (state_group); + + + +CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering); + + + +CREATE INDEX stream_ordering_to_exterm_rm_idx ON stream_ordering_to_exterm USING btree (room_id, stream_ordering); + + + +CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens USING btree (medium, address); + + + +CREATE INDEX topics_room_id ON topics USING btree (room_id); + + + +CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits USING btree ("timestamp"); + + + +CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits USING btree (user_id, "timestamp"); + + + +CREATE INDEX user_directory_room_idx ON user_directory USING btree (room_id); + + + +CREATE INDEX user_directory_search_fts_idx ON user_directory_search USING gin (vector); + + + +CREATE UNIQUE INDEX user_directory_search_user_idx ON user_directory_search USING btree (user_id); + + + +CREATE UNIQUE INDEX user_directory_user_idx ON user_directory USING btree (user_id); + + + +CREATE INDEX user_filters_by_user_id_filter_id ON user_filters USING btree (user_id, filter_id); + + + +CREATE INDEX user_ips_user_ip ON user_ips USING btree (user_id, access_token, ip); + + + +CREATE UNIQUE INDEX user_stats_user_ts ON user_stats USING btree (user_id, ts); + + + +CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server USING btree (user_id, medium, address, id_server); + + + +CREATE INDEX user_threepids_medium_address ON user_threepids USING btree (medium, address); + + + +CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); + + + +CREATE UNIQUE INDEX users_in_rooms_u_idx ON users_in_rooms USING btree (user_id, room_id); + + + +CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms USING btree (other_user_id); + + + +CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms USING btree (room_id); + + + +CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms USING btree (user_id, other_user_id, room_id); + + + diff --git a/synapse/storage/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/full_schemas/54/full.sql.sqlite new file mode 100644 index 000000000000..0b60a6c78935 --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/full.sql.sqlite @@ -0,0 +1,261 @@ +CREATE TABLE application_services_state( as_id TEXT PRIMARY KEY, state VARCHAR(5), last_txn INTEGER ); +CREATE TABLE 
application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) ); +CREATE INDEX application_services_txns_id ON application_services_txns ( as_id ); +CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) ); +CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_user_id TEXT NOT NULL, UNIQUE (observed_user_id, observer_user_id) ); +CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) ); +CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) ); +CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL ); +CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) ); +CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) ); +CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER ); +CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) ); +CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering ); +CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) ); +CREATE INDEX event_json_room_id ON event_json(room_id); +CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) ); +CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, UNIQUE (event_id), UNIQUE (room_id, type, state_key) ); +CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) ); +CREATE INDEX room_memberships_room_id ON room_memberships (room_id); +CREATE INDEX room_memberships_user_id ON room_memberships (user_id); +CREATE TABLE topics( event_id TEXT NOT NULL, room_id TEXT NOT NULL, topic TEXT NOT NULL, UNIQUE (event_id) ); +CREATE INDEX topics_room_id ON topics(room_id); +CREATE TABLE room_names( event_id TEXT NOT NULL, room_id TEXT NOT NULL, name TEXT NOT NULL, UNIQUE (event_id) ); +CREATE INDEX room_names_room_id ON room_names(room_id); +CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT ); +CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) ); +CREATE TABLE rejections( event_id TEXT NOT NULL, 
reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) ); +CREATE INDEX push_rules_user_name on push_rules (user_name); +CREATE TABLE user_filters( user_id TEXT, filter_id BIGINT, filter_json bytea ); +CREATE INDEX user_filters_by_user_id_filter_id ON user_filters( user_id, filter_id ); +CREATE TABLE push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) ); +CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name); +CREATE TABLE event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) ); +CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id); +CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id); +CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) ); +CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id); +CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id); +CREATE TABLE event_edges( event_id TEXT NOT NULL, prev_event_id TEXT NOT NULL, room_id TEXT NOT NULL, is_state BOOL NOT NULL, UNIQUE (event_id, prev_event_id, room_id, is_state) ); +CREATE INDEX ev_edges_id ON event_edges(event_id); +CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id); +CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) ); +CREATE INDEX room_depth_room ON room_depth(room_id); +CREATE TABLE state_groups( id BIGINT PRIMARY KEY, room_id TEXT NOT NULL, event_id TEXT NOT NULL ); +CREATE TABLE state_groups_state( state_group BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT NOT NULL ); +CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) ); +CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) ); +CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id); +CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) ); +CREATE TABLE remote_media_cache_thumbnails ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type ) ); +CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, UNIQUE (event_id) ); +CREATE INDEX redactions_redacts ON redactions (redacts); +CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) ); +CREATE INDEX room_aliases_id ON 
room_aliases(room_id); +CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL ); +CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias); +CREATE TABLE event_reference_hashes ( event_id TEXT, algorithm TEXT, hash bytea, UNIQUE (event_id, algorithm) ); +CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id); +CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) ); +CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) ); +CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) ); +CREATE TABLE receipts_graph( room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_ids TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id) ); +CREATE TABLE receipts_linearized ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_id TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id) ); +CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id ); +CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id ); +CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) ); +CREATE INDEX user_threepids_user_id ON user_threepids(user_id); +CREATE TABLE background_updates( update_name TEXT NOT NULL, progress_json TEXT NOT NULL, depends_on TEXT, CONSTRAINT background_updates_uniqueness UNIQUE (update_name) ); +CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value ) +/* event_search(event_id,room_id,sender,"key",value) */; +CREATE TABLE IF NOT EXISTS 'event_search_content'(docid INTEGER PRIMARY KEY, 'c0event_id', 'c1room_id', 'c2sender', 'c3key', 'c4value'); +CREATE TABLE IF NOT EXISTS 'event_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB); +CREATE TABLE IF NOT EXISTS 'event_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx)); +CREATE TABLE IF NOT EXISTS 'event_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB); +CREATE TABLE IF NOT EXISTS 'event_search_stat'(id INTEGER PRIMARY KEY, value BLOB); +CREATE TABLE guest_access( event_id TEXT NOT NULL, room_id TEXT NOT NULL, guest_access TEXT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE history_visibility( event_id TEXT NOT NULL, room_id TEXT NOT NULL, history_visibility TEXT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) ); +CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT 
NOT NULL, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) ); +CREATE TABLE IF NOT EXISTS "account_data_max_stream_id"( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT NOT NULL, CHECK (Lock='X') ); +CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) ); +CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) ); +CREATE INDEX account_data_stream_id on account_data(user_id, stream_id); +CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id); +CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering); +CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX events_room_stream on events(room_id, stream_ordering); +CREATE INDEX public_room_index on rooms(is_public); +CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id ); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN ); +CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id); +CREATE INDEX presence_stream_user_id ON presence_stream(user_id); +CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT ); +CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id); +CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id); +CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL ); +CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT ); +CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address); +CREATE TABLE local_invites( stream_id BIGINT NOT NULL, inviter TEXT NOT NULL, invitee TEXT NOT NULL, event_id TEXT NOT NULL, room_id TEXT NOT NULL, locally_rejected TEXT, replaced_by TEXT ); +CREATE INDEX local_invites_id ON local_invites(stream_id); +CREATE INDEX local_invites_for_user_idx ON local_invites(invitee, locally_rejected, replaced_by, room_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) ); +CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms); +CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, 
throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) ); +CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT ); +CREATE TABLE devices ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, display_name TEXT, CONSTRAINT device_uniqueness UNIQUE (user_id, device_id) ); +CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') ); +CREATE TABLE device_inbox ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL ); +CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id); +CREATE INDEX received_transactions_ts ON received_transactions(ts); +CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL ); +CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id); +CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL ); +CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id); +CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL ); +CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT); +CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id ); +CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id ); +CREATE TABLE state_group_edges( state_group BIGINT NOT NULL, prev_state_group BIGINT NOT NULL ); +CREATE INDEX state_group_edges_idx ON state_group_edges(state_group); +CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group); +CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL ); +CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering ); +CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering ); +CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE INDEX evauth_edges_id ON event_auth(event_id); +CREATE INDEX user_threepids_medium_address on user_threepids (medium, address); +CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id ); +CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id); +CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL ); +CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL ); +CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL ); +CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL ); +CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id); +CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL ); +CREATE INDEX device_lists_outbound_pokes_id 
ON device_lists_outbound_pokes(destination, stream_id); +CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id); +CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL ); +CREATE INDEX event_push_summary_user_rm ON event_push_summary(user_id, room_id); +CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') ); +CREATE TABLE IF NOT EXISTS "pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) ); +CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id); +CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT ); +CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id); +CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT ); +CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id); +CREATE TABLE device_lists_outbound_last_success ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL ); +CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success( destination, user_id, stream_id ); +CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') ); +CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value ) +/* user_directory_search(user_id,value) */; +CREATE TABLE IF NOT EXISTS 'user_directory_search_content'(docid INTEGER PRIMARY KEY, 'c0user_id', 'c1value'); +CREATE TABLE IF NOT EXISTS 'user_directory_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB); +CREATE TABLE IF NOT EXISTS 'user_directory_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx)); +CREATE TABLE IF NOT EXISTS 'user_directory_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB); +CREATE TABLE IF NOT EXISTS 'user_directory_search_stat'(id INTEGER PRIMARY KEY, value BLOB); +CREATE TABLE blocked_rooms ( room_id TEXT NOT NULL, user_id TEXT NOT NULL ); +CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id); +CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT ); +CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts); +CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts); +CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id); +CREATE TABLE group_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, is_public BOOLEAN NOT NULL ); +CREATE TABLE group_invites ( group_id TEXT NOT NULL, user_id TEXT NOT NULL ); +CREATE TABLE group_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, is_public BOOLEAN NOT 
NULL ); +CREATE TABLE group_summary_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, category_id TEXT NOT NULL, room_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id, room_id, room_order), CHECK (room_order > 0) ); +CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id); +CREATE TABLE group_summary_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, cat_order BIGINT NOT NULL, UNIQUE (group_id, category_id, cat_order), CHECK (cat_order > 0) ); +CREATE TABLE group_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id) ); +CREATE TABLE group_summary_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, role_id TEXT NOT NULL, user_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL ); +CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id); +CREATE TABLE group_summary_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, role_order BIGINT NOT NULL, UNIQUE (group_id, role_id, role_order), CHECK (role_order > 0) ); +CREATE TABLE group_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, role_id) ); +CREATE TABLE group_attestations_renewals ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL ); +CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(group_id, user_id); +CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id); +CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms); +CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL, attestation_json TEXT NOT NULL ); +CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id); +CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id); +CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms); +CREATE TABLE local_group_membership ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, is_publicised BOOLEAN NOT NULL, content TEXT NOT NULL ); +CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id); +CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id); +CREATE TABLE local_group_updates ( stream_id BIGINT NOT NULL, group_id TEXT NOT NULL, user_id TEXT NOT NULL, type TEXT NOT NULL, content TEXT NOT NULL ); +CREATE TABLE remote_profile_cache ( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, last_check BIGINT NOT NULL ); +CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache(user_id); +CREATE INDEX remote_profile_cache_time ON remote_profile_cache(last_check); +CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL ); +CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id); +CREATE TABLE IF NOT EXISTS "groups" ( group_id TEXT NOT NULL, name TEXT, avatar_url TEXT, short_description TEXT, long_description TEXT, is_public BOOL NOT NULL , join_policy TEXT NOT NULL DEFAULT 'invite'); +CREATE UNIQUE INDEX groups_idx ON groups(group_id); +CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT ); +CREATE INDEX 
user_directory_room_idx ON user_directory(room_id); +CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id); +CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL ); +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); +CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL ); +CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id); +CREATE UNIQUE INDEX group_users_g_idx ON group_users(group_id, user_id); +CREATE INDEX group_users_u_idx ON group_users(user_id); +CREATE INDEX group_invites_u_idx ON group_invites(user_id); +CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id); +CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); +CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL ); +CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp); +CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp); +CREATE TABLE erased_users ( user_id TEXT NOT NULL ); +CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id); +CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL ); +CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id); +CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp); +CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL ); +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); +CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL ); +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); +CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id); +CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id); +CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id); +CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server TEXT NOT NULL ); +CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server ); +CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id); +CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT ); +CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT ); +CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id); +CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key); +CREATE TABLE stats_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') ); +CREATE TABLE user_stats ( 
user_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, public_rooms INT NOT NULL, private_rooms INT NOT NULL ); +CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts); +CREATE TABLE room_stats ( room_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, current_state_events INT NOT NULL, joined_members INT NOT NULL, invited_members INT NOT NULL, left_members INT NOT NULL, banned_members INT NOT NULL, state_events INT NOT NULL ); +CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts); +CREATE TABLE room_state ( room_id TEXT NOT NULL, join_rules TEXT, history_visibility TEXT, encryption TEXT, name TEXT, topic TEXT, avatar TEXT, canonical_alias TEXT ); +CREATE UNIQUE INDEX room_state_room ON room_state(room_id); +CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL ); +CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id); +CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id); +CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen); +CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX current_state_events_member_index ON current_state_events (state_key); +CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id); +CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id); +CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts); +CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen); +CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen); +CREATE INDEX users_creation_ts ON users (creation_ts); +CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group); +CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id); +CREATE TABLE sqlite_stat1(tbl,idx,stat); +CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key); +CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id); +CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip); diff --git a/synapse/storage/schema/full_schemas/README.txt b/synapse/storage/schema/full_schemas/README.txt new file mode 100644 index 000000000000..12d4eb074640 --- /dev/null +++ b/synapse/storage/schema/full_schemas/README.txt @@ -0,0 +1,14 @@ +Building full schema dumps +========================== + +Postgres +-------- + +$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres + +SQLite +------ + +$ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite + +Delete the CREATE statements for "schema_version", "applied_schema_deltas", and "applied_module_schemas". 
\ No newline at end of file From 7f81b967ca1d4004832e96513ad33e802f9a6f78 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:23:40 +1000 Subject: [PATCH 092/231] fix schemas --- synapse/storage/prepare_database.py | 4 +- .../schema/full_schemas/54/full.sql.postgres | 72 +++++-------------- .../schema/full_schemas/54/full.sql.sqlite | 1 - .../storage/schema/full_schemas/README.txt | 4 +- tests/handlers/test_user_directory.py | 2 + 5 files changed, 22 insertions(+), 61 deletions(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 07478b66728c..b81c05369f5d 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -146,9 +146,9 @@ def _setup_new_database(cur, database_engine): directory_entries = os.listdir(sql_dir) - for filename in fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter( + for filename in sorted(fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter( directory_entries, "*.sql." + specific - ): + )): sql_loc = os.path.join(sql_dir, filename) logger.debug("Applying schema %s", sql_loc) executescript(cur, sql_loc) diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres index 5fb54cfe7751..ea3859fd24b7 100644 --- a/synapse/storage/schema/full_schemas/54/full.sql.postgres +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -60,21 +60,6 @@ CREATE TABLE application_services_txns ( ); - -CREATE TABLE applied_module_schemas ( - module_name text NOT NULL, - file text NOT NULL -); - - - -CREATE TABLE applied_schema_deltas ( - version integer NOT NULL, - file text NOT NULL -); - - - CREATE TABLE appservice_room_list ( appservice_id text NOT NULL, network_id text NOT NULL, @@ -475,7 +460,7 @@ CREATE TABLE group_roles ( group_id text NOT NULL, role_id text NOT NULL, profile text NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -484,7 +469,7 @@ CREATE TABLE group_room_categories ( group_id text NOT NULL, category_id text NOT NULL, profile text NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -492,7 +477,7 @@ CREATE TABLE group_room_categories ( CREATE TABLE group_rooms ( group_id text NOT NULL, room_id text NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -520,7 +505,7 @@ CREATE TABLE group_summary_rooms ( room_id text NOT NULL, category_id text NOT NULL, room_order bigint NOT NULL, - is_boolean NOT NULL, + is_public boolean NOT NULL, CONSTRAINT group_summary_rooms_room_order_check CHECK ((room_order > 0)) ); @@ -531,7 +516,7 @@ CREATE TABLE group_summary_users ( user_id text NOT NULL, role_id text NOT NULL, user_order bigint NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -540,7 +525,7 @@ CREATE TABLE group_users ( group_id text NOT NULL, user_id text NOT NULL, is_admin boolean NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -551,7 +536,7 @@ CREATE TABLE groups ( avatar_url text, short_description text, long_description text, - is_boolean NOT NULL, + is_public boolean NOT NULL, join_policy text DEFAULT 'invite'::text NOT NULL ); @@ -578,7 +563,7 @@ CREATE TABLE local_group_membership ( user_id text NOT NULL, is_admin boolean NOT NULL, membership text NOT NULL, - is_sed boolean NOT NULL, + is_publicised boolean NOT NULL, content text NOT NULL ); @@ -695,7 +680,7 @@ CREATE TABLE profiles ( -CREATE TABLE room_list_stream ( +CREATE TABLE public_room_list_stream ( stream_id bigint NOT NULL, room_id text NOT NULL, 
visibility boolean NOT NULL, @@ -966,21 +951,12 @@ CREATE TABLE room_tags_revisions ( CREATE TABLE rooms ( room_id text NOT NULL, - is_boolean, + is_public boolean, creator text ); -CREATE TABLE schema_version ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - version integer NOT NULL, - upgraded boolean NOT NULL, - CONSTRAINT schema_version_lock_check CHECK ((lock = 'X'::bpchar)) -); - - - CREATE TABLE server_keys_json ( server_name text NOT NULL, key_id text NOT NULL, @@ -1135,7 +1111,7 @@ CREATE TABLE user_stats ( user_id text NOT NULL, ts bigint NOT NULL, bucket_size integer NOT NULL, - rooms integer NOT NULL, + public_rooms integer NOT NULL, private_rooms integer NOT NULL ); @@ -1175,7 +1151,7 @@ CREATE TABLE users ( -CREATE TABLE users_in_rooms ( +CREATE TABLE users_in_public_rooms ( user_id text NOT NULL, room_id text NOT NULL ); @@ -1225,17 +1201,6 @@ ALTER TABLE ONLY application_services_txns ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); - -ALTER TABLE ONLY applied_module_schemas - ADD CONSTRAINT applied_module_schemas_module_name_file_key UNIQUE (module_name, file); - - - -ALTER TABLE ONLY applied_schema_deltas - ADD CONSTRAINT applied_schema_deltas_version_file_key UNIQUE (version, file); - - - ALTER TABLE ONLY appservice_stream_position ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); @@ -1521,11 +1486,6 @@ ALTER TABLE ONLY rooms -ALTER TABLE ONLY schema_version - ADD CONSTRAINT schema_version_lock_key UNIQUE (lock); - - - ALTER TABLE ONLY server_keys_json ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server); @@ -1846,15 +1806,15 @@ CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id); -CREATE INDEX room_index ON rooms USING btree (is_; +CREATE INDEX public_room_index ON rooms USING btree (is_public); -CREATE INDEX room_list_stream_idx ON room_list_stream USING btree (stream_id); +CREATE INDEX public_room_list_stream_idx ON public_room_list_stream USING btree (stream_id); -CREATE INDEX room_list_stream_rm_idx ON room_list_stream USING btree (room_id, stream_id); +CREATE INDEX public_room_list_stream_rm_idx ON public_room_list_stream USING btree (room_id, stream_id); @@ -2022,7 +1982,7 @@ CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); -CREATE UNIQUE INDEX users_in_rooms_u_idx ON users_in_rooms USING btree (user_id, room_id); +CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id); diff --git a/synapse/storage/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/full_schemas/54/full.sql.sqlite index 0b60a6c78935..be9295e4c91f 100644 --- a/synapse/storage/schema/full_schemas/54/full.sql.sqlite +++ b/synapse/storage/schema/full_schemas/54/full.sql.sqlite @@ -255,7 +255,6 @@ CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen); CREATE INDEX users_creation_ts ON users (creation_ts); CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group); CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id); -CREATE TABLE sqlite_stat1(tbl,idx,stat); CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key); CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id); CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip); diff --git a/synapse/storage/schema/full_schemas/README.txt 
b/synapse/storage/schema/full_schemas/README.txt index 12d4eb074640..df49f9b39e25 100644 --- a/synapse/storage/schema/full_schemas/README.txt +++ b/synapse/storage/schema/full_schemas/README.txt @@ -4,11 +4,11 @@ Building full schema dumps Postgres -------- -$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres +$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres SQLite ------ $ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite -Delete the CREATE statements for "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file +Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 9021e647feb4..b919694f54ab 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -96,6 +96,7 @@ def test_handle_user_deactivated_regular_user(self): self.get_success(self.handler.handle_user_deactivated(r_user_id)) self.store.remove_from_user_dir.called_once_with(r_user_id) + @unittest.DEBUG def test_private_room(self): """ A user can be searched for only by people that are either in a public @@ -340,6 +341,7 @@ def make_homeserver(self, reactor, clock): return hs + @unittest.DEBUG def test_disabling_room_list(self): self.config.user_directory_search_enabled = True From be452fc9ace4f501398be769766e3f8bbd798571 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:24:23 +1000 Subject: [PATCH 093/231] more fix --- .../full_schemas/54/stream_positions.sql | 38 +++++++++++++++++++ tests/handlers/test_user_directory.py | 2 - 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 synapse/storage/schema/full_schemas/54/stream_positions.sql diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql new file mode 100644 index 000000000000..d6433a5af26e --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -0,0 +1,38 @@ + +INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events; +INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1); +INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; +INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); +INSERT INTO stats_stream_pos (stream_id) VALUES (null); + +--- User dir population + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_user_directory_createtables', '{}'); + +-- Run through each room and update the user directory according to who is in it +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); + +-- Insert all users, if search_all_users is on +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, 
progress_json, depends_on) VALUES + ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); + +--- Stats population + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_stats_createtables', '{}'); + +-- Run through each room and update stats +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_process_rooms', '{}', 'populate_stats_createtables'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_cleanup', '{}', 'populate_stats_process_rooms'); diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index b919694f54ab..9021e647feb4 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -96,7 +96,6 @@ def test_handle_user_deactivated_regular_user(self): self.get_success(self.handler.handle_user_deactivated(r_user_id)) self.store.remove_from_user_dir.called_once_with(r_user_id) - @unittest.DEBUG def test_private_room(self): """ A user can be searched for only by people that are either in a public @@ -341,7 +340,6 @@ def make_homeserver(self, reactor, clock): return hs - @unittest.DEBUG def test_disabling_room_list(self): self.config.user_directory_search_enabled = True From ed6138461b86ee4d085bf2e41e00e01cb9547f7b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:29:19 +1000 Subject: [PATCH 094/231] more fix --- MANIFEST.in | 3 +++ changelog.d/5320.misc | 1 + 2 files changed, 4 insertions(+) create mode 100644 changelog.d/5320.misc diff --git a/MANIFEST.in b/MANIFEST.in index 0500dd6b876a..ad1523e38706 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,7 +9,10 @@ include demo/*.py include demo/*.sh recursive-include synapse/storage/schema *.sql +recursive-include synapse/storage/schema *.sql.postgres +recursive-include synapse/storage/schema *.sql.sqlite recursive-include synapse/storage/schema *.py +recursive-include synapse/storage/schema *.txt recursive-include docs * recursive-include scripts * diff --git a/changelog.d/5320.misc b/changelog.d/5320.misc new file mode 100644 index 000000000000..5b4bf0530361 --- /dev/null +++ b/changelog.d/5320.misc @@ -0,0 +1 @@ +New installs will now use the v54 full schema, rather than starting from the v14 full schema and applying incremental updates up to v54.
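For reference, the sorted() change in patch 092 above matters because os.listdir() returns entries in arbitrary order: without sorting, stream_positions.sql could run before the full schema dump has created the tables it populates, whereas full.sql.postgres/full.sql.sqlite sort ahead of stream_positions.sql ("f" < "s"). A minimal standalone sketch of the selection logic, assuming "specific" is the engine suffix used by the dump filenames ("postgres" or "sqlite"); the helper name is illustrative, not part of Synapse:

import fnmatch
import os


def schema_files_to_apply(sql_dir, specific):
    # Gather the generic .sql files plus the engine-specific dumps
    # (e.g. full.sql.postgres), then sort so that the application
    # order no longer depends on os.listdir()'s arbitrary ordering.
    directory_entries = os.listdir(sql_dir)
    matched = fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter(
        directory_entries, "*.sql." + specific
    )
    return [os.path.join(sql_dir, name) for name in sorted(matched)]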
From 4e75c5e02a6dead87e9a24cbdb8fb015221070ce Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:42:12 +1000 Subject: [PATCH 095/231] WHY IS THIS CALLED A SLIGHTLY DIFFERENT THING --- synapse/storage/schema/full_schemas/54/stream_positions.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index d6433a5af26e..0febedcc5e66 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -4,6 +4,7 @@ INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', - INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); INSERT INTO stats_stream_pos (stream_id) VALUES (null); +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); --- User dir population From 2198b7ce2a1316dfaf8dde061c1a6f30a818ed5a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Jun 2019 01:06:00 +1000 Subject: [PATCH 096/231] add stuff in bg updates --- .../schema/full_schemas/54/full.sql.postgres | 72 ++++++++++++++++--- 1 file changed, 62 insertions(+), 10 deletions(-) diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres index ea3859fd24b7..098434356f0c 100644 --- a/synapse/storage/schema/full_schemas/54/full.sql.postgres +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -3,12 +3,6 @@ -CREATE TABLE _extremities_to_check ( - event_id text -); - - - CREATE TABLE access_tokens ( id bigint NOT NULL, user_id text NOT NULL, @@ -60,6 +54,7 @@ CREATE TABLE application_services_txns ( ); + CREATE TABLE appservice_room_list ( appservice_id text NOT NULL, network_id text NOT NULL, @@ -1201,6 +1196,7 @@ ALTER TABLE ONLY application_services_txns ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); + ALTER TABLE ONLY appservice_stream_position ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); @@ -1526,7 +1522,7 @@ ALTER TABLE ONLY users -CREATE INDEX _extremities_to_check_id ON _extremities_to_check USING btree (event_id); +CREATE INDEX access_tokens_device_id ON access_tokens USING btree (user_id, device_id); @@ -1554,6 +1550,10 @@ CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING +CREATE INDEX current_state_events_member_index ON current_state_events USING btree (state_key) WHERE (type = 'm.room.member'::text); + + + CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id); @@ -1570,7 +1570,7 @@ CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree -CREATE INDEX device_inbox_stream_id ON device_inbox USING btree (stream_id); +CREATE INDEX device_inbox_stream_id_user_id ON device_inbox USING btree (stream_id, user_id); @@ -1594,10 +1594,22 @@ CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USI +CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache USING btree (user_id, device_id); + + + +CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties USING btree (user_id); + + + CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id); +CREATE INDEX device_lists_stream_user_id ON device_lists_stream USING 
btree (user_id, device_id); + + + CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys USING btree (user_id, room_id, session_id); @@ -1638,10 +1650,18 @@ CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id); +CREATE INDEX event_contains_url_index ON events USING btree (room_id, topological_ordering, stream_ordering) WHERE ((contains_url = true) AND (outlier = false)); + + + CREATE INDEX event_json_room_id ON event_json USING btree (room_id); +CREATE INDEX event_push_actions_highlights_index ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering) WHERE (highlight = 1); + + + CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering); @@ -1658,6 +1678,10 @@ CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btre +CREATE INDEX event_push_actions_u_highlight ON event_push_actions USING btree (user_id, stream_ordering); + + + CREATE INDEX event_push_summary_user_rm ON event_push_summary USING btree (user_id, room_id); @@ -1678,10 +1702,18 @@ CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id); +CREATE UNIQUE INDEX event_search_event_id_idx ON event_search USING btree (event_id); + + + CREATE INDEX event_search_fts_idx ON event_search USING gin (vector); +CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups USING btree (state_group); + + + CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering); @@ -1786,6 +1818,10 @@ CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repositor +CREATE INDEX local_media_repository_url_idx ON local_media_repository USING btree (created_ts) WHERE (url_cache IS NOT NULL); + + + CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp"); @@ -1914,7 +1950,7 @@ CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_s -CREATE INDEX state_groups_state_id ON state_groups_state USING btree (state_group); +CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key); @@ -1962,7 +1998,19 @@ CREATE INDEX user_filters_by_user_id_filter_id ON user_filters USING btree (user -CREATE INDEX user_ips_user_ip ON user_ips USING btree (user_id, access_token, ip); +CREATE INDEX user_ips_device_id ON user_ips USING btree (user_id, device_id, last_seen); + + + +CREATE INDEX user_ips_last_seen ON user_ips USING btree (user_id, last_seen); + + + +CREATE INDEX user_ips_last_seen_only ON user_ips USING btree (last_seen); + + + +CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips USING btree (user_id, access_token, ip); @@ -1982,6 +2030,10 @@ CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); +CREATE INDEX users_creation_ts ON users USING btree (creation_ts); + + + CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id); From 83827c4922958abd930bfb6925d8a1a6a3833248 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 3 Jun 2019 17:06:47 +0100 Subject: [PATCH 097/231] Add account_validity's email_sent column to the list of boolean columns in synapse_port_db Fixes #5306 --- scripts/synapse_port_db | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 41be9c922076..b6ba19c77639 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -54,6 +54,7 @@ BOOLEAN_COLUMNS = { "group_roles": 
["is_public"], "local_group_membership": ["is_publicised", "is_admin"], "e2e_room_keys": ["is_verified"], + "account_validity": ["email_sent"], } From fa4b54aca57bebc94e2b763abdae79343a08f969 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:06:54 +0100 Subject: [PATCH 098/231] Ignore room state with null bytes in for room stats --- synapse/storage/stats.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1c0b183a5681..1f39ef211a07 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -328,6 +328,21 @@ def update_room_state(self, room_id, fields): room_id (str) fields (dict[str:Any]) """ + + # For whatever reason some of the fields may contain null bytes, which + # postgres isn't a fan of, so we replace those fields with null. + for col in ( + "join_rules", + "history_visibility", + "encryption", + "name", + "topic", + "avatar", + "canonical_alias" + ): + if "\0" in fields.get(col, ""): + fields[col] = None + return self._simple_upsert( table="room_state", keyvalues={"room_id": room_id}, From 4bd67db100efacc3d31a2f8187b7bdd4479d9bc3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:08:33 +0100 Subject: [PATCH 099/231] Newsfile --- changelog.d/5324.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature new file mode 100644 index 000000000000..01285e965c84 --- /dev/null +++ b/changelog.d/5324.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. From deca87ddf2d12ab67c91f84891d9fa09b4575fcf Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 3 Jun 2019 17:11:28 +0100 Subject: [PATCH 100/231] Changelog --- changelog.d/5325.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5325.bugfix diff --git a/changelog.d/5325.bugfix b/changelog.d/5325.bugfix new file mode 100644 index 000000000000..6914398bccef --- /dev/null +++ b/changelog.d/5325.bugfix @@ -0,0 +1 @@ +Add account_validity's email_sent column to the list of boolean columns in synapse_port_db. From fe2294ec8dc4b37d19930bd1ae0867645207af2e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:17:35 +0100 Subject: [PATCH 101/231] Revert "Newsfile" This reverts commit 4bd67db100efacc3d31a2f8187b7bdd4479d9bc3. --- changelog.d/5324.feature | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature deleted file mode 100644 index 01285e965c84..000000000000 --- a/changelog.d/5324.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. From 0d67a8cd9de9564fcdaa1206c18b411b4c43b74a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:17:57 +0100 Subject: [PATCH 102/231] Newsfile --- changelog.d/5324.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature new file mode 100644 index 000000000000..01285e965c84 --- /dev/null +++ b/changelog.d/5324.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. 
From 0a56966f7d4879f9d517c96a3c714accdce4e17f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:42:52 +0100 Subject: [PATCH 103/231] Fix --- synapse/storage/stats.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1f39ef211a07..ff266b09b03b 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -340,7 +340,8 @@ def update_room_state(self, room_id, fields): "avatar", "canonical_alias" ): - if "\0" in fields.get(col, ""): + field = fields.get(col) + if field and "\0" in field: fields[col] = None return self._simple_upsert( From fec2dcb1a538ab8ab447f724af1a94d5b3517197 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 3 Jun 2019 22:59:51 +0100 Subject: [PATCH 104/231] Enforce validity period on server_keys for fed requests. (#5321) When handling incoming federation requests, make sure that we have an up-to-date copy of the signing key. We do not yet enforce the validity period for event signatures. --- changelog.d/5321.bugfix | 1 + synapse/crypto/keyring.py | 167 ++++++++++++++++--------- synapse/federation/federation_base.py | 4 +- synapse/federation/transport/server.py | 4 +- synapse/groups/attestations.py | 5 +- tests/crypto/test_keyring.py | 135 ++++++++++++++++---- 6 files changed, 228 insertions(+), 88 deletions(-) create mode 100644 changelog.d/5321.bugfix diff --git a/changelog.d/5321.bugfix b/changelog.d/5321.bugfix new file mode 100644 index 000000000000..943a61956dae --- /dev/null +++ b/changelog.d/5321.bugfix @@ -0,0 +1 @@ +Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index b2f4cea536ef..cdec06c88ede 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +from collections import defaultdict import six from six import raise_from @@ -70,6 +71,9 @@ class VerifyKeyRequest(object): json_object(dict): The JSON object to verify. + minimum_valid_until_ts (int): time at which we require the signing key to + be valid. (0 implies we don't care) + deferred(Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no @@ -82,7 +86,8 @@ class VerifyKeyRequest(object): server_name = attr.ib() key_ids = attr.ib() json_object = attr.ib() - deferred = attr.ib() + minimum_valid_until_ts = attr.ib() + deferred = attr.ib(default=attr.Factory(defer.Deferred)) class KeyLookupError(ValueError): @@ -90,14 +95,16 @@ class KeyLookupError(ValueError): class Keyring(object): - def __init__(self, hs): + def __init__(self, hs, key_fetchers=None): self.clock = hs.get_clock() - self._key_fetchers = ( - StoreKeyFetcher(hs), - PerspectivesKeyFetcher(hs), - ServerKeyFetcher(hs), - ) + if key_fetchers is None: + key_fetchers = ( + StoreKeyFetcher(hs), + PerspectivesKeyFetcher(hs), + ServerKeyFetcher(hs), + ) + self._key_fetchers = key_fetchers # map from server name to Deferred. Has an entry for each server with # an ongoing key download; the Deferred completes once the download @@ -106,9 +113,25 @@ def __init__(self, hs): # These are regular, logcontext-agnostic Deferreds. 
self.key_downloads = {} - def verify_json_for_server(self, server_name, json_object): + def verify_json_for_server(self, server_name, json_object, validity_time): + """Verify that a JSON object has been signed by a given server + + Args: + server_name (str): name of the server which must have signed this object + + json_object (dict): object to be checked + + validity_time (int): timestamp at which we require the signing key to + be valid. (0 implies we don't care) + + Returns: + Deferred[None]: completes if the object was correctly signed, otherwise + errbacks with an error + """ + req = server_name, json_object, validity_time + return logcontext.make_deferred_yieldable( - self.verify_json_objects_for_server([(server_name, json_object)])[0] + self.verify_json_objects_for_server((req,))[0] ) def verify_json_objects_for_server(self, server_and_json): @@ -116,10 +139,12 @@ def verify_json_objects_for_server(self, server_and_json): necessary. Args: - server_and_json (list): List of pairs of (server_name, json_object) + server_and_json (iterable[Tuple[str, dict, int]]): + Iterable of triplets of (server_name, json_object, validity_time) + validity_time is a timestamp at which the signing key must be valid. Returns: - List: for each input pair, a deferred indicating success + List: for each input triplet, a deferred indicating success or failure to verify each json object's signature for the given server_name. The deferreds run their callbacks in the sentinel logcontext. @@ -128,12 +153,12 @@ def verify_json_objects_for_server(self, server_and_json): verify_requests = [] handle = preserve_fn(_handle_key_deferred) - def process(server_name, json_object): + def process(server_name, json_object, validity_time): """Process an entry in the request list - Given a (server_name, json_object) pair from the request list, - adds a key request to verify_requests, and returns a deferred which will - complete or fail (in the sentinel context) when verification completes. + Given a (server_name, json_object, validity_time) triplet from the request + list, adds a key request to verify_requests, and returns a deferred which + will complete or fail (in the sentinel context) when verification completes. """ key_ids = signature_ids(json_object, server_name) @@ -148,7 +173,7 @@ def process(server_name, json_object): # add the key request to the queue, but don't start it off yet. verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, defer.Deferred() + server_name, key_ids, json_object, validity_time ) verify_requests.append(verify_request) @@ -160,8 +185,8 @@ def process(server_name, json_object): return handle(verify_request) results = [ - process(server_name, json_object) - for server_name, json_object in server_and_json + process(server_name, json_object, validity_time) + for server_name, json_object, validity_time in server_and_json ] if verify_requests: @@ -298,8 +323,12 @@ def do_iterations(): verify_request.deferred.errback( SynapseError( 401, - "No key for %s with id %s" - % (verify_request.server_name, verify_request.key_ids), + "No key for %s with ids in %s (min_validity %i)" + % ( + verify_request.server_name, + verify_request.key_ids, + verify_request.minimum_valid_until_ts, + ), Codes.UNAUTHORIZED, ) ) @@ -323,18 +352,28 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): Args: fetcher (KeyFetcher): fetcher to use to fetch the keys remaining_requests (set[VerifyKeyRequest]): outstanding key requests.
- Any successfully-completed requests will be reomved from the list. + Any successfully-completed requests will be removed from the list. """ - # dict[str, set(str)]: keys to fetch for each server - missing_keys = {} + # dict[str, dict[str, int]]: keys to fetch. + # server_name -> key_id -> min_valid_ts + missing_keys = defaultdict(dict) + for verify_request in remaining_requests: # any completed requests should already have been removed assert not verify_request.deferred.called - missing_keys.setdefault(verify_request.server_name, set()).update( - verify_request.key_ids - ) + keys_for_server = missing_keys[verify_request.server_name] - results = yield fetcher.get_keys(missing_keys.items()) + for key_id in verify_request.key_ids: + # If we have several requests for the same key, then we only need to + # request that key once, but we should do so with the greatest + # min_valid_until_ts of the requests, so that we can satisfy all of + # the requests. + keys_for_server[key_id] = max( + keys_for_server.get(key_id, -1), + verify_request.minimum_valid_until_ts + ) + + results = yield fetcher.get_keys(missing_keys) completed = list() for verify_request in remaining_requests: @@ -344,25 +383,34 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): # complete this VerifyKeyRequest. result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: - key = result_keys.get(key_id) - if key: - with PreserveLoggingContext(): - verify_request.deferred.callback( - (server_name, key_id, key.verify_key) - ) - completed.append(verify_request) - break + fetch_key_result = result_keys.get(key_id) + if not fetch_key_result: + # we didn't get a result for this key + continue + + if ( + fetch_key_result.valid_until_ts + < verify_request.minimum_valid_until_ts + ): + # key was not valid at this point + continue + + with PreserveLoggingContext(): + verify_request.deferred.callback( + (server_name, key_id, fetch_key_result.verify_key) + ) + completed.append(verify_request) + break remaining_requests.difference_update(completed) class KeyFetcher(object): - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """ Args: - server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for - Note that the iterables may be iterated more than once. + keys_to_fetch (dict[str, dict[str, int]]): + the keys to be fetched. 
server_name -> key_id -> min_valid_ts Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: @@ -378,13 +426,15 @@ def __init__(self, hs): self.store = hs.get_datastore() @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" + keys_to_fetch = ( (server_name, key_id) - for server_name, key_ids in server_name_and_key_ids - for key_id in key_ids + for server_name, keys_for_server in keys_to_fetch.items() + for key_id in keys_for_server.keys() ) + res = yield self.store.get_server_verify_keys(keys_to_fetch) keys = {} for (server_name, key_id), key in res.items(): @@ -508,14 +558,14 @@ def __init__(self, hs): self.perspective_servers = self.config.perspectives @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" @defer.inlineCallbacks def get_key(perspective_name, perspective_keys): try: result = yield self.get_server_verify_key_v2_indirect( - server_name_and_key_ids, perspective_name, perspective_keys + keys_to_fetch, perspective_name, perspective_keys ) defer.returnValue(result) except KeyLookupError as e: @@ -549,13 +599,15 @@ def get_key(perspective_name, perspective_keys): @defer.inlineCallbacks def get_server_verify_key_v2_indirect( - self, server_names_and_key_ids, perspective_name, perspective_keys + self, keys_to_fetch, perspective_name, perspective_keys ): """ Args: - server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for + keys_to_fetch (dict[str, dict[str, int]]): + the keys to be fetched. server_name -> key_id -> min_valid_ts + perspective_name (str): name of the notary server to query for the keys + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the notary server @@ -569,12 +621,10 @@ def get_server_verify_key_v2_indirect( """ logger.info( "Requesting keys %s from notary server %s", - server_names_and_key_ids, + keys_to_fetch.items(), perspective_name, ) - # TODO(mark): Set the minimum_valid_until_ts to that needed by - # the events being validated or the current time if validating - # an incoming request. 
+ try: query_response = yield self.client.post_json( destination=perspective_name, @@ -582,9 +632,10 @@ def get_server_verify_key_v2_indirect( data={ u"server_keys": { server_name: { - key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids + key_id: {u"minimum_valid_until_ts": min_valid_ts} + for key_id, min_valid_ts in server_keys.items() } - for server_name, key_ids in server_names_and_key_ids + for server_name, server_keys in keys_to_fetch.items() } }, long_retries=True, @@ -694,15 +745,18 @@ def __init__(self, hs): self.client = hs.get_http_client() @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" + # TODO make this more resilient results = yield logcontext.make_deferred_yieldable( defer.gatherResults( [ run_in_background( - self.get_server_verify_key_v2_direct, server_name, key_ids + self.get_server_verify_key_v2_direct, + server_name, + server_keys.keys(), ) - for server_name, key_ids in server_name_and_key_ids + for server_name, server_keys in keys_to_fetch.items() ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -721,6 +775,7 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: + # we may have found this key as a side-effect of asking for another. if requested_key_id in keys: continue diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index cffa831d8099..4b38f7c759f9 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -265,7 +265,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ] more_deferreds = keyring.verify_json_objects_for_server([ - (p.sender_domain, p.redacted_pdu_json) + (p.sender_domain, p.redacted_pdu_json, 0) for p in pdus_to_check_sender ]) @@ -298,7 +298,7 @@ def sender_err(e, pdu_to_check): ] more_deferreds = keyring.verify_json_objects_for_server([ - (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json) + (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, 0) for p in pdus_to_check_event_id ]) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index d0efc4e0d325..0db8858cf12d 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -94,6 +94,7 @@ class NoAuthenticationError(AuthenticationError): class Authenticator(object): def __init__(self, hs): + self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname self.store = hs.get_datastore() @@ -102,6 +103,7 @@ def __init__(self, hs): # A method just so we can pass 'self' as the authenticator to the Servlets @defer.inlineCallbacks def authenticate_request(self, request, content): + now = self._clock.time_msec() json_request = { "method": request.method.decode('ascii'), "uri": request.uri.decode('ascii'), @@ -138,7 +140,7 @@ def authenticate_request(self, request, content): 401, "Missing Authorization headers", Codes.UNAUTHORIZED, ) - yield self.keyring.verify_json_for_server(origin, json_request) + yield self.keyring.verify_json_for_server(origin, json_request, now) logger.info("Request from %s", origin) request.authenticated_entity = origin diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 786149be65d1..fa6b641ee15c 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -97,10 +97,11 @@ def verify_attestation(self, attestation, group_id, user_id, 
server_name=None): # TODO: We also want to check that *new* attestations that people give # us to store are valid for at least a little while. - if valid_until_ms < self.clock.time_msec(): + now = self.clock.time_msec() + if valid_until_ms < now: raise SynapseError(400, "Attestation expired") - yield self.keyring.verify_json_for_server(server_name, attestation) + yield self.keyring.verify_json_for_server(server_name, attestation, now) def create_attestation(self, group_id, user_id): """Create an attestation for the group_id and user_id with default diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 3933ad434778..096401938dec 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,6 +19,7 @@ import canonicaljson import signedjson.key import signedjson.sign +from signedjson.key import get_verify_key from twisted.internet import defer @@ -137,7 +138,7 @@ def first_lookup(): context_11.request = "11" res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1), ("server11", {})] + [("server10", json1, 0), ("server11", {}, 0)] ) # the unsigned json should be rejected pretty quickly @@ -174,7 +175,7 @@ def second_lookup(): self.http_client.post_json.return_value = defer.Deferred() res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1)] + [("server10", json1, 0)] ) res_deferreds_2[0].addBoth(self.check_context, None) yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) @@ -197,31 +198,108 @@ def test_verify_json_for_server(self): kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key(1) - key1_id = "%s:%s" % (key1.alg, key1.version) - r = self.hs.datastore.store_server_verify_keys( "server9", time.time() * 1000, - [ - ( - "server9", - key1_id, - FetchKeyResult(signedjson.key.get_verify_key(key1), 1000), - ), - ], + [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], ) self.get_success(r) + json1 = {} signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}) + d = _verify_json_for_server(kr, "server9", {}, 0) self.failureResultOf(d, SynapseError) - d = _verify_json_for_server(kr, "server9", json1) - self.assertFalse(d.called) + # should succeed on a signed object + d = _verify_json_for_server(kr, "server9", json1, 500) + # self.assertFalse(d.called) self.get_success(d) + def test_verify_json_dedupes_key_requests(self): + """Two requests for the same key should be deduped.""" + key1 = signedjson.key.generate_signing_key(1) + + def get_keys(keys_to_fetch): + # there should only be one request object (with the max validity) + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) + } + } + ) + + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock(side_effect=get_keys) + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,)) + + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + # the first request should succeed; the second should fail because the key + # has expired + results = kr.verify_json_objects_for_server( + [("server1", json1, 500), ("server1", json1, 1500)] + ) + self.assertEqual(len(results), 2) + self.get_success(results[0]) + e = self.get_failure(results[1], SynapseError).value + self.assertEqual(e.errcode, "M_UNAUTHORIZED") + self.assertEqual(e.code, 401) + + # there should have been a single call to the
fetcher + mock_fetcher.get_keys.assert_called_once() + + def test_verify_json_falls_back_to_other_fetchers(self): + """If the first fetcher cannot provide a recent enough key, we fall back""" + key1 = signedjson.key.generate_signing_key(1) + + def get_keys1(keys_to_fetch): + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800) + } + } + ) + + def get_keys2(keys_to_fetch): + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) + } + } + ) + + mock_fetcher1 = keyring.KeyFetcher() + mock_fetcher1.get_keys = Mock(side_effect=get_keys1) + mock_fetcher2 = keyring.KeyFetcher() + mock_fetcher2.get_keys = Mock(side_effect=get_keys2) + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher1, mock_fetcher2)) + + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + results = kr.verify_json_objects_for_server( + [("server1", json1, 1200), ("server1", json1, 1500)] + ) + self.assertEqual(len(results), 2) + self.get_success(results[0]) + e = self.get_failure(results[1], SynapseError).value + self.assertEqual(e.errcode, "M_UNAUTHORIZED") + self.assertEqual(e.code, 401) + + # there should have been a single call to each fetcher + mock_fetcher1.get_keys.assert_called_once() + mock_fetcher2.get_keys.assert_called_once() + class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): @@ -260,8 +338,8 @@ def get_json(destination, path, **kwargs): self.http_client.get_json.side_effect = get_json - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) + keys_to_fetch = {SERVER_NAME: {"key1": 0}} + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) @@ -288,9 +366,7 @@ def get_json(destination, path, **kwargs): # change the server name: it should cause a rejection response["server_name"] = "OTHER_SERVER" - self.get_failure( - fetcher.get_keys(server_name_and_key_ids), KeyLookupError - ) + self.get_failure(fetcher.get_keys(keys_to_fetch), KeyLookupError) class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): @@ -342,8 +418,8 @@ def post_json(destination, path, data, **kwargs): self.http_client.post_json.side_effect = post_json - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) + keys_to_fetch = {SERVER_NAME: {"key1": 0}} + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) @@ -401,7 +477,7 @@ def build_response(): def get_key_from_perspectives(response): fetcher = PerspectivesKeyFetcher(self.hs) - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + keys_to_fetch = {SERVER_NAME: {"key1": 0}} def post_json(destination, path, data, **kwargs): self.assertEqual(destination, self.mock_perspective_server.server_name) @@ -410,9 +486,7 @@ def post_json(destination, path, data, **kwargs): self.http_client.post_json.side_effect = post_json - return self.get_success( - fetcher.get_keys(server_name_and_key_ids) - ) + return self.get_success(fetcher.get_keys(keys_to_fetch)) # start with a valid response so we 
can check we are testing the right thing response = build_response() @@ -435,6 +509,11 @@ def post_json(destination, path, data, **kwargs): self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig") +def get_key_id(key): + """Get the matrix ID tag for a given SigningKey or VerifyKey""" + return "%s:%s" % (key.alg, key.version) + + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): with LoggingContext("testctx") as ctx: @@ -445,14 +524,16 @@ def run_in_context(f, *args, **kwargs): defer.returnValue(rv) -def _verify_json_for_server(keyring, server_name, json_object): +def _verify_json_for_server(keyring, server_name, json_object, validity_time): """thin wrapper around verify_json_for_server which makes sure it is wrapped with the patched defer.inlineCallbacks. """ @defer.inlineCallbacks def v(): - rv1 = yield keyring.verify_json_for_server(server_name, json_object) + rv1 = yield keyring.verify_json_for_server( + server_name, json_object, validity_time + ) defer.returnValue(rv1) return run_in_context(v) From 06a1f3e20719ab2631089a37cef50b80c1155f89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 17:56:54 +0100 Subject: [PATCH 105/231] Reduce timeout for outbound /key/v2/server requests. --- synapse/crypto/keyring.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index cdec06c88ede..bef6498f4b55 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -786,6 +786,19 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id), ignore_backoff=True, + + # we only give the remote server 10s to respond. It should be an + # easy request to handle, so if it doesn't reply within 10s, it's + # probably not going to. + # + # Furthermore, when we are acting as a notary server, we cannot + # wait all day for all of the origin servers, as the requesting + # server will otherwise time out before we can respond. + # + # (Note that get_json may make 4 attempts, so this can still take + # almost 45 seconds to fetch the headers, plus up to another 60s to + # read the response). + timeout=10000, ) except (NotRetryingDestination, RequestSendFailed) as e: raise_from(KeyLookupError("Failed to connect to remote server"), e) From dce6e9e0c11fc5d99b2da6698aed04e9f525f242 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 23:50:43 +0100 Subject: [PATCH 106/231] Avoid rapidly backing-off a server if we ignore the retry interval --- changelog.d/5335.bugfix | 1 + synapse/util/retryutils.py | 60 +++++++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 23 deletions(-) create mode 100644 changelog.d/5335.bugfix diff --git a/changelog.d/5335.bugfix b/changelog.d/5335.bugfix new file mode 100644 index 000000000000..7318cbe35e37 --- /dev/null +++ b/changelog.d/5335.bugfix @@ -0,0 +1 @@ +Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. 
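To make the failure mode concrete before the retryutils changes below: each failed request multiplies the retry interval by multiplier_retry_interval (5), clamped between min_retry_interval (10 minutes) and max_retry_interval (24 hours), so a server probed repeatedly with ignore_backoff=True could previously be driven to the 24-hour ceiling after only five failures even if it was down briefly. A minimal sketch approximating that growth rule with the default constants from RetryDestinationLimiter (the helper is illustrative, not Synapse code):

MIN_RETRY_INTERVAL = 10 * 60 * 1000  # 10 minutes, in milliseconds
MAX_RETRY_INTERVAL = 24 * 60 * 60 * 1000  # 24 hours, in milliseconds
MULTIPLIER = 5


def next_interval(current_ms):
    # First failure starts at the minimum; each subsequent failure
    # multiplies the interval, capped at the maximum.
    if not current_ms:
        return MIN_RETRY_INTERVAL
    return min(current_ms * MULTIPLIER, MAX_RETRY_INTERVAL)


interval = 0
for failure in range(1, 6):
    interval = next_interval(interval)
    print(failure, interval // 60000, "minutes")
# prints: 1 10, 2 50, 3 250, 4 1250, 5 1440 (capped at 24 hours)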
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 26cce7d197d6..f6dfa77d8fde 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -46,8 +46,7 @@ def __init__(self, retry_last_ts, retry_interval, destination): @defer.inlineCallbacks -def get_retry_limiter(destination, clock, store, ignore_backoff=False, - **kwargs): +def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs): """For a given destination check if we have previously failed to send a request there and are waiting before retrying the destination. If we are not ready to retry the destination, this will raise a @@ -60,8 +59,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, clock (synapse.util.clock): timing source store (synapse.storage.transactions.TransactionStore): datastore ignore_backoff (bool): true to ignore the historical backoff data and - try the request anyway. We will still update the next - retry_interval on success/failure. + try the request anyway. We will still reset the retry_interval on success. Example usage: @@ -75,13 +73,12 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, """ retry_last_ts, retry_interval = (0, 0) - retry_timings = yield store.get_destination_retry_timings( - destination - ) + retry_timings = yield store.get_destination_retry_timings(destination) if retry_timings: retry_last_ts, retry_interval = ( - retry_timings["retry_last_ts"], retry_timings["retry_interval"] + retry_timings["retry_last_ts"], + retry_timings["retry_interval"], ) now = int(clock.time_msec()) @@ -93,22 +90,31 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, destination=destination, ) + # if we are ignoring the backoff data, we should also not increment the backoff + # when we get another failure - otherwise a server can very quickly reach the + # maximum backoff even though it might only have been down briefly + backoff_on_failure = not ignore_backoff + defer.returnValue( RetryDestinationLimiter( - destination, - clock, - store, - retry_interval, - **kwargs + destination, clock, store, retry_interval, backoff_on_failure=backoff_on_failure, **kwargs ) ) class RetryDestinationLimiter(object): - def __init__(self, destination, clock, store, retry_interval, - min_retry_interval=10 * 60 * 1000, - max_retry_interval=24 * 60 * 60 * 1000, - multiplier_retry_interval=5, backoff_on_404=False): + def __init__( + self, + destination, + clock, + store, + retry_interval, + min_retry_interval=10 * 60 * 1000, + max_retry_interval=24 * 60 * 60 * 1000, + multiplier_retry_interval=5, + backoff_on_404=False, + backoff_on_failure=True, + ): """Marks the destination as "down" if an exception is thrown in the context, except for CodeMessageException with code < 500. @@ -128,6 +134,9 @@ def __init__(self, destination, clock, store, retry_interval, multiplier_retry_interval (int): The multiplier to use to increase the retry interval after a failed request. backoff_on_404 (bool): Back off if we get a 404 + + backoff_on_failure (bool): set to False if we should not increase the + retry interval on a failure.
""" self.clock = clock self.store = store @@ -138,6 +147,7 @@ def __init__(self, destination, clock, store, retry_interval, self.max_retry_interval = max_retry_interval self.multiplier_retry_interval = multiplier_retry_interval self.backoff_on_404 = backoff_on_404 + self.backoff_on_failure = backoff_on_failure def __enter__(self): pass @@ -173,10 +183,13 @@ def __exit__(self, exc_type, exc_val, exc_tb): if not self.retry_interval: return - logger.debug("Connection to %s was successful; clearing backoff", - self.destination) + logger.debug( + "Connection to %s was successful; clearing backoff", self.destination + ) retry_last_ts = 0 self.retry_interval = 0 + elif not self.backoff_on_failure: + return else: # We couldn't connect. if self.retry_interval: @@ -190,7 +203,10 @@ def __exit__(self, exc_type, exc_val, exc_tb): logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", - self.destination, exc_type, exc_val, self.retry_interval + self.destination, + exc_type, + exc_val, + self.retry_interval, ) retry_last_ts = int(self.clock.time_msec()) @@ -201,9 +217,7 @@ def store_retry_timings(): self.destination, retry_last_ts, self.retry_interval ) except Exception: - logger.exception( - "Failed to store destination_retry_timings", - ) + logger.exception("Failed to store destination_retry_timings") # we deliberately do this in the background. synapse.util.logcontext.run_in_background(store_retry_timings) From def5ea4062295759d7c28d9c2302187871a1bc72 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 15:36:41 +0100 Subject: [PATCH 107/231] Don't bomb out on direct key fetches as soon as one fails --- synapse/crypto/keyring.py | 58 ++++++++++++++++++++++-------------- tests/crypto/test_keyring.py | 12 ++++---- 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index bef6498f4b55..5660c9602340 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -46,6 +46,7 @@ ) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext, unwrapFirstError +from synapse.util.async_helpers import yieldable_gather_results from synapse.util.logcontext import ( LoggingContext, PreserveLoggingContext, @@ -169,7 +170,12 @@ def process(server_name, json_object, validity_time): ) ) - logger.debug("Verifying for %s with key_ids %s", server_name, key_ids) + logger.debug( + "Verifying for %s with key_ids %s, min_validity %i", + server_name, + key_ids, + validity_time, + ) # add the key request to the queue, but don't start it off yet. 
verify_request = VerifyKeyRequest( @@ -744,34 +750,42 @@ def __init__(self, hs): self.clock = hs.get_clock() self.client = hs.get_http_client() - @defer.inlineCallbacks def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" - # TODO make this more resilient - results = yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.get_server_verify_key_v2_direct, - server_name, - server_keys.keys(), - ) - for server_name, server_keys in keys_to_fetch.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - merged = {} - for result in results: - merged.update(result) + results = {} + + @defer.inlineCallbacks + def get_key(key_to_fetch_item): + server_name, key_ids = key_to_fetch_item + try: + keys = yield self.get_server_verify_key_v2_direct(server_name, key_ids) + results[server_name] = keys + except KeyLookupError as e: + logger.warning( + "Error looking up keys %s from %s: %s", key_ids, server_name, e + ) + except Exception: + logger.exception("Error getting keys %s from %s", key_ids, server_name) - defer.returnValue( - {server_name: keys for server_name, keys in merged.items() if keys} + return yieldable_gather_results(get_key, keys_to_fetch.items()).addCallback( + lambda _: results ) @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): + """ + + Args: + server_name (str): + key_ids (iterable[str]): + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key ID to lookup result + + Raises: + KeyLookupError if there was a problem making the lookup + """ keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: @@ -823,7 +837,7 @@ def get_server_verify_key_v2_direct(self, server_name, key_ids): ) keys.update(response_keys) - defer.returnValue({server_name: keys}) + defer.returnValue(keys) @defer.inlineCallbacks diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 096401938dec..4cff7e36c82f 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,11 +25,7 @@ from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import ( - KeyLookupError, - PerspectivesKeyFetcher, - ServerKeyFetcher, -) +from synapse.crypto.keyring import PerspectivesKeyFetcher, ServerKeyFetcher from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -364,9 +360,11 @@ def get_json(destination, path, **kwargs): bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) ) - # change the server name: it should cause a rejection + # change the server name: the result should be ignored response["server_name"] = "OTHER_SERVER" - self.get_failure(fetcher.get_keys(keys_to_fetch), KeyLookupError) + + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) + self.assertEqual(keys, {}) class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): From c5d60eadd5949ab4c12857e0830eb0afbb857f72 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 18:07:19 +0100 Subject: [PATCH 108/231] Notary server: make requests to origins in parallel ... else we're guaranteed to time out. 
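In outline, the fix swaps a sequential per-origin wait for a gather over deferreds. A minimal sketch of that pattern (illustrative only: fetch_one stands in for the per-origin key fetch, and this skips synapse's logcontext handling):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def fetch_from_all(origins, fetch_one):
        results = {}

        @defer.inlineCallbacks
        def get_one(origin):
            try:
                results[origin] = yield fetch_one(origin)
            except Exception as e:
                # one dead or slow origin must not fail the whole batch
                print("fetch from %s failed: %s" % (origin, e))

        # every fetch is started here before any of them is awaited
        yield defer.gatherResults(
            [get_one(origin) for origin in origins], consumeErrors=True
        )
        defer.returnValue(results)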
--- synapse/crypto/keyring.py | 10 +++++++++- synapse/rest/key/v2/remote_key_resource.py | 12 ++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5660c9602340..6dae713ebc95 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -751,7 +751,15 @@ def __init__(self, hs): self.client = hs.get_http_client() def get_keys(self, keys_to_fetch): - """see KeyFetcher.get_keys""" + """ + Args: + keys_to_fetch (dict[str, iterable[str]]): + the keys to be fetched. server_name -> key_ids + + Returns: + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: + map from server_name -> key_id -> FetchKeyResult + """ results = {} diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 21c3c807b9d4..8a730bbc354c 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -20,7 +20,7 @@ from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError -from synapse.crypto.keyring import KeyLookupError, ServerKeyFetcher +from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler from synapse.http.servlet import parse_integer, parse_json_object_from_request @@ -215,15 +215,7 @@ def query_keys(self, request, query, query_remote_on_cache_miss=False): json_results.add(bytes(result["key_json"])) if cache_misses and query_remote_on_cache_miss: - for server_name, key_ids in cache_misses.items(): - try: - yield self.fetcher.get_server_verify_key_v2_direct( - server_name, key_ids - ) - except KeyLookupError as e: - logger.info("Failed to fetch key: %s", e) - except Exception: - logger.exception("Failed to get key for %r", server_name) + yield self.fetcher.get_keys(cache_misses) yield self.query_keys( request, query, query_remote_on_cache_miss=False ) From a3f2d000e031f2e9b6e76f679967fd0c0ba890f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 23:12:48 +0100 Subject: [PATCH 109/231] changelog --- changelog.d/5333.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5333.bugfix diff --git a/changelog.d/5333.bugfix b/changelog.d/5333.bugfix new file mode 100644 index 000000000000..cb05a6dd63b4 --- /dev/null +++ b/changelog.d/5333.bugfix @@ -0,0 +1 @@ +Fix various problems which made the signing-key notary server time out for some requests. \ No newline at end of file From b2b90b7d34bf9afc437df6a2e58ab89cfd8ab91f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Jun 2019 15:54:27 +1000 Subject: [PATCH 110/231] Hawkowl/fix missing auth (#5328) --- changelog.d/5328.misc | 1 + synapse/rest/client/v1/voip.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5328.misc diff --git a/changelog.d/5328.misc b/changelog.d/5328.misc new file mode 100644 index 000000000000..e1b9dc58a3d9 --- /dev/null +++ b/changelog.d/5328.misc @@ -0,0 +1 @@ +The base classes for the v1 and v2_alpha REST APIs have been unified. 
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 0975df84cf66..638104921001 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -29,6 +29,7 @@ class VoipRestServlet(RestServlet): def __init__(self, hs): super(VoipRestServlet, self).__init__() self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): From 5bdb189f86b462890ff55c9244506b0c41fed856 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 11:14:16 +0100 Subject: [PATCH 111/231] Improve docstrings on MatrixFederationClient. (#5332) --- changelog.d/5332.misc | 1 + synapse/http/matrixfederationclient.py | 71 ++++++++++++++++++++------ 2 files changed, 56 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5332.misc diff --git a/changelog.d/5332.misc b/changelog.d/5332.misc new file mode 100644 index 000000000000..dcfac4eac9fe --- /dev/null +++ b/changelog.d/5332.misc @@ -0,0 +1 @@ +Improve docstrings on MatrixFederationClient. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 8197619a7806..663ea72a7a3a 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -285,7 +285,24 @@ def _send_request( request (MatrixFederationRequest): details of request to be sent timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server). 60s by default. + (including connecting to the server), *for each attempt*. + 60s by default. + + long_retries (bool): whether to use the long retry algorithm. + + The regular retry algorithm makes 4 attempts, with intervals + [0.5s, 1s, 2s]. + + The long retry algorithm makes 11 attempts, with intervals + [4s, 16s, 60s, 60s, ...] + + Both algorithms add -20%/+40% jitter to the retry intervals. + + Note that the above intervals are *in addition* to the time spent + waiting for the request to complete (up to `timeout` ms). + + NB: the long retry algorithm takes over 20 minutes to complete, with + a default timeout of 60s! ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -566,10 +583,14 @@ def put_json(self, destination, path, args={}, data={}, the request body. This will be encoded as JSON. json_data_callback (callable): A callable returning the dict to use as the request body. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. - timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. backoff_on_404 (bool): True if we should count a 404 response as @@ -627,15 +648,22 @@ def post_json(self, destination, path, data={}, long_retries=False, Args: destination (str): The remote server to send the HTTP request to. + path (str): The HTTP path. + data (dict): A dict containing the data that will be used as the request body. This will be encoded as JSON. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. 
- timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + args (dict): query params Returns: Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The @@ -686,14 +714,19 @@ def get_json(self, destination, path, args=None, retry_on_dns_fail=True, Args: destination (str): The remote server to send the HTTP request to. + path (str): The HTTP path. + args (dict|None): A dictionary used to create query strings, defaults to None. - timeout (int): How long to try (in ms) the destination for before - giving up. None indicates no timeout and that the request will - be retried. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. @@ -742,12 +775,18 @@ def delete_json(self, destination, path, long_retries=False, destination (str): The remote server to send the HTTP request to. path (str): The HTTP path. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. - timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + + args (dict): query params Returns: Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. From 4d08b8f30c6a10caaa570bd93059d496b66185a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 11:53:07 +0100 Subject: [PATCH 112/231] Don't do long retries when calling the key notary server. (#5334) It takes at least 20 minutes to work through the long_retries schedule (11 attempts, each with a 60 second timeout, and 60 seconds between each request), so if the notary server isn't returning within the timeout, we'll just end up blocking whatever request is happening for 20 minutes. Ain't nobody got time for that. --- changelog.d/5334.bugfix | 1 + synapse/crypto/keyring.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/5334.bugfix diff --git a/changelog.d/5334.bugfix b/changelog.d/5334.bugfix new file mode 100644 index 000000000000..ed141e0918c7 --- /dev/null +++ b/changelog.d/5334.bugfix @@ -0,0 +1 @@ +Fix bug which would make certain operations (such as room joins) block for 20 minutes while attempting to fetch verification keys.
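A quick back-of-the-envelope check of the twenty-minute figure, using only the schedule described in the commit message (11 attempts, a 60 second timeout on each, and 60 seconds between attempts, before the -20%/+40% jitter):

    attempts = 11
    timeout_s = 60   # per-attempt response timeout
    pause_s = 60     # wait between attempts

    worst_case_s = attempts * timeout_s + (attempts - 1) * pause_s
    print(worst_case_s)         # 1260
    print(worst_case_s / 60.0)  # 21.0 minutes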
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6dae713ebc95..0fd15287e775 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -644,7 +644,6 @@ def get_server_verify_key_v2_indirect( for server_name, server_keys in keys_to_fetch.items() } }, - long_retries=True, ) except (NotRetryingDestination, RequestSendFailed) as e: raise_from(KeyLookupError("Failed to connect to remote server"), e) From ac3cc3236748877b692e6c6c771019fdb23d3e71 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 13:47:44 +0100 Subject: [PATCH 113/231] Make account validity renewal emails work when email notifs are disabled --- changelog.d/5341.bugfix | 1 + synapse/config/emailconfig.py | 99 ++++++++++++++++++++--------------- 2 files changed, 58 insertions(+), 42 deletions(-) create mode 100644 changelog.d/5341.bugfix diff --git a/changelog.d/5341.bugfix b/changelog.d/5341.bugfix new file mode 100644 index 000000000000..a7aaa95f3949 --- /dev/null +++ b/changelog.d/5341.bugfix @@ -0,0 +1 @@ +Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 342a6ce5fdc3..cf4875f1f347 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,12 +31,49 @@ class EmailConfig(Config): def read_config(self, config): + # TODO: We should separate better the email configuration from the notification + # and account validity config. + self.email_enable_notifs = False email_config = config.get("email", {}) + + self.email_smtp_host = email_config.get("smtp_host", None) + self.email_smtp_port = email_config.get("smtp_port", None) + self.email_smtp_user = email_config.get("smtp_user", None) + self.email_smtp_pass = email_config.get("smtp_pass", None) + self.require_transport_security = email_config.get( + "require_transport_security", False + ) + if "app_name" in email_config: + self.email_app_name = email_config["app_name"] + else: + self.email_app_name = "Matrix" + + self.email_notif_from = email_config.get("notif_from", None) + # make sure it's valid + parsed = email.utils.parseaddr(self.email_notif_from) + if self.email_notif_from and parsed[1] == '': + raise RuntimeError("Invalid notif_from address") + + template_dir = email_config.get("template_dir") + # we need an absolute path, because we change directory after starting (and + # we don't yet know what auxilliary templates like mail.css we will need). + # (Note that loading as package_resources with jinja.PackageLoader doesn't + # work for the same reason.) 
+ if not template_dir: + template_dir = pkg_resources.resource_filename( + 'synapse', 'res/templates' + ) + + self.email_template_dir = os.path.abspath(template_dir) + self.email_enable_notifs = email_config.get("enable_notifs", False) + account_validity_renewal_enabled = config.get( + "account_validity", {}, + ).get("renew_at") - if self.email_enable_notifs: + if self.email_enable_notifs or account_validity_renewal_enabled: # make sure we can import the required deps import jinja2 import bleach @@ -42,6 +81,7 @@ def read_config(self, config): jinja2 bleach + if self.email_enable_notifs: required = [ "smtp_host", "smtp_port", @@ -66,34 +106,13 @@ def read_config(self, config): "email.enable_notifs is True but no public_baseurl is set" ) - self.email_smtp_host = email_config["smtp_host"] - self.email_smtp_port = email_config["smtp_port"] - self.email_notif_from = email_config["notif_from"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] - self.email_expiry_template_html = email_config.get( - "expiry_template_html", "notice_expiry.html", - ) - self.email_expiry_template_text = email_config.get( - "expiry_template_text", "notice_expiry.txt", - ) - - template_dir = email_config.get("template_dir") - # we need an absolute path, because we change directory after starting (and - # we don't yet know what auxilliary templates like mail.css we will need). - # (Note that loading as package_resources with jinja.PackageLoader doesn't - # work for the same reason.) - if not template_dir: - template_dir = pkg_resources.resource_filename( - 'synapse', 'res/templates' - ) - template_dir = os.path.abspath(template_dir) for f in self.email_notif_template_text, self.email_notif_template_html: - p = os.path.join(template_dir, f) + p = os.path.join(self.email_template_dir, f) if not os.path.isfile(p): raise ConfigError("Unable to find email template file %s" % (p, )) - self.email_template_dir = template_dir self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True @@ -101,29 +120,25 @@ def read_config(self, config): self.email_riot_base_url = email_config.get( "riot_base_url", None ) - self.email_smtp_user = email_config.get( - "smtp_user", None - ) - self.email_smtp_pass = email_config.get( - "smtp_pass", None - ) - self.require_transport_security = email_config.get( - "require_transport_security", False - ) - if "app_name" in email_config: - self.email_app_name = email_config["app_name"] - else: - self.email_app_name = "Matrix" - - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if parsed[1] == '': - raise RuntimeError("Invalid notif_from address") else: self.email_enable_notifs = False # Not much point setting defaults for the rest: it would be an # error for them to be used. 
+ if account_validity_renewal_enabled: + self.email_expiry_template_html = email_config.get( + "expiry_template_html", "notice_expiry.html", + ) + self.email_expiry_template_text = email_config.get( + "expiry_template_text", "notice_expiry.txt", + ) + + for f in self.email_expiry_template_text, self.email_expiry_template_html: + p = os.path.join(self.email_template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find email template file %s" % (p, )) + + def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable sending emails for notification events or expiry notices From 1cc5fc1f6c316e8ea1c50669cd80f4a7d441570a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 13:51:23 +0100 Subject: [PATCH 114/231] Lint --- synapse/config/emailconfig.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index cf4875f1f347..7ca3505895fc 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -138,7 +138,6 @@ def read_config(self, config): if not os.path.isfile(p): raise ConfigError("Unable to find email template file %s" % (p, )) - def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable sending emails for notification events or expiry notices From 2f62e1f6ff671bf6404bd90b1d945f8d029f0d37 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 14:24:36 +0100 Subject: [PATCH 115/231] Only parse from email if provided --- synapse/config/emailconfig.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 7ca3505895fc..8400471f408e 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -51,10 +51,11 @@ def read_config(self, config): self.email_app_name = "Matrix" self.email_notif_from = email_config.get("notif_from", None) - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if self.email_notif_from and parsed[1] == '': - raise RuntimeError("Invalid notif_from address") + if self.email_notif_from is not None: + # make sure it's valid + parsed = email.utils.parseaddr(self.email_notif_from) + if parsed[1] == '': + raise RuntimeError("Invalid notif_from address") template_dir = email_config.get("template_dir") # we need an absolute path, because we change directory after starting (and From b4189b112fcacf8143aa8fe7674d5c2518067bc8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 18:01:09 +0100 Subject: [PATCH 116/231] Rename get_events->get_events_from_store_or_dest (#5344) We have too many things called get_event, and it's hard to figure out what we mean. Also remove some unused params from the signature, and add some logging. --- changelog.d/5344.misc | 1 + synapse/federation/federation_client.py | 33 ++++++++++--------------- 2 files changed, 14 insertions(+), 20 deletions(-) create mode 100644 changelog.d/5344.misc diff --git a/changelog.d/5344.misc b/changelog.d/5344.misc new file mode 100644 index 000000000000..a20c563bf170 --- /dev/null +++ b/changelog.d/5344.misc @@ -0,0 +1 @@ +Clean up FederationClient.get_events for clarity. 
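The renamed helper keeps a two-step shape which the sketch below makes explicit (a sketch only: fetch_remote stands in for the real per-event get_pdu call, which the actual code runs concurrently rather than one at a time):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def get_events_from_store_or_dest(store, destination, event_ids, fetch_remote):
        # first, satisfy whatever we can from the local store
        seen = yield store.get_events(event_ids, allow_rejected=True)
        fetched = list(seen.values())
        failed_to_fetch = set()

        # then ask the single remote destination for the misses only
        for event_id in (e for e in event_ids if e not in seen):
            event = yield fetch_remote(destination, event_id)
            if event is None:
                failed_to_fetch.add(event_id)
            else:
                fetched.append(event)

        defer.returnValue((fetched, failed_to_fetch))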
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f3fc897a0ac6..916ff487c938 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,6 @@ import copy import itertools import logging -import random from six.moves import range @@ -326,8 +325,8 @@ def get_state_for_room(self, destination, room_id, event_id): state_event_ids = result["pdu_ids"] auth_event_ids = result.get("auth_chain_ids", []) - fetched_events, failed_to_fetch = yield self.get_events( - [destination], room_id, set(state_event_ids + auth_event_ids) + fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest( + destination, room_id, set(state_event_ids + auth_event_ids) ) if failed_to_fetch: @@ -397,27 +396,20 @@ def get_state_for_room(self, destination, room_id, event_id): defer.returnValue((signed_pdus, signed_auth)) @defer.inlineCallbacks - def get_events(self, destinations, room_id, event_ids, return_local=True): - """Fetch events from some remote destinations, checking if we already - have them. + def get_events_from_store_or_dest(self, destination, room_id, event_ids): + """Fetch events from a remote destination, checking if we already have them. Args: - destinations (list) + destination (str) room_id (str) event_ids (list) - return_local (bool): Whether to include events we already have in - the DB in the returned list of events Returns: Deferred: A deferred resolving to a 2-tuple where the first is a list of events and the second is a list of event ids that we failed to fetch. """ - if return_local: - seen_events = yield self.store.get_events(event_ids, allow_rejected=True) - signed_events = list(seen_events.values()) - else: - seen_events = yield self.store.have_seen_events(event_ids) - signed_events = [] + seen_events = yield self.store.get_events(event_ids, allow_rejected=True) + signed_events = list(seen_events.values()) failed_to_fetch = set() @@ -428,10 +420,11 @@ def get_events(self, destinations, room_id, event_ids, return_local=True): if not missing_events: defer.returnValue((signed_events, failed_to_fetch)) - def random_server_list(): - srvs = list(destinations) - random.shuffle(srvs) - return srvs + logger.debug( + "Fetching unknown state/auth events %s for room %s", + missing_events, + event_ids, + ) room_version = yield self.store.get_room_version(room_id) @@ -443,7 +436,7 @@ def random_server_list(): deferreds = [ run_in_background( self.get_pdu, - destinations=random_server_list(), + destinations=[destination], event_id=e_id, room_version=room_version, ) From dae224a73f6a799718f7dfc5c6d8ac3e050fca1d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 18:05:06 +0100 Subject: [PATCH 117/231] Fix failure to fetch batches of PDUs (#5342) FederationClient.get_pdu is called in a loop to fetch a batch of PDUs. A failure to fetch one should not result in a failure of the whole batch. Add the missing `continue`. --- changelog.d/5342.bugfix | 1 + synapse/federation/federation_client.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5342.bugfix diff --git a/changelog.d/5342.bugfix b/changelog.d/5342.bugfix new file mode 100644 index 000000000000..66a3076292f6 --- /dev/null +++ b/changelog.d/5342.bugfix @@ -0,0 +1 @@ +Fix failure when fetching batches of events during backfill, etc. 
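To make the control flow concrete before the one-line diff: each except arm in the destination loop needs an explicit continue, otherwise execution falls through to the success path under the try/except. An illustration with stand-in names (not the real get_pdu body):

    def get_first_pdu(destinations, fetch_one, check_sigs):
        for destination in destinations:
            try:
                pdu = fetch_one(destination)
            except IOError as e:
                print("Failed to get PDU from %s because %s" % (destination, e))
                continue  # without this we fall through with no pdu bound
            except RuntimeError as e:
                print(str(e))
                continue
            # only reached when the fetch succeeded
            return check_sigs(pdu)
        return None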
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 916ff487c938..d559605382f8 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -279,6 +279,7 @@ def get_pdu(self, destinations, event_id, room_version, outlier=False, "Failed to get PDU %s from %s because %s", event_id, destination, e, ) + continue except NotRetryingDestination as e: logger.info(str(e)) continue From aa530e68005d041ad037b081fe748d301a11291a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 22:02:53 +0100 Subject: [PATCH 118/231] Call RetryLimiter correctly (#5340) Fixes a regression introduced in #5335. --- changelog.d/5340.bugfix | 2 ++ synapse/util/retryutils.py | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5340.bugfix diff --git a/changelog.d/5340.bugfix b/changelog.d/5340.bugfix new file mode 100644 index 000000000000..931ee904e141 --- /dev/null +++ b/changelog.d/5340.bugfix @@ -0,0 +1,2 @@ +Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. + diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index f6dfa77d8fde..1a77456498ac 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -97,7 +97,12 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs) defer.returnValue( RetryDestinationLimiter( - destination, clock, store, retry_interval, backoff_on_failure, **kwargs + destination, + clock, + store, + retry_interval, + backoff_on_failure=backoff_on_failure, + **kwargs ) ) From 016af015980d28d7efdce9aad1fe2148335f5086 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 10:35:13 +0100 Subject: [PATCH 119/231] Rename VerifyKeyRequest.deferred field (#5343) it's a bit confusing --- changelog.d/5343.misc | 1 + synapse/crypto/keyring.py | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5343.misc diff --git a/changelog.d/5343.misc b/changelog.d/5343.misc new file mode 100644 index 000000000000..dbee0f71b9d8 --- /dev/null +++ b/changelog.d/5343.misc @@ -0,0 +1 @@ +Rename VerifyKeyRequest.deferred field. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 0fd15287e775..8e1d666db112 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -75,7 +75,7 @@ class VerifyKeyRequest(object): minimum_valid_until_ts (int): time at which we require the signing key to be valid. (0 implies we don't care) - deferred(Deferred[str, str, nacl.signing.VerifyKey]): + key_ready (Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no logcontext. @@ -88,7 +88,7 @@ class VerifyKeyRequest(object): key_ids = attr.ib() json_object = attr.ib() minimum_valid_until_ts = attr.ib() - deferred = attr.ib(default=attr.Factory(defer.Deferred)) + key_ready = attr.ib(default=attr.Factory(defer.Deferred)) class KeyLookupError(ValueError): @@ -204,7 +204,7 @@ def process(server_name, json_object, validity_time): def _start_key_lookups(self, verify_requests): """Sets off the key fetches for each verify request - Once each fetch completes, verify_request.deferred will be resolved. 
+ Once each fetch completes, verify_request.key_ready will be resolved. Args: verify_requests (List[VerifyKeyRequest]): @@ -250,7 +250,7 @@ def remove_deferreds(res, verify_request): return res for verify_request in verify_requests: - verify_request.deferred.addBoth(remove_deferreds, verify_request) + verify_request.key_ready.addBoth(remove_deferreds, verify_request) except Exception: logger.exception("Error starting key lookups") @@ -303,7 +303,7 @@ def rm(r, server_name_): def _get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request - For each verify_request, verify_request.deferred is called back with + For each verify_request, verify_request.key_ready is called back with params (server_name, key_id, VerifyKey) if a key is found, or errbacked with a SynapseError if none of the keys are found. @@ -312,7 +312,7 @@ def _get_server_verify_keys(self, verify_requests): """ remaining_requests = set( - (rq for rq in verify_requests if not rq.deferred.called) + (rq for rq in verify_requests if not rq.key_ready.called) ) @defer.inlineCallbacks @@ -326,7 +326,7 @@ def do_iterations(): # look for any requests which weren't satisfied with PreserveLoggingContext(): for verify_request in remaining_requests: - verify_request.deferred.errback( + verify_request.key_ready.errback( SynapseError( 401, "No key for %s with ids in %s (min_validity %i)" @@ -346,8 +346,8 @@ def on_err(err): logger.error("Unexpected error in _get_server_verify_keys: %s", err) with PreserveLoggingContext(): for verify_request in remaining_requests: - if not verify_request.deferred.called: - verify_request.deferred.errback(err) + if not verify_request.key_ready.called: + verify_request.key_ready.errback(err) run_in_background(do_iterations).addErrback(on_err) @@ -366,7 +366,7 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): for verify_request in remaining_requests: # any completed requests should already have been removed - assert not verify_request.deferred.called + assert not verify_request.key_ready.called keys_for_server = missing_keys[verify_request.server_name] for key_id in verify_request.key_ids: @@ -402,7 +402,7 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): continue with PreserveLoggingContext(): - verify_request.deferred.callback( + verify_request.key_ready.callback( (server_name, key_id, fetch_key_result.verify_key) ) completed.append(verify_request) @@ -862,7 +862,7 @@ def _handle_key_deferred(verify_request): """ server_name = verify_request.server_name with PreserveLoggingContext(): - _, key_id, verify_key = yield verify_request.deferred + _, key_id, verify_key = yield verify_request.key_ready json_object = verify_request.json_object From 2615c6bd9e5456c5aefc23a9c89a4346b8afc6b0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 10:35:40 +0100 Subject: [PATCH 120/231] Clean up debug logging (#5347) Remove some spurious stuff, clarify some other stuff --- changelog.d/5347.misc | 2 ++ synapse/crypto/event_signing.py | 6 +++++- synapse/crypto/keyring.py | 4 ---- synapse/federation/federation_client.py | 16 +++++++++++++--- 4 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5347.misc diff --git a/changelog.d/5347.misc b/changelog.d/5347.misc new file mode 100644 index 000000000000..436245fb11aa --- /dev/null +++ b/changelog.d/5347.misc @@ -0,0 +1,2 @@ +Various improvements to debug logging. 
+ diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 1dfa727fcfa9..99a586655b3a 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -31,7 +31,11 @@ def check_event_content_hash(event, hash_algorithm=hashlib.sha256): """Check whether the hash for this PDU matches the contents""" name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm) - logger.debug("Expecting hash: %s", encode_base64(expected_hash)) + logger.debug( + "Verifying content hash on %s (expecting: %s)", + event.event_id, + encode_base64(expected_hash), + ) # some malformed events lack a 'hashes'. Protect against it being missing # or a weird type by basically treating it the same as an unhashed event. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 8e1d666db112..e94e71bdad01 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -866,10 +866,6 @@ def _handle_key_deferred(verify_request): json_object = verify_request.json_object - logger.debug( - "Got key %s %s:%s for server %s, verifying" - % (key_id, verify_key.alg, verify_key.version, server_name) - ) try: verify_signed_json(json_object, server_name, verify_key) except SignatureVerifyException as e: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d559605382f8..70573746d6c5 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -232,7 +232,8 @@ def get_pdu(self, destinations, event_id, room_version, outlier=False, moving to the next destination. None indicates no timeout. Returns: - Deferred: Results in the requested PDU. + Deferred: Results in the requested PDU, or None if we were unable to find + it. """ # TODO: Rate limit the number of times we try and get the same event. @@ -257,7 +258,12 @@ def get_pdu(self, destinations, event_id, room_version, outlier=False, destination, event_id, timeout=timeout, ) - logger.debug("transaction_data %r", transaction_data) + logger.debug( + "retrieved event id %s from %s: %r", + event_id, + destination, + transaction_data, + ) pdu_list = [ event_from_pdu_json(p, format_ver, outlier=outlier) @@ -331,7 +337,11 @@ def get_state_for_room(self, destination, room_id, event_id): ) if failed_to_fetch: - logger.warn("Failed to get %r", failed_to_fetch) + logger.warning( + "Failed to fetch missing state/auth events for %s: %s", + room_id, + failed_to_fetch + ) event_map = { ev.event_id: ev for ev in fetched_events From 14f13babb00d64009b11ef822ebe6fafe044eebd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 10:38:25 +0100 Subject: [PATCH 121/231] Add a test room version where we enforce key validity (#5348) --- changelog.d/5348.bugfix | 1 + synapse/api/room_versions.py | 20 +++++++++----- synapse/federation/federation_base.py | 39 ++++++++++++++++----------- 3 files changed, 38 insertions(+), 22 deletions(-) create mode 100644 changelog.d/5348.bugfix diff --git a/changelog.d/5348.bugfix b/changelog.d/5348.bugfix new file mode 100644 index 000000000000..8d396c7990a7 --- /dev/null +++ b/changelog.d/5348.bugfix @@ -0,0 +1 @@ +Add a new room version where the timestamps on events are checked against the validity periods on signing keys. 
\ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 4085bd10b95f..501cdfb6a41a 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -50,6 +50,7 @@ class RoomVersion(object): disposition = attr.ib() # str; one of the RoomDispositions event_format = attr.ib() # int; one of the EventFormatVersions state_res = attr.ib() # int; one of the StateResolutionVersions + enforce_key_validity = attr.ib() # bool class RoomVersions(object): @@ -58,30 +59,35 @@ class RoomVersions(object): RoomDisposition.STABLE, EventFormatVersions.V1, StateResolutionVersions.V1, - ) - STATE_V2_TEST = RoomVersion( - "state-v2-test", - RoomDisposition.UNSTABLE, - EventFormatVersions.V1, - StateResolutionVersions.V2, + enforce_key_validity=False, ) V2 = RoomVersion( "2", RoomDisposition.STABLE, EventFormatVersions.V1, StateResolutionVersions.V2, + enforce_key_validity=False, ) V3 = RoomVersion( "3", RoomDisposition.STABLE, EventFormatVersions.V2, StateResolutionVersions.V2, + enforce_key_validity=False, ) V4 = RoomVersion( "4", RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, + enforce_key_validity=False, + ) + VDH_TEST_KEY_VALIDITY = RoomVersion( + "vdh-test-key-validity", + RoomDisposition.UNSTABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=False, ) @@ -90,7 +96,7 @@ class RoomVersions(object): RoomVersions.V1, RoomVersions.V2, RoomVersions.V3, - RoomVersions.STATE_V2_TEST, RoomVersions.V4, + RoomVersions.VDH_TEST_KEY_VALIDITY, ) } # type: dict[str, RoomVersion] diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 4b38f7c759f9..b541913d82bb 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -223,9 +223,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): the signatures are valid, or fail (with a SynapseError) if not. """ - # (currently this is written assuming the v1 room structure; we'll probably want a - # separate function for checking v2 rooms) - # we want to check that the event is signed by: # # (a) the sender's server @@ -257,6 +254,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): for p in pdus ] + v = KNOWN_ROOM_VERSIONS.get(room_version) + if not v: + raise RuntimeError("Unrecognized room version %s" % (room_version,)) + # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) pdus_to_check_sender = [ @@ -264,10 +265,16 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): if not _is_invite_via_3pid(p.pdu) ] - more_deferreds = keyring.verify_json_objects_for_server([ - (p.sender_domain, p.redacted_pdu_json, 0) - for p in pdus_to_check_sender - ]) + more_deferreds = keyring.verify_json_objects_for_server( + [ + ( + p.sender_domain, + p.redacted_pdu_json, + p.pdu.origin_server_ts if v.enforce_key_validity else 0, + ) + for p in pdus_to_check_sender + ] + ) def sender_err(e, pdu_to_check): errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( @@ -287,20 +294,22 @@ def sender_err(e, pdu_to_check): # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). 
- v = KNOWN_ROOM_VERSIONS.get(room_version) - if not v: - raise RuntimeError("Unrecognized room version %s" % (room_version,)) - if v.event_format == EventFormatVersions.V1: pdus_to_check_event_id = [ p for p in pdus_to_check if p.sender_domain != get_domain_from_id(p.pdu.event_id) ] - more_deferreds = keyring.verify_json_objects_for_server([ - (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, 0) - for p in pdus_to_check_event_id - ]) + more_deferreds = keyring.verify_json_objects_for_server( + [ + ( + get_domain_from_id(p.pdu.event_id), + p.redacted_pdu_json, + p.pdu.origin_server_ts if v.enforce_key_validity else 0, + ) + for p in pdus_to_check_event_id + ] + ) def event_err(e, pdu_to_check): errmsg = ( From cea9750d112cf74790fb8c16482a0068717954d5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Jun 2019 16:12:57 +0100 Subject: [PATCH 122/231] Associate a request_name with each verify request, for logging Also: * rename VerifyKeyRequest->VerifyJsonRequest * calculate key_ids on VerifyJsonRequest construction * refactor things to pass around VerifyJsonRequests instead of 4-tuples --- changelog.d/5353.misc | 2 + synapse/crypto/keyring.py | 112 +++++++++++++++---------- synapse/federation/federation_base.py | 2 + synapse/federation/transport/server.py | 4 +- synapse/groups/attestations.py | 4 +- tests/crypto/test_keyring.py | 18 ++-- 6 files changed, 86 insertions(+), 56 deletions(-) create mode 100644 changelog.d/5353.misc diff --git a/changelog.d/5353.misc b/changelog.d/5353.misc new file mode 100644 index 000000000000..436245fb11aa --- /dev/null +++ b/changelog.d/5353.misc @@ -0,0 +1,2 @@ +Various improvements to debug logging. + diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e94e71bdad01..2b6b5913bc7d 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -60,9 +60,9 @@ @attr.s(slots=True, cmp=False) -class VerifyKeyRequest(object): +class VerifyJsonRequest(object): """ - A request for a verify key to verify a JSON object. + A request to verify a JSON object. Attributes: server_name(str): The name of the server to verify against. @@ -85,11 +85,15 @@ class VerifyKeyRequest(object): """ server_name = attr.ib() - key_ids = attr.ib() json_object = attr.ib() minimum_valid_until_ts = attr.ib() + request_name = attr.ib() + key_ids = attr.ib(init=False) key_ready = attr.ib(default=attr.Factory(defer.Deferred)) + def __attrs_post_init__(self): + self.key_ids = signature_ids(self.json_object, self.server_name) + class KeyLookupError(ValueError): pass @@ -114,7 +118,9 @@ def __init__(self, hs, key_fetchers=None): # These are regular, logcontext-agnostic Deferreds. self.key_downloads = {} - def verify_json_for_server(self, server_name, json_object, validity_time): + def verify_json_for_server( + self, server_name, json_object, validity_time, request_name + ): """Verify that a JSON object has been signed by a given server Args: @@ -125,24 +131,31 @@ def verify_json_for_server(self, server_name, json_object, validity_time): validity_time (int): timestamp at which we require the signing key to be valid. (0 implies we don't care) + request_name (str): an identifier for this json object (eg, an event id) + for logging. 
+ Returns: Deferred[None]: completes if the the object was correctly signed, otherwise errbacks with an error """ - req = server_name, json_object, validity_time - - return logcontext.make_deferred_yieldable( - self.verify_json_objects_for_server((req,))[0] - ) + req = VerifyJsonRequest(server_name, json_object, validity_time, request_name) + requests = (req,) + return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0]) def verify_json_objects_for_server(self, server_and_json): """Bulk verifies signatures of json objects, bulk fetching keys as necessary. Args: - server_and_json (iterable[Tuple[str, dict, int]): - Iterable of triplets of (server_name, json_object, validity_time) - validity_time is a timestamp at which the signing key must be valid. + server_and_json (iterable[Tuple[str, dict, int, str]): + Iterable of (server_name, json_object, validity_time, request_name) + tuples. + + validity_time is a timestamp at which the signing key must be + valid. + + request_name is an identifier for this json object (eg, an event id) + for logging. Returns: List: for each input triplet, a deferred indicating success @@ -150,38 +163,54 @@ def verify_json_objects_for_server(self, server_and_json): server_name. The deferreds run their callbacks in the sentinel logcontext. """ - # a list of VerifyKeyRequests - verify_requests = [] + return self._verify_objects( + VerifyJsonRequest(server_name, json_object, validity_time, request_name) + for server_name, json_object, validity_time, request_name in server_and_json + ) + + def _verify_objects(self, verify_requests): + """Does the work of verify_json_[objects_]for_server + + + Args: + verify_requests (iterable[VerifyJsonRequest]): + Iterable of verification requests. + + Returns: + List: for each input item, a deferred indicating success + or failure to verify each json object's signature for the given + server_name. The deferreds run their callbacks in the sentinel + logcontext. + """ + # a list of VerifyJsonRequests which are awaiting a key lookup + key_lookups = [] handle = preserve_fn(_handle_key_deferred) - def process(server_name, json_object, validity_time): + def process(verify_request): """Process an entry in the request list - Given a (server_name, json_object, validity_time) triplet from the request - list, adds a key request to verify_requests, and returns a deferred which + Adds a key request to key_lookups, and returns a deferred which will complete or fail (in the sentinel context) when verification completes. """ - key_ids = signature_ids(json_object, server_name) - - if not key_ids: + if not verify_request.key_ids: return defer.fail( SynapseError( - 400, "Not signed by %s" % (server_name,), Codes.UNAUTHORIZED + 400, + "Not signed by %s" % (verify_request.server_name,), + Codes.UNAUTHORIZED, ) ) logger.debug( - "Verifying for %s with key_ids %s, min_validity %i", - server_name, - key_ids, - validity_time, + "Verifying %s for %s with key_ids %s, min_validity %i", + verify_request.request_name, + verify_request.server_name, + verify_request.key_ids, + verify_request.minimum_valid_until_ts, ) # add the key request to the queue, but don't start it off yet. - verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, validity_time - ) - verify_requests.append(verify_request) + key_lookups.append(verify_request) # now run _handle_key_deferred, which will wait for the key request # to complete and then do the verification. 
@@ -190,13 +219,10 @@ def process(server_name, json_object, validity_time): # wrap it with preserve_fn (aka run_in_background) return handle(verify_request) - results = [ - process(server_name, json_object, validity_time) - for server_name, json_object, validity_time in server_and_json - ] + results = [process(r) for r in verify_requests] - if verify_requests: - run_in_background(self._start_key_lookups, verify_requests) + if key_lookups: + run_in_background(self._start_key_lookups, key_lookups) return results @@ -207,7 +233,7 @@ def _start_key_lookups(self, verify_requests): Once each fetch completes, verify_request.key_ready will be resolved. Args: - verify_requests (List[VerifyKeyRequest]): + verify_requests (List[VerifyJsonRequest]): """ try: @@ -308,7 +334,7 @@ def _get_server_verify_keys(self, verify_requests): with a SynapseError if none of the keys are found. Args: - verify_requests (list[VerifyKeyRequest]): list of verify requests + verify_requests (list[VerifyJsonRequest]): list of verify requests """ remaining_requests = set( @@ -357,7 +383,7 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): Args: fetcher (KeyFetcher): fetcher to use to fetch the keys - remaining_requests (set[VerifyKeyRequest]): outstanding key requests. + remaining_requests (set[VerifyJsonRequest]): outstanding key requests. Any successfully-completed requests will be removed from the list. """ # dict[str, dict[str, int]]: keys to fetch. @@ -376,7 +402,7 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): # the requests. keys_for_server[key_id] = max( keys_for_server.get(key_id, -1), - verify_request.minimum_valid_until_ts + verify_request.minimum_valid_until_ts, ) results = yield fetcher.get_keys(missing_keys) @@ -386,7 +412,7 @@ def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): server_name = verify_request.server_name # see if any of the keys we got this time are sufficient to - # complete this VerifyKeyRequest. + # complete this VerifyJsonRequest. 
result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: fetch_key_result = result_keys.get(key_id) @@ -454,9 +480,7 @@ def __init__(self, hs): self.config = hs.get_config() @defer.inlineCallbacks - def process_v2_response( - self, from_server, response_json, time_added_ms - ): + def process_v2_response(self, from_server, response_json, time_added_ms): """Parse a 'Server Keys' structure from the result of a /key request This is used to parse either the entirety of the response from @@ -852,7 +876,7 @@ def _handle_key_deferred(verify_request): """Waits for the key to become available, and then performs a verification Args: - verify_request (VerifyKeyRequest): + verify_request (VerifyJsonRequest): Returns: Deferred[None] diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index b541913d82bb..fc5cfb7d83f0 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -271,6 +271,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): p.sender_domain, p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, + p.pdu.event_id, ) for p in pdus_to_check_sender ] @@ -306,6 +307,7 @@ def sender_err(e, pdu_to_check): get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, + p.pdu.event_id, ) for p in pdus_to_check_event_id ] diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 0db8858cf12d..949a5fb2aa6c 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -140,7 +140,9 @@ def authenticate_request(self, request, content): 401, "Missing Authorization headers", Codes.UNAUTHORIZED, ) - yield self.keyring.verify_json_for_server(origin, json_request, now) + yield self.keyring.verify_json_for_server( + origin, json_request, now, "Incoming request" + ) logger.info("Request from %s", origin) request.authenticated_entity = origin diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index fa6b641ee15c..e5dda1975f62 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -101,7 +101,9 @@ def verify_attestation(self, attestation, group_id, user_id, server_name=None): if valid_until_ms < now: raise SynapseError(400, "Attestation expired") - yield self.keyring.verify_json_for_server(server_name, attestation, now) + yield self.keyring.verify_json_for_server( + server_name, attestation, now, "Group attestation" + ) def create_attestation(self, group_id, user_id): """Create an attestation for the group_id and user_id with default diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 4cff7e36c82f..18121f4f6c6d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -134,7 +134,7 @@ def first_lookup(): context_11.request = "11" res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1, 0), ("server11", {}, 0)] + [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] ) # the unsigned json should be rejected pretty quickly @@ -171,7 +171,7 @@ def second_lookup(): self.http_client.post_json.return_value = defer.Deferred() res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1, 0)] + [("server10", json1, 0, "test")] ) res_deferreds_2[0].addBoth(self.check_context, None) yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) @@ -205,11 +205,11 @@ def test_verify_json_for_server(self): 
signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}, 0) + d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") self.failureResultOf(d, SynapseError) # should suceed on a signed object - d = _verify_json_for_server(kr, "server9", json1, 500) + d = _verify_json_for_server(kr, "server9", json1, 500, "test signed") # self.assertFalse(d.called) self.get_success(d) @@ -239,7 +239,7 @@ def get_keys(keys_to_fetch): # the first request should succeed; the second should fail because the key # has expired results = kr.verify_json_objects_for_server( - [("server1", json1, 500), ("server1", json1, 1500)] + [("server1", json1, 500, "test1"), ("server1", json1, 1500, "test2")] ) self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -284,7 +284,7 @@ def get_keys2(keys_to_fetch): signedjson.sign.sign_json(json1, "server1", key1) results = kr.verify_json_objects_for_server( - [("server1", json1, 1200), ("server1", json1, 1500)] + [("server1", json1, 1200, "test1"), ("server1", json1, 1500, "test2")] ) self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -522,16 +522,14 @@ def run_in_context(f, *args, **kwargs): defer.returnValue(rv) -def _verify_json_for_server(keyring, server_name, json_object, validity_time): +def _verify_json_for_server(kr, *args): """thin wrapper around verify_json_for_server which makes sure it is wrapped with the patched defer.inlineCallbacks. """ @defer.inlineCallbacks def v(): - rv1 = yield keyring.verify_json_for_server( - server_name, json_object, validity_time - ) + rv1 = yield kr.verify_json_for_server(*args) defer.returnValue(rv1) return run_in_context(v) From d18e4ea0d46a9390a75b270fe5f17dc3bc23f29a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 10:58:51 +0100 Subject: [PATCH 123/231] Implement room v5 which enforces signing key validity Implements [MSC2077](https://github.com/matrix-org/matrix-doc/pull/2077) and fixes #5247 and #4364. --- changelog.d/5354.bugfix | 2 ++ synapse/api/room_versions.py | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5354.bugfix diff --git a/changelog.d/5354.bugfix b/changelog.d/5354.bugfix new file mode 100644 index 000000000000..0c56032b3080 --- /dev/null +++ b/changelog.d/5354.bugfix @@ -0,0 +1,2 @@ +Add a new room version where the timestamps on events are checked against the validity periods on signing keys. + diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 501cdfb6a41a..d644803d3843 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -82,12 +82,12 @@ class RoomVersions(object): StateResolutionVersions.V2, enforce_key_validity=False, ) - VDH_TEST_KEY_VALIDITY = RoomVersion( - "vdh-test-key-validity", - RoomDisposition.UNSTABLE, + V5 = RoomVersion( + "5", + RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, - enforce_key_validity=False, + enforce_key_validity=True, ) @@ -97,6 +97,6 @@ class RoomVersions(object): RoomVersions.V2, RoomVersions.V3, RoomVersions.V4, - RoomVersions.VDH_TEST_KEY_VALIDITY, + RoomVersions.V5, ) } # type: dict[str, RoomVersion] From bc3d6b918b62c3dd6ce96eba638cf4601126e2f9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Jun 2019 11:31:27 +0100 Subject: [PATCH 124/231] Add logging when request fails and clarify we ignore errors. 
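In brief, the intended two-tier behaviour: expected transport failures (the remote is down, or doesn't implement the endpoint) are logged at info and we bail out quietly, while any other failure is still deliberately swallowed but now leaves a full traceback naming the event. A reduced sketch (ConnectionError stands in for synapse's RequestSendFailed; the helper names are illustrative):

    import logging

    logger = logging.getLogger(__name__)

    def double_check_auth(event_id, update_from_remote):
        # update_from_remote stands in for the remote auth-event query
        try:
            update_from_remote()
        except ConnectionError as e:
            # expected: the other side isn't around, note it and bail
            logger.info("Failed to get event auth from remote: %s", e)
        except Exception:
            # unexpected, but deliberately non-fatal: keep the traceback
            logger.exception(
                "Failed to double check auth events for %s with remote. "
                "Ignoring failure and continuing processing of event.",
                event_id,
            )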
--- synapse/handlers/federation.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index fa735efedd00..ac5ca791431a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2034,8 +2034,14 @@ def do_auth(self, origin, event, context, auth_events): ) except Exception: # We don't really mind if the above fails, so lets not fail - # processing if it does. - logger.exception("Failed to call _update_auth_events_and_context_for_auth") + # processing if it does. However, it really shouldn't fail so + # let's still log as an exception since we'll still want to fix + # any bugs. + logger.exception( + "Failed to double check auth events for %s with remote. " + "Ignoring failure and continuing processing of event.", + event.event_id, + ) try: self.auth.check(room_version, event, auth_events=auth_events) @@ -2108,9 +2114,10 @@ def _update_auth_events_and_context_for_auth( remote_auth_chain = yield self.federation_client.get_event_auth( origin, event.room_id, event.event_id ) - except RequestSendFailed: + except RequestSendFailed as e: # The other side isn't around or doesn't implement the # endpoint, so lets just bail out. + logger.info("Failed to get event auth from remote: %s", e) return seen_remotes = yield self.store.have_seen_events( @@ -2264,9 +2271,10 @@ def _update_auth_events_and_context_for_auth( event.event_id, local_auth_chain, ) - except RequestSendFailed: + except RequestSendFailed as e: # The other side isn't around or doesn't implement the # endpoint, so lets just bail out. + logger.info("Failed to query auth from remote: %s", e) return seen_remotes = yield self.store.have_seen_events( From dbbaf25dd3ce1c2b784cc3347142073cc586988d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 11:50:27 +0100 Subject: [PATCH 125/231] Do user_id != me checks before deciding whether we should pick heroes from the joined members or the parted ones --- synapse/handlers/sync.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1ee9a6e313f0..bbf74027acd3 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -598,15 +598,28 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): if canonical_alias and canonical_alias.content: defer.returnValue(summary) + me = sync_config.user.to_string() + joined_user_ids = [ - r[0] for r in details.get(Membership.JOIN, empty_ms).members + r[0] + for r in details.get(Membership.JOIN, empty_ms).members + if r[0] != me ] invited_user_ids = [ - r[0] for r in details.get(Membership.INVITE, empty_ms).members + r[0] + for r in details.get(Membership.INVITE, empty_ms).members + if r[0] != me ] gone_user_ids = ( - [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] + - [r[0] for r in details.get(Membership.BAN, empty_ms).members] + [ + r[0] + for r in details.get(Membership.LEAVE, empty_ms).members + if r[0] != me + ] + [ + r[0] + for r in details.get(Membership.BAN, empty_ms).members + if r[0] != me + ] ) # FIXME: only build up a member_ids list for our heroes @@ -621,22 +634,13 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): member_ids[user_id] = event_id # FIXME: order by stream ordering rather than as returned by SQL - me = sync_config.user.to_string() if (joined_user_ids or invited_user_ids): summary['m.heroes'] = sorted( - [ - user_id - for 
user_id in (joined_user_ids + invited_user_ids) - if user_id != me - ] + [user_id for user_id in (joined_user_ids + invited_user_ids)] )[0:5] else: summary['m.heroes'] = sorted( - [ - user_id - for user_id in gone_user_ids - if user_id != me - ] + [user_id for user_id in gone_user_ids] )[0:5] if not sync_config.filter_collection.lazy_load_members(): From a412be2bc796d90c28941960aa29012dcc171ba1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 11:53:50 +0100 Subject: [PATCH 126/231] Changelog --- changelog.d/5355.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5355.bugfix diff --git a/changelog.d/5355.bugfix b/changelog.d/5355.bugfix new file mode 100644 index 000000000000..5de469e867bb --- /dev/null +++ b/changelog.d/5355.bugfix @@ -0,0 +1 @@ +Include left members in room summaries' heroes From 804f26a9ffb4a5f90acdf3d8e7e869fcecf33942 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 12:03:01 +0100 Subject: [PATCH 127/231] Properly format the changelog --- changelog.d/5355.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5355.bugfix b/changelog.d/5355.bugfix index 5de469e867bb..e1955a7403dd 100644 --- a/changelog.d/5355.bugfix +++ b/changelog.d/5355.bugfix @@ -1 +1 @@ -Include left members in room summaries' heroes +Include left members in room summaries' heroes. From 26713515de97c98dda99a9b06325781fe09b1cbe Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 13:16:23 +0100 Subject: [PATCH 128/231] Neilj/mau tracking config explainer (#5284) Improve documentation of monthly active user blocking and mau_trial_days --- changelog.d/5284.misc | 1 + docs/sample_config.yaml | 16 ++++++++++++++++ synapse/config/server.py | 16 ++++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 changelog.d/5284.misc diff --git a/changelog.d/5284.misc b/changelog.d/5284.misc new file mode 100644 index 000000000000..c4d42ca3d9e0 --- /dev/null +++ b/changelog.d/5284.misc @@ -0,0 +1 @@ +Improve sample config for monthly active user blocking. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 493ea9ee9e1f..0960b9b5ed23 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -261,6 +261,22 @@ listeners: # Monthly Active User Blocking # +# Used in cases where the admin or server owner wants to limit to the +# number of monthly active users. +# +# 'limit_usage_by_mau' disables/enables monthly active user blocking. When +# anabled and a limit is reached the server returns a 'ResourceLimitError' +# with error type Codes.RESOURCE_LIMIT_EXCEEDED +# +# 'max_mau_value' is the hard limit of monthly active users above which +# the server will start blocking user actions. +# +# 'mau_trial_days' is a means to add a grace period for active users. It +# means that users must be active for this number of days before they +# can be considered active and guards against the case where lots of users +# sign up in a short space of time never to return after their initial +# session. +# #limit_usage_by_mau: False #max_mau_value: 50 #mau_trial_days: 2 diff --git a/synapse/config/server.py b/synapse/config/server.py index e763e19e15a4..334921d42104 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -585,6 +585,22 @@ def default_config(self, server_name, data_dir_path, **kwargs): # Monthly Active User Blocking # + # Used in cases where the admin or server owner wants to limit to the + # number of monthly active users. 
+ # + # 'limit_usage_by_mau' disables/enables monthly active user blocking. When + # anabled and a limit is reached the server returns a 'ResourceLimitError' + # with error type Codes.RESOURCE_LIMIT_EXCEEDED + # + # 'max_mau_value' is the hard limit of monthly active users above which + # the server will start blocking user actions. + # + # 'mau_trial_days' is a means to add a grace period for active users. It + # means that users must be active for this number of days before they + # can be considered active and guards against the case where lots of users + # sign up in a short space of time never to return after their initial + # session. + # #limit_usage_by_mau: False #max_mau_value: 50 #mau_trial_days: 2 From 4650526b5ebf699920ebf9ecfdf13797c189a922 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 13:47:03 +0100 Subject: [PATCH 129/231] Neilj/changelog clean up (#5356) * group together key validity refactors --- changelog.d/5232.misc | 2 +- changelog.d/5234.misc | 2 +- changelog.d/5235.misc | 2 +- changelog.d/5236.misc | 2 +- changelog.d/5237.misc | 2 +- changelog.d/5244.misc | 2 +- changelog.d/5250.misc | 2 +- changelog.d/5296.misc | 2 +- changelog.d/5299.misc | 2 +- changelog.d/5343.misc | 2 +- changelog.d/5347.misc | 3 +-- changelog.d/5356.misc | 1 + 12 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5356.misc diff --git a/changelog.d/5232.misc b/changelog.d/5232.misc index 1cdc71f09511..8336bc55dc48 100644 --- a/changelog.d/5232.misc +++ b/changelog.d/5232.misc @@ -1 +1 @@ -Run black on synapse.crypto.keyring. +Preparatory work for key-validity features. diff --git a/changelog.d/5234.misc b/changelog.d/5234.misc index 43fbd6f67c59..8336bc55dc48 100644 --- a/changelog.d/5234.misc +++ b/changelog.d/5234.misc @@ -1 +1 @@ -Rewrite store_server_verify_key to store several keys at once. +Preparatory work for key-validity features. diff --git a/changelog.d/5235.misc b/changelog.d/5235.misc index 2296ad2a4f46..8336bc55dc48 100644 --- a/changelog.d/5235.misc +++ b/changelog.d/5235.misc @@ -1 +1 @@ -Remove unused VerifyKey.expired and .time_added fields. +Preparatory work for key-validity features. diff --git a/changelog.d/5236.misc b/changelog.d/5236.misc index cb4417a9f45f..8336bc55dc48 100644 --- a/changelog.d/5236.misc +++ b/changelog.d/5236.misc @@ -1 +1 @@ -Simplify Keyring.process_v2_response. \ No newline at end of file +Preparatory work for key-validity features. diff --git a/changelog.d/5237.misc b/changelog.d/5237.misc index f4fe3b821bf6..8336bc55dc48 100644 --- a/changelog.d/5237.misc +++ b/changelog.d/5237.misc @@ -1 +1 @@ -Store key validity time in the storage layer. +Preparatory work for key-validity features. diff --git a/changelog.d/5244.misc b/changelog.d/5244.misc index 9cc1fb869de0..8336bc55dc48 100644 --- a/changelog.d/5244.misc +++ b/changelog.d/5244.misc @@ -1 +1 @@ -Refactor synapse.crypto.keyring to use a KeyFetcher interface. +Preparatory work for key-validity features. diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc index 575a299a8214..8336bc55dc48 100644 --- a/changelog.d/5250.misc +++ b/changelog.d/5250.misc @@ -1 +1 @@ -Simplification to Keyring.wait_for_previous_lookups. +Preparatory work for key-validity features. diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc index a038a6f7f64b..8336bc55dc48 100644 --- a/changelog.d/5296.misc +++ b/changelog.d/5296.misc @@ -1 +1 @@ -Refactor keyring.VerifyKeyRequest to use attr.s. +Preparatory work for key-validity features. 
diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc index 53297c768b95..8336bc55dc48 100644 --- a/changelog.d/5299.misc +++ b/changelog.d/5299.misc @@ -1 +1 @@ -Rewrite get_server_verify_keys, again. +Preparatory work for key-validity features. diff --git a/changelog.d/5343.misc b/changelog.d/5343.misc index dbee0f71b9d8..8336bc55dc48 100644 --- a/changelog.d/5343.misc +++ b/changelog.d/5343.misc @@ -1 +1 @@ -Rename VerifyKeyRequest.deferred field. +Preparatory work for key-validity features. diff --git a/changelog.d/5347.misc b/changelog.d/5347.misc index 436245fb11aa..8336bc55dc48 100644 --- a/changelog.d/5347.misc +++ b/changelog.d/5347.misc @@ -1,2 +1 @@ -Various improvements to debug logging. - +Preparatory work for key-validity features. diff --git a/changelog.d/5356.misc b/changelog.d/5356.misc new file mode 100644 index 000000000000..8336bc55dc48 --- /dev/null +++ b/changelog.d/5356.misc @@ -0,0 +1 @@ +Preparatory work for key-validity features. From a4cf2c1184f137bc52c2b12ef32876a1cb10801f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 14:00:18 +0100 Subject: [PATCH 130/231] Rewrite changelog --- changelog.d/5325.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5325.bugfix b/changelog.d/5325.bugfix index 6914398bccef..b9413388f5bb 100644 --- a/changelog.d/5325.bugfix +++ b/changelog.d/5325.bugfix @@ -1 +1 @@ -Add account_validity's email_sent column to the list of boolean columns in synapse_port_db. +Fix a bug where running synapse_port_db would cause the account validity feature to fail because it didn't set the type of the email_sent column to boolean. From 0a2f5226441936bab45ed4bc69836a008f69249a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 14:02:29 +0100 Subject: [PATCH 131/231] Simplify condition --- synapse/handlers/sync.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 72997d6d0412..78318aacd808 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -587,15 +587,14 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): # for the "name" value and default to an empty string. if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content and name.content.get("name", ""): + if name and name.content.get("name", ""): defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if (canonical_alias and canonical_alias.content - and canonical_alias.content.get("alias", "")): + if canonical_alias and canonical_alias.content.get("alias", ""): defer.returnValue(summary) joined_user_ids = [ From e2dfb922e1334e4a506a9d678d0f1bf573cc95e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 14:16:07 +0100 Subject: [PATCH 132/231] Validate federation server TLS certificates by default. 
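The effective change is a one-line flip of the default. A sketch of how the option is read, assuming a parsed config dict (the standalone function here is illustrative; the real logic lives in TlsConfig.read_config):

    def read_federation_verify_certificates(config):
        # The option now defaults to True when absent; admins must opt out
        # explicitly, or use the per-domain verification whitelist.
        return config.get("federation_verify_certificates", True)

    # read_federation_verify_certificates({}) -> True
    # read_federation_verify_certificates(
    #     {"federation_verify_certificates": False}) -> False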
--- changelog.d/5359.feature | 1 + synapse/config/tls.py | 10 +++++----- .../http/federation/test_matrix_federation_agent.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5359.feature diff --git a/changelog.d/5359.feature b/changelog.d/5359.feature new file mode 100644 index 000000000000..2a0393983485 --- /dev/null +++ b/changelog.d/5359.feature @@ -0,0 +1 @@ +Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 72dd5926f9f4..43712b8213e0 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -74,7 +74,7 @@ def read_config(self, config): # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( - "federation_verify_certificates", False, + "federation_verify_certificates", True, ) # Whitelist of domains to not verify certificates for @@ -241,12 +241,12 @@ def default_config(self, config_dir_path, server_name, **kwargs): # #tls_private_key_path: "%(tls_private_key_path)s" - # Whether to verify TLS certificates when sending federation traffic. + # Whether to verify TLS server certificates for outbound federation requests. # - # This currently defaults to `false`, however this will change in - # Synapse 1.0 when valid federation certificates will be required. + # Defaults to `true`. To disable certificate verification, uncomment the + # following line. # - #federation_verify_certificates: true + #federation_verify_certificates: false # Skip federation certificate verification on the following whitelist # of domains. diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index ed0ca079d9d1..4153da4da7fa 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -27,6 +27,7 @@ from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS +from synapse.config.homeserver import HomeServerConfig from synapse.crypto.context_factory import ClientTLSOptionsFactory from synapse.http.federation.matrix_federation_agent import ( MatrixFederationAgent, @@ -52,11 +53,16 @@ def setUp(self): self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) + # for now, we disable cert verification for the test, since the cert we + # present will not be trusted. We should do better here, though. 
+ config_dict = default_config("test", parse=False) + config_dict["federation_verify_certificates"] = False + config = HomeServerConfig() + config.parse_config_dict(config_dict) + self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory( - default_config("test", parse=True) - ), + tls_client_options_factory=ClientTLSOptionsFactory(config), _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, From 95ab2eb4a1e9757bfe881abacce6ff81b3dbc371 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 15:12:33 +0100 Subject: [PATCH 133/231] Fix notes about well-known and acme (#5357) fixes #4951 --- changelog.d/5357.doc | 1 + docs/MSC1711_certificates_FAQ.md | 11 +++++------ 2 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/5357.doc diff --git a/changelog.d/5357.doc b/changelog.d/5357.doc new file mode 100644 index 000000000000..27cba49641ff --- /dev/null +++ b/changelog.d/5357.doc @@ -0,0 +1 @@ +Fix notes about ACME in the MSC1711 faq. diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index ebfb20f5c86c..37f7f669c948 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -145,12 +145,11 @@ You can do this with a `.well-known` file as follows: 1. Keep the SRV record in place - it is needed for backwards compatibility with Synapse 0.34 and earlier. - 2. Give synapse a certificate corresponding to the target domain - (`customer.example.net` in the above example). Currently Synapse's ACME - support [does not support - this](https://github.com/matrix-org/synapse/issues/4552), so you will have - to acquire a certificate yourself and give it to Synapse via - `tls_certificate_path` and `tls_private_key_path`. + 2. Give Synapse a certificate corresponding to the target domain + (`customer.example.net` in the above example). You can either use Synapse's + built-in [ACME support](./ACME.md) for this (via the `domain` parameter in + the `acme` section), or acquire a certificate yourself and give it to + Synapse via `tls_certificate_path` and `tls_private_key_path`. 3. Restart Synapse to ensure the new certificate is loaded. From b4f1cd31f49dff94b4006a6eeb3b2c27eb003dd4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 15:30:10 +0100 Subject: [PATCH 134/231] Update sample config --- docs/sample_config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 493ea9ee9e1f..d10a355d68c5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -313,12 +313,12 @@ listeners: # #tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" -# Whether to verify TLS certificates when sending federation traffic. +# Whether to verify TLS server certificates for outbound federation requests. # -# This currently defaults to `false`, however this will change in -# Synapse 1.0 when valid federation certificates will be required. +# Defaults to `true`. To disable certificate verification, uncomment the +# following line. # -#federation_verify_certificates: true +#federation_verify_certificates: false # Skip federation certificate verification on the following whitelist # of domains. 
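Signing keys come up repeatedly in this series. For reference, a self-contained sketch of the signedjson primitives involved (the server name and key id below are arbitrary examples):

    from signedjson.key import generate_signing_key, get_verify_key
    from signedjson.sign import sign_json, verify_signed_json

    # Generate an ed25519 signing key, sign a JSON object with it, and check
    # that the signature verifies with the corresponding public key.
    signing_key = generate_signing_key("a_test")
    signed = sign_json({"foo": "bar"}, "example.com", signing_key)
    verify_signed_json(signed, "example.com", get_verify_key(signing_key))
    # verify_signed_json raises SignatureVerifyException on a bad signature.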
From fb98c05e0373a517f74568f83634c5b579812571 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 15:32:06 +0100 Subject: [PATCH 135/231] add a script to generate new signing_key files --- changelog.d/5361.feature | 1 + scripts/generate_signing_key.py | 37 +++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 changelog.d/5361.feature create mode 100755 scripts/generate_signing_key.py diff --git a/changelog.d/5361.feature b/changelog.d/5361.feature new file mode 100644 index 000000000000..10768cdad3c4 --- /dev/null +++ b/changelog.d/5361.feature @@ -0,0 +1 @@ +Add a script to generate new signing-key files. diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py new file mode 100755 index 000000000000..ba3ba9739574 --- /dev/null +++ b/scripts/generate_signing_key.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import sys + +from signedjson.key import write_signing_keys, generate_signing_key + +from synapse.util.stringutils import random_string + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "-o", "--output_file", + + type=argparse.FileType('w'), + default=sys.stdout, + help="Where to write the output to", + ) + args = parser.parse_args() + + key_id = "a_" + random_string(4) + key = generate_signing_key(key_id), + write_signing_keys(args.output_file, key) From 75538813fcd0403ec8915484a813b99e6eb256c6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Jun 2019 15:45:46 +0100 Subject: [PATCH 136/231] Fix background updates to handle redactions/rejections (#5352) * Fix background updates to handle redactions/rejections In background updates based on current state delta stream we need to handle that we may not have all the events (or at least that `get_events` may raise an exception). --- changelog.d/5352.bugfix | 1 + synapse/handlers/presence.py | 11 +++--- synapse/handlers/stats.py | 18 +++++++--- synapse/storage/events_worker.py | 37 +++++++++++++++++++ tests/handlers/test_stats.py | 62 ++++++++++++++++++++++++++++++-- 5 files changed, 117 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5352.bugfix diff --git a/changelog.d/5352.bugfix b/changelog.d/5352.bugfix new file mode 100644 index 000000000000..2ffefe5a6846 --- /dev/null +++ b/changelog.d/5352.bugfix @@ -0,0 +1 @@ +Fix room stats and presence background updates to correctly handle missing events. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6209858bbb9a..e49c8203efc6 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -828,14 +828,17 @@ def _handle_state_delta(self, deltas): # joins. 
continue - event = yield self.store.get_event(event_id) - if event.content.get("membership") != Membership.JOIN: + event = yield self.store.get_event(event_id, allow_none=True) + if not event or event.content.get("membership") != Membership.JOIN: # We only care about joins continue if prev_event_id: - prev_event = yield self.store.get_event(prev_event_id) - if prev_event.content.get("membership") == Membership.JOIN: + prev_event = yield self.store.get_event(prev_event_id, allow_none=True) + if ( + prev_event + and prev_event.content.get("membership") == Membership.JOIN + ): # Ignore changes to join events. continue diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 0e92b405ba6f..7ad16c85665e 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -115,6 +115,7 @@ def _handle_deltas(self, deltas): event_id = delta["event_id"] stream_id = delta["stream_id"] prev_event_id = delta["prev_event_id"] + stream_pos = delta["stream_id"] logger.debug("Handling: %r %r, %s", typ, state_key, event_id) @@ -136,10 +137,15 @@ def _handle_deltas(self, deltas): event_content = {} if event_id is not None: - event_content = (yield self.store.get_event(event_id)).content or {} + event = yield self.store.get_event(event_id, allow_none=True) + if event: + event_content = event.content or {} + + # We use stream_pos here rather than fetch by event_id as event_id + # may be None + now = yield self.store.get_received_ts_by_stream_pos(stream_pos) # quantise time to the nearest bucket - now = yield self.store.get_received_ts(event_id) now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size if typ == EventTypes.Member: @@ -149,9 +155,11 @@ def _handle_deltas(self, deltas): # compare them. prev_event_content = {} if prev_event_id is not None: - prev_event_content = ( - yield self.store.get_event(prev_event_id) - ).content + prev_event = yield self.store.get_event( + prev_event_id, allow_none=True, + ) + if prev_event: + prev_event_content = prev_event.content membership = event_content.get("membership", Membership.LEAVE) prev_membership = prev_event_content.get("membership", Membership.LEAVE) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 17824280485a..cc7df5cf14df 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -78,6 +78,43 @@ def get_received_ts(self, event_id): desc="get_received_ts", ) + def get_received_ts_by_stream_pos(self, stream_ordering): + """Given a stream ordering get an approximate timestamp of when it + happened. + + This is done by simply taking the received ts of the first event that + has a stream ordering greater than or equal to the given stream pos. + If none exists returns the current time, on the assumption that it must + have happened recently. + + Args: + stream_ordering (int) + + Returns: + Deferred[int] + """ + + def _get_approximate_received_ts_txn(txn): + sql = """ + SELECT received_ts FROM events + WHERE stream_ordering >= ? 
+ LIMIT 1 + """ + + txn.execute(sql, (stream_ordering,)) + row = txn.fetchone() + if row and row[0]: + ts = row[0] + else: + ts = self.clock.time_msec() + + return ts + + return self.runInteraction( + "get_approximate_received_ts", + _get_approximate_received_ts_txn, + ) + @defer.inlineCallbacks def get_event( self, diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index 249aba3d598f..2710c991cfec 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -204,7 +204,7 @@ def test_incorrect_state_transition(self): "a2": {"membership": "not a real thing"}, } - def get_event(event_id): + def get_event(event_id, allow_none=True): m = Mock() m.content = events[event_id] d = defer.Deferred() @@ -224,7 +224,7 @@ def get_received_ts(event_id): "room_id": "room", "event_id": "a1", "prev_event_id": "a2", - "stream_id": "bleb", + "stream_id": 60, } ] @@ -241,7 +241,7 @@ def get_received_ts(event_id): "room_id": "room", "event_id": "a2", "prev_event_id": "a1", - "stream_id": "bleb", + "stream_id": 100, } ] @@ -249,3 +249,59 @@ def get_received_ts(event_id): self.assertEqual( f.value.args[0], "'not a real thing' is not a valid membership" ) + + def test_redacted_prev_event(self): + """ + If the prev_event does not exist, then it is assumed to be a LEAVE. + """ + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + + # Do the initial population of the user directory via the background update + self._add_background_updates() + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + events = { + "a1": None, + "a2": {"membership": Membership.JOIN}, + } + + def get_event(event_id, allow_none=True): + if events.get(event_id): + m = Mock() + m.content = events[event_id] + else: + m = None + d = defer.Deferred() + self.reactor.callLater(0.0, d.callback, m) + return d + + def get_received_ts(event_id): + return defer.succeed(1) + + self.store.get_received_ts = get_received_ts + self.store.get_event = get_event + + deltas = [ + { + "type": EventTypes.Member, + "state_key": "some_user:test", + "room_id": room_1, + "event_id": "a2", + "prev_event_id": "a1", + "stream_id": 100, + } + ] + + # Handle our fake deltas, which has a user going from LEAVE -> JOIN. + self.get_success(self.handler._handle_deltas(deltas)) + + # One delta, with two joined members -- the room creator, and our fake + # user. + r = self.get_success(self.store.get_deltas_for_room(room_1, 0)) + self.assertEqual(len(r), 1) + self.assertEqual(r[0]["joined_members"], 2) From 94f6c674df8035d44e7219193377f77afdfa6669 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 16:11:31 +0100 Subject: [PATCH 137/231] Neilj/add r0.5 to versions (#5360) * Update _matrix/client/versions to reference support for r0.5.0 --- changelog.d/5360.feature | 1 + synapse/rest/client/versions.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5360.feature diff --git a/changelog.d/5360.feature b/changelog.d/5360.feature new file mode 100644 index 000000000000..01fbb3b06d9e --- /dev/null +++ b/changelog.d/5360.feature @@ -0,0 +1 @@ +Update /_matrix/client/versions to reference support for r0.5.0. 
diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 27e7cbf3cc00..babbf6a23ce8 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -39,6 +39,7 @@ def on_GET(self, request): "r0.2.0", "r0.3.0", "r0.4.0", + "r0.5.0", ], # as per MSC1497: "unstable_features": { From f8a45302c9ce147d7797ceb9e3757bd3b2af6b99 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 16:16:33 +0100 Subject: [PATCH 138/231] Fix `federation_custom_ca_list` configuration option. Previously, setting this option would cause an exception at startup. --- changelog.d/5362.bugfix | 1 + synapse/config/tls.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5362.bugfix diff --git a/changelog.d/5362.bugfix b/changelog.d/5362.bugfix new file mode 100644 index 000000000000..1c8b19182cb6 --- /dev/null +++ b/changelog.d/5362.bugfix @@ -0,0 +1 @@ +Fix `federation_custom_ca_list` configuration option. diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 72dd5926f9f4..94a53d05f971 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -107,7 +107,7 @@ def read_config(self, config): certs = [] for ca_file in custom_ca_list: logger.debug("Reading custom CA certificate file: %s", ca_file) - content = self.read_file(ca_file) + content = self.read_file(ca_file, "federation_custom_ca_list") # Parse the CA certificates try: From fe13bd52acb67de56fb5e1866d0ec64fff10ed94 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 16:35:05 +0100 Subject: [PATCH 139/231] Don't check whether the user's account is expired on /send_mail requests --- synapse/api/auth.py | 10 ++++-- .../rest/client/v2_alpha/account_validity.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 35 +++++++++++++++++++ 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 0c6c93a87b92..e24d942553f6 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -184,7 +184,13 @@ def get_public_keys(self, invite_event): return event_auth.get_public_keys(invite_event) @defer.inlineCallbacks - def get_user_by_req(self, request, allow_guest=False, rights="access"): + def get_user_by_req( + self, + request, + allow_guest=False, + rights="access", + allow_expired=False, + ): """ Get a registered user's ID. Args: @@ -229,7 +235,7 @@ def get_user_by_req(self, request, allow_guest=False, rights="access"): is_guest = user_info["is_guest"] # Deny the request if the user account has expired. 
- if self._account_validity.enabled: + if self._account_validity.enabled and not allow_expired: user_id = user.to_string() expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) if expiration_ts is not None and self.clock.time_msec() >= expiration_ts: diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index fc8dbeb617f0..9bc1e208ca5b 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -79,7 +79,7 @@ def on_POST(self, request): if not self.account_validity.renew_by_email_enabled: raise AuthError(403, "Account renewal via email is disabled on this server.") - requester = yield self.auth.get_user_by_req(request) + requester = yield self.auth.get_user_by_req(request, allow_expired=True) user_id = requester.user.to_string() yield self.account_activity_handler.send_renewal_email_to_user(user_id) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d4a1d4d50c8d..77a2923af6b3 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -427,6 +427,41 @@ def test_manual_email_send(self): self.assertEqual(len(self.email_attempts), 1) + def test_manual_email_send_expired_account(self): + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + # We need to manually add an email address otherwise the handler will do + # nothing. + now = self.hs.clock.time_msec() + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address="kermit@example.com", + validated_at=now, + added_at=now, + ) + ) + + # Make the account expire. + self.reactor.advance(datetime.timedelta(days=8).total_seconds()) + + # Ignore all emails sent by the automatic background task and only focus on the + # ones sent manually. + self.email_attempts = [] + + # Test that we're still able to manually trigger a mail to be sent. + request, channel = self.make_request( + b"POST", + "/_matrix/client/unstable/account_validity/send_mail", + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.assertEqual(len(self.email_attempts), 1) + class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): From d51ca9d9b3856d60af67ceac05df98347838a221 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 16:38:51 +0100 Subject: [PATCH 140/231] Changelog --- changelog.d/5363.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5363.feature diff --git a/changelog.d/5363.feature b/changelog.d/5363.feature new file mode 100644 index 000000000000..179a789fd733 --- /dev/null +++ b/changelog.d/5363.feature @@ -0,0 +1 @@ +Allow expired user to trigger renewal email sending manually From ccbc9e5e17b59661d5f1b67050927c2fb69a0a89 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 16:41:26 +0100 Subject: [PATCH 141/231] Gah towncrier --- changelog.d/5363.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5363.feature b/changelog.d/5363.feature index 179a789fd733..803fe3fc3793 100644 --- a/changelog.d/5363.feature +++ b/changelog.d/5363.feature @@ -1 +1 @@ -Allow expired user to trigger renewal email sending manually +Allow expired user to trigger renewal email sending manually. 
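With the `allow_expired` change in place, an expired user can drive the renewal flow themselves. A sketch of the request exercised by the new test, with a placeholder hostname and access token, using the `requests` library:

    import requests

    # Expired accounts may still call this endpoint; a renewal email is then
    # sent to the user's validated address.
    resp = requests.post(
        "https://matrix.example.com/_matrix/client/unstable"
        "/account_validity/send_mail",
        headers={"Authorization": "Bearer <access_token>"},
    )
    assert resp.status_code == 200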
From 6362e3af14cff28bc51d0e66d207b84bae7fd422 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 6 Jun 2019 04:20:35 +1000 Subject: [PATCH 142/231] add more comments --- synapse/storage/schema/full_schemas/README.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/schema/full_schemas/README.txt b/synapse/storage/schema/full_schemas/README.txt index df49f9b39e25..d3f640134408 100644 --- a/synapse/storage/schema/full_schemas/README.txt +++ b/synapse/storage/schema/full_schemas/README.txt @@ -1,6 +1,8 @@ Building full schema dumps ========================== +These schemas need to be made from a database that has had all background updates run. + Postgres -------- @@ -11,4 +13,7 @@ SQLite $ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite +After +----- + Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file From 64fa9287920fd391090a607a9bfd3da5415aee16 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 6 Jun 2019 10:34:12 +0100 Subject: [PATCH 143/231] Simplify condition --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 78318aacd808..b878ce11dc3f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -587,14 +587,14 @@ def compute_summary(self, room_id, sync_config, batch, state, now_token): # for the "name" value and default to an empty string. if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content.get("name", ""): + if name and name.content.get("name"): defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if canonical_alias and canonical_alias.content.get("alias", ""): + if canonical_alias and canonical_alias.content.get("alias"): defer.returnValue(summary) joined_user_ids = [ From 7898a1a48de18302bab06e985cc72ee67908d609 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 6 Jun 2019 10:34:33 +0100 Subject: [PATCH 144/231] Add credit in the changelog --- changelog.d/5084.bugfix | 1 - changelog.d/5089.bugfix | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/5084.bugfix create mode 100644 changelog.d/5089.bugfix diff --git a/changelog.d/5084.bugfix b/changelog.d/5084.bugfix deleted file mode 100644 index 9d8434460c09..000000000000 --- a/changelog.d/5084.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. diff --git a/changelog.d/5089.bugfix b/changelog.d/5089.bugfix new file mode 100644 index 000000000000..68643cebb7b9 --- /dev/null +++ b/changelog.d/5089.bugfix @@ -0,0 +1 @@ +Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. Thanks to @dnaf for this work! 
From 7f08a3523a2018b66cd96cccf30602c8f687b495 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Jun 2019 11:09:38 +0100 Subject: [PATCH 145/231] Better words --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 0b5f5f966375..6f7f65d96ba3 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -592,7 +592,7 @@ def get_topological_token_for_event(self, event_id): ) def get_max_topological_token(self, room_id, stream_key): - """Get the max topological token in a room that before given stream + """Get the max topological token in a room before the given stream ordering. Args: From 71063a69b8a72576ae7587042f4c2f24fcbd6bcd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 6 Jun 2019 14:45:17 +0100 Subject: [PATCH 146/231] Fix missing logcontext for PresenceHandler.on_shutdown. (#5369) Fixes some warnings, and a scary-looking stacktrace when sytest kills the process. --- changelog.d/5369.bugfix | 1 + synapse/handlers/presence.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5369.bugfix diff --git a/changelog.d/5369.bugfix b/changelog.d/5369.bugfix new file mode 100644 index 000000000000..cc61618f3958 --- /dev/null +++ b/changelog.d/5369.bugfix @@ -0,0 +1 @@ +Fix missing logcontext warnings on shutdown. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index e49c8203efc6..557fb5f83ddb 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -158,7 +158,13 @@ def __init__(self, hs): # have not yet been persisted self.unpersisted_users_changes = set() - hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown) + hs.get_reactor().addSystemEventTrigger( + "before", + "shutdown", + run_as_background_process, + "presence.on_shutdown", + self._on_shutdown, + ) self.serial_to_user = {} self._next_serial = 1 From 3b6645d3bf3d3be449e1162e4135b677a1086ade Mon Sep 17 00:00:00 2001 From: "Amber H. 
Brown" Date: Fri, 7 Jun 2019 01:20:58 +1000 Subject: [PATCH 147/231] remove background updates that arent needed --- .../full_schemas/54/stream_positions.sql | 34 +------------------ 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index 0febedcc5e66..084a70db6553 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -4,36 +4,4 @@ INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', - INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); INSERT INTO stats_stream_pos (stream_id) VALUES (null); -INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); - ---- User dir population - --- Set up staging tables -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_user_directory_createtables', '{}'); - --- Run through each room and update the user directory according to who is in it -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); - --- Insert all users, if search_all_users is on -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); - --- Clean up staging tables -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); - ---- Stats population - --- Set up staging tables -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_stats_createtables', '{}'); - --- Run through each room and update stats -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_stats_process_rooms', '{}', 'populate_stats_createtables'); - --- Clean up staging tables -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_stats_cleanup', '{}', 'populate_stats_process_rooms'); +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); \ No newline at end of file From f868c8df031e1c4a52e68ab32bb6fda086bd6ad7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 6 Jun 2019 16:36:28 +0100 Subject: [PATCH 148/231] Regen sample config before kicking off agents (#5370) * Regen sample config before kicking off agents * Add changelog --- .buildkite/pipeline.yml | 4 ++-- changelog.d/5370.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5370.misc diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 44b258dca677..b805b2d83909 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -36,8 +36,6 @@ steps: image: "python:3.6" propagate-environment: true - - wait - - command: - "python -m pip install tox" - "tox -e check-sampleconfig" @@ -46,6 +44,8 @@ steps: - docker#v3.0.1: image: "python:3.6" + - wait + - command: - "python -m pip install tox" - "tox -e py27,codecov" diff --git a/changelog.d/5370.misc b/changelog.d/5370.misc new file mode 100644 index 000000000000..b0473ef280d7 --- /dev/null +++ b/changelog.d/5370.misc @@ -0,0 +1 @@ +Don't 
run CI build checks until sample config check has passed. From 837e32ef551b9c53d23615cdde56cc9babcc9059 Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 7 Jun 2019 01:49:25 +1000 Subject: [PATCH 149/231] just user dir? --- .../full_schemas/54/stream_positions.sql | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index 084a70db6553..575ab6b354ab 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -4,4 +4,22 @@ INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', - INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); INSERT INTO stats_stream_pos (stream_id) VALUES (null); -INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); \ No newline at end of file +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); + +--- User dir population + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_user_directory_createtables', '{}'); + +-- Run through each room and update the user directory according to who is in it +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); + +-- Insert all users, if search_all_users is on +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); From 833c406b9b34392eb64780eeef6b670be762ea21 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 17:23:02 +0100 Subject: [PATCH 150/231] Neilj/1.0 upgrade notes (#5371) 1.0 upgrade/install notes --- INSTALL.md | 25 +++++++++++++++- UPGRADE.rst | 49 ++++++++++++++++++++++++++++++++ changelog.d/5371.feature | 1 + docs/MSC1711_certificates_FAQ.md | 12 ++++---- 4 files changed, 79 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5371.feature diff --git a/INSTALL.md b/INSTALL.md index 1934593148c8..d3a450f40f57 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -5,6 +5,7 @@ * [Prebuilt packages](#prebuilt-packages) * [Setting up Synapse](#setting-up-synapse) * [TLS certificates](#tls-certificates) + * [Email](#email) * [Registering a user](#registering-a-user) * [Setting up a TURN server](#setting-up-a-turn-server) * [URL previews](#url-previews) @@ -394,9 +395,31 @@ To configure Synapse to expose an HTTPS port, you will need to edit instance, if using certbot, use `fullchain.pem` as your certificate, not `cert.pem`). -For those of you upgrading your TLS certificate in readiness for Synapse 1.0, +For those of you upgrading your TLS certificate for Synapse 1.0 compliance, please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100). +## Email + +It is desirable for Synapse to have the capability to send email. For example, +this is required to support the 'password reset' feature. 
+ +To configure an SMTP server for Synapse, modify the configuration section +headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port`` +and ``notif_from`` fields filled out. You may also need to set ``smtp_user``, +``smtp_pass``, and ``require_transport_security``.. + +If Synapse is not configured with an SMTP server, password reset via email will + be disabled by default. + +Alternatively it is possible delegate the sending of email to the server's +identity server. Doing so is convenient but not recommended, since a malicious +or compromised identity server could theoretically hijack a given user's +account by redirecting mail. + +If you are absolutely certain that you wish to use the server's identity server +for password resets, set ``trust_identity_server_for_password_resets`` to +``true`` under the ``email:`` configuration section. + ## Registering a user You will need at least one user on your server in order to use a Matrix diff --git a/UPGRADE.rst b/UPGRADE.rst index 228222d53422..6032a505c945 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -49,6 +49,55 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" +Upgrading to v1.0 +================= + +Validation of TLS certificates +------------------------------ + +Synapse v1.0 is the first release to enforce +validation of TLS certificates for the federation API. It is therefore +essential that your certificates are correctly configured. See the `FAQ +`_ for more information. + +Note, v1.0 installations will also no longer be able to federate with servers +that have not correctly configured their certificates. + +In rare cases, it may be desirable to disable certificate checking: for +example, it might be essential to be able to federate with a given legacy +server in a closed federation. This can be done in one of two ways:- + +* Configure the global switch ``federation_verify_certificates`` to ``false``. +* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``. + +See the `sample configuration file `_ +for more details on these settings. + +Email +----- +When a user requests a password reset, Synapse will send an email to the +user to confirm the request. + +Previous versions of Synapse delegated the job of sending this email to an +identity server. If the identity server was somehow malicious or became +compromised, it would be theoretically possible to hijack an account through +this means. + +Therefore, by default, Synapse v1.0 will send the confirmation email itself. If +Synapse is not configured with an SMTP server, password reset via email will be +disabled. + +To configure an SMTP server for Synapse, modify the configuration section +headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port`` +and ``notif_from`` fields filled out. You may also need to set ``smtp_user``, +``smtp_pass``, and ``require_transport_security``. + +If you are absolutely certain that you wish to continue using an identity +server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``. + +See the `sample configuration file `_ +for more details on these settings. + Upgrading to v0.99.0 ==================== diff --git a/changelog.d/5371.feature b/changelog.d/5371.feature new file mode 100644 index 000000000000..7f960630e01a --- /dev/null +++ b/changelog.d/5371.feature @@ -0,0 +1 @@ +Update upgrade and installation guides ahead of 1.0. 
diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 37f7f669c948..599462bdcb73 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -68,16 +68,14 @@ Admins should upgrade and configure a valid CA cert. Homeservers that require a .well-known entry (see below), should retain their SRV record and use it alongside their .well-known record. -**>= 5th March 2019 - Synapse 1.0.0 is released** +**10th June 2019 - Synapse 1.0.0 is released** -1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one -month after 5th February to upgrade to 0.99.0 and deploy their certificates. In +1.0.0 is scheduled for release on 10th June. In accordance with the the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html) 1.0.0 will enforce certificate validity. This means that any homeserver without a valid certificate after this point will no longer be able to federate with 1.0.0 servers. - ## Configuring certificates for compatibility with Synapse 1.0.0 ### If you do not currently have an SRV record @@ -146,9 +144,9 @@ You can do this with a `.well-known` file as follows: with Synapse 0.34 and earlier. 2. Give Synapse a certificate corresponding to the target domain - (`customer.example.net` in the above example). You can either use Synapse's - built-in [ACME support](./ACME.md) for this (via the `domain` parameter in - the `acme` section), or acquire a certificate yourself and give it to + (`customer.example.net` in the above example). You can either use Synapse's + built-in [ACME support](./ACME.md) for this (via the `domain` parameter in + the `acme` section), or acquire a certificate yourself and give it to Synapse via `tls_certificate_path` and `tls_private_key_path`. 3. Restart Synapse to ensure the new certificate is loaded. From 9fbb20a531161652143028cde333429fe03b0343 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 6 Jun 2019 17:33:11 +0100 Subject: [PATCH 151/231] Stop hardcoding trust of old matrix.org key (#5374) There are a few changes going on here: * We make checking the signature on a key server response optional: if no verify_keys are specified, we trust to TLS to validate the connection. * We change the default config so that it does not require responses to be signed by the old key. * We replace the old 'perspectives' config with 'trusted_key_servers', which is also formatted slightly differently. * We emit a warning to the logs every time we trust a key server response signed by the old key. --- changelog.d/5374.feature | 1 + docs/sample_config.yaml | 43 +++- synapse/config/key.py | 228 +++++++++++++++--- synapse/crypto/keyring.py | 72 +++--- tests/crypto/test_keyring.py | 43 ++-- .../test_matrix_federation_agent.py | 1 + 6 files changed, 293 insertions(+), 95 deletions(-) create mode 100644 changelog.d/5374.feature diff --git a/changelog.d/5374.feature b/changelog.d/5374.feature new file mode 100644 index 000000000000..17937637ab8d --- /dev/null +++ b/changelog.d/5374.feature @@ -0,0 +1 @@ +Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). 
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2f37e71601c7..a2e815ea529e 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -952,12 +952,43 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # The trusted servers to download signing keys from. # -#perspectives: -# servers: -# "matrix.org": -# verify_keys: -# "ed25519:auto": -# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" +# When we need to fetch a signing key, each server is tried in parallel. +# +# Normally, the connection to the key server is validated via TLS certificates. +# Additional security can be provided by configuring a `verify key`, which +# will make synapse check that the response is signed by that key. +# +# This setting supercedes an older setting named `perspectives`. The old format +# is still supported for backwards-compatibility, but it is deprecated. +# +# Options for each entry in the list include: +# +# server_name: the name of the server. required. +# +# verify_keys: an optional map from key id to base64-encoded public key. +# If specified, we will check that the response is signed by at least +# one of the given keys. +# +# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset, +# and federation_verify_certificates is not `true`, synapse will refuse +# to start, because this would allow anyone who can spoof DNS responses +# to masquerade as the trusted key server. If you know what you are doing +# and are sure that your network environment provides a secure connection +# to the key server, you can set this to `true` to override this +# behaviour. +# +# An example configuration might look like: +# +#trusted_key_servers: +# - server_name: "my_trusted_server.example.com" +# verify_keys: +# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" +# - server_name: "my_other_trusted_server.example.com" +# +# The default configuration is: +# +#trusted_key_servers: +# - server_name: "matrix.org" # Enable SAML2 for registration and login. Uses pysaml2. diff --git a/synapse/config/key.py b/synapse/config/key.py index eb1025981898..aba7092ccdbb 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,6 +18,8 @@ import logging import os +import attr +import jsonschema from signedjson.key import ( NACL_ED25519, decode_signing_key_base64, @@ -32,11 +35,27 @@ from ._base import Config, ConfigError +INSECURE_NOTARY_ERROR = """\ +Your server is configured to accept key server responses without signature +validation or TLS certificate validation. This is likely to be very insecure. If +you are *sure* you want to do this, set 'accept_keys_insecurely' on the +keyserver configuration.""" + + logger = logging.getLogger(__name__) -class KeyConfig(Config): +@attr.s +class TrustedKeyServer(object): + # string: name of the server. + server_name = attr.ib() + # dict[str,VerifyKey]|None: map from key id to key object, or None to disable + # signature verification. 
+ verify_keys = attr.ib(default=None) + + +class KeyConfig(Config): def read_config(self, config): # the signing key can be specified inline or in a separate file if "signing_key" in config: @@ -49,16 +68,27 @@ def read_config(self, config): config.get("old_signing_keys", {}) ) self.key_refresh_interval = self.parse_duration( - config.get("key_refresh_interval", "1d"), + config.get("key_refresh_interval", "1d") ) - self.perspectives = self.read_perspectives( - config.get("perspectives", {}).get("servers", { - "matrix.org": {"verify_keys": { - "ed25519:auto": { - "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw", - } - }} - }) + + # if neither trusted_key_servers nor perspectives are given, use the default. + if "perspectives" not in config and "trusted_key_servers" not in config: + key_servers = [{"server_name": "matrix.org"}] + else: + key_servers = config.get("trusted_key_servers", []) + + if not isinstance(key_servers, list): + raise ConfigError( + "trusted_key_servers, if given, must be a list, not a %s" + % (type(key_servers).__name__,) + ) + + # merge the 'perspectives' config into the 'trusted_key_servers' config. + key_servers.extend(_perspectives_to_key_servers(config)) + + # list of TrustedKeyServer objects + self.key_servers = list( + _parse_key_servers(key_servers, self.federation_verify_certificates) ) self.macaroon_secret_key = config.get( @@ -78,8 +108,9 @@ def read_config(self, config): # falsification of values self.form_secret = config.get("form_secret", None) - def default_config(self, config_dir_path, server_name, generate_secrets=False, - **kwargs): + def default_config( + self, config_dir_path, server_name, generate_secrets=False, **kwargs + ): base_key_name = os.path.join(config_dir_path, server_name) if generate_secrets: @@ -91,7 +122,8 @@ def default_config(self, config_dir_path, server_name, generate_secrets=False, macaroon_secret_key = "# macaroon_secret_key: " form_secret = "# form_secret: " - return """\ + return ( + """\ # a secret which is used to sign access tokens. If none is specified, # the registration_shared_secret is used, if one is given; otherwise, # a secret key is derived from the signing key. @@ -133,33 +165,53 @@ def default_config(self, config_dir_path, server_name, generate_secrets=False, # The trusted servers to download signing keys from. # - #perspectives: - # servers: - # "matrix.org": - # verify_keys: - # "ed25519:auto": - # key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" - """ % locals() - - def read_perspectives(self, perspectives_servers): - servers = {} - for server_name, server_config in perspectives_servers.items(): - for key_id, key_data in server_config["verify_keys"].items(): - if is_signing_algorithm_supported(key_id): - key_base64 = key_data["key"] - key_bytes = decode_base64(key_base64) - verify_key = decode_verify_key_bytes(key_id, key_bytes) - servers.setdefault(server_name, {})[key_id] = verify_key - return servers + # When we need to fetch a signing key, each server is tried in parallel. + # + # Normally, the connection to the key server is validated via TLS certificates. + # Additional security can be provided by configuring a `verify key`, which + # will make synapse check that the response is signed by that key. + # + # This setting supercedes an older setting named `perspectives`. The old format + # is still supported for backwards-compatibility, but it is deprecated. + # + # Options for each entry in the list include: + # + # server_name: the name of the server. required. 
+ # + # verify_keys: an optional map from key id to base64-encoded public key. + # If specified, we will check that the response is signed by at least + # one of the given keys. + # + # accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset, + # and federation_verify_certificates is not `true`, synapse will refuse + # to start, because this would allow anyone who can spoof DNS responses + # to masquerade as the trusted key server. If you know what you are doing + # and are sure that your network environment provides a secure connection + # to the key server, you can set this to `true` to override this + # behaviour. + # + # An example configuration might look like: + # + #trusted_key_servers: + # - server_name: "my_trusted_server.example.com" + # verify_keys: + # "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" + # - server_name: "my_other_trusted_server.example.com" + # + # The default configuration is: + # + #trusted_key_servers: + # - server_name: "matrix.org" + """ + % locals() + ) def read_signing_key(self, signing_key_path): signing_keys = self.read_file(signing_key_path, "signing_key") try: return read_signing_keys(signing_keys.splitlines(True)) except Exception as e: - raise ConfigError( - "Error reading signing_key: %s" % (str(e)) - ) + raise ConfigError("Error reading signing_key: %s" % (str(e))) def read_old_signing_keys(self, old_signing_keys): keys = {} @@ -182,9 +234,7 @@ def generate_files(self, config): if not self.path_exists(signing_key_path): with open(signing_key_path, "w") as signing_key_file: key_id = "a_" + random_string(4) - write_signing_keys( - signing_key_file, (generate_signing_key(key_id),), - ) + write_signing_keys(signing_key_file, (generate_signing_key(key_id),)) else: signing_keys = self.read_file(signing_key_path, "signing_key") if len(signing_keys.split("\n")[0].split()) == 1: @@ -194,6 +244,106 @@ def generate_files(self, config): NACL_ED25519, key_id, signing_keys.split("\n")[0] ) with open(signing_key_path, "w") as signing_key_file: - write_signing_keys( - signing_key_file, (key,), + write_signing_keys(signing_key_file, (key,)) + + +def _perspectives_to_key_servers(config): + """Convert old-style 'perspectives' configs into new-style 'trusted_key_servers' + + Returns an iterable of entries to add to trusted_key_servers. 
+ """ + + # 'perspectives' looks like: + # + # { + # "servers": { + # "matrix.org": { + # "verify_keys": { + # "ed25519:auto": { + # "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" + # } + # } + # } + # } + # } + # + # 'trusted_keys' looks like: + # + # [ + # { + # "server_name": "matrix.org", + # "verify_keys": { + # "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw", + # } + # } + # ] + + perspectives_servers = config.get("perspectives", {}).get("servers", {}) + + for server_name, server_opts in perspectives_servers.items(): + trusted_key_server_entry = {"server_name": server_name} + verify_keys = server_opts.get("verify_keys") + if verify_keys is not None: + trusted_key_server_entry["verify_keys"] = { + key_id: key_data["key"] for key_id, key_data in verify_keys.items() + } + yield trusted_key_server_entry + + +TRUSTED_KEY_SERVERS_SCHEMA = { + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "schema for the trusted_key_servers setting", + "type": "array", + "items": { + "type": "object", + "properties": { + "server_name": {"type": "string"}, + "verify_keys": { + "type": "object", + # each key must be a base64 string + "additionalProperties": {"type": "string"}, + }, + }, + "required": ["server_name"], + }, +} + + +def _parse_key_servers(key_servers, federation_verify_certificates): + try: + jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA) + except jsonschema.ValidationError as e: + raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message) + + for server in key_servers: + server_name = server["server_name"] + result = TrustedKeyServer(server_name=server_name) + + verify_keys = server.get("verify_keys") + if verify_keys is not None: + result.verify_keys = {} + for key_id, key_base64 in verify_keys.items(): + if not is_signing_algorithm_supported(key_id): + raise ConfigError( + "Unsupported signing algorithm on key %s for server %s in " + "trusted_key_servers" % (key_id, server_name) ) + try: + key_bytes = decode_base64(key_base64) + verify_key = decode_verify_key_bytes(key_id, key_bytes) + except Exception as e: + raise ConfigError( + "Unable to parse key %s for server %s in " + "trusted_key_servers: %s" % (key_id, server_name, e) + ) + + result.verify_keys[key_id] = verify_key + + if ( + not verify_keys + and not server.get("accept_keys_insecurely") + and not federation_verify_certificates + ): + raise ConfigError(INSECURE_NOTARY_ERROR) + + yield result diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 2b6b5913bc7d..96964b0d5062 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -585,25 +585,27 @@ def __init__(self, hs): super(PerspectivesKeyFetcher, self).__init__(hs) self.clock = hs.get_clock() self.client = hs.get_http_client() - self.perspective_servers = self.config.perspectives + self.key_servers = self.config.key_servers @defer.inlineCallbacks def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" @defer.inlineCallbacks - def get_key(perspective_name, perspective_keys): + def get_key(key_server): try: result = yield self.get_server_verify_key_v2_indirect( - keys_to_fetch, perspective_name, perspective_keys + keys_to_fetch, key_server ) defer.returnValue(result) except KeyLookupError as e: - logger.warning("Key lookup failed from %r: %s", perspective_name, e) + logger.warning( + "Key lookup failed from %r: %s", key_server.server_name, e + ) except Exception as e: logger.exception( "Unable to get key from %r: %s %s", - perspective_name, + key_server.server_name, 
type(e).__name__, str(e), ) @@ -613,8 +615,8 @@ def get_key(perspective_name, perspective_keys): results = yield logcontext.make_deferred_yieldable( defer.gatherResults( [ - run_in_background(get_key, p_name, p_keys) - for p_name, p_keys in self.perspective_servers.items() + run_in_background(get_key, server) + for server in self.key_servers ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -629,17 +631,15 @@ def get_key(perspective_name, perspective_keys): @defer.inlineCallbacks def get_server_verify_key_v2_indirect( - self, keys_to_fetch, perspective_name, perspective_keys + self, keys_to_fetch, key_server ): """ Args: keys_to_fetch (dict[str, dict[str, int]]): the keys to be fetched. server_name -> key_id -> min_valid_ts - perspective_name (str): name of the notary server to query for the keys - - perspective_keys (dict[str, VerifyKey]): map of key_id->key for the - notary server + key_server (synapse.config.key.TrustedKeyServer): notary server to query for + the keys Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map @@ -649,6 +649,7 @@ def get_server_verify_key_v2_indirect( KeyLookupError if there was an error processing the entire response from the server """ + perspective_name = key_server.server_name logger.info( "Requesting keys %s from notary server %s", keys_to_fetch.items(), @@ -689,11 +690,13 @@ def get_server_verify_key_v2_indirect( ) try: - processed_response = yield self._process_perspectives_response( - perspective_name, - perspective_keys, + self._validate_perspectives_response( + key_server, response, - time_added_ms=time_now_ms, + ) + + processed_response = yield self.process_v2_response( + perspective_name, response, time_added_ms=time_now_ms ) except KeyLookupError as e: logger.warning( @@ -717,28 +720,24 @@ def get_server_verify_key_v2_indirect( defer.returnValue(keys) - def _process_perspectives_response( - self, perspective_name, perspective_keys, response, time_added_ms + def _validate_perspectives_response( + self, key_server, response, ): - """Parse a 'Server Keys' structure from the result of a /key/query request - - Checks that the entry is correctly signed by the perspectives server, and then - passes over to process_v2_response + """Optionally check the signature on the result of a /key/query request Args: - perspective_name (str): the name of the notary server that produced this - result - - perspective_keys (dict[str, VerifyKey]): map of key_id->key for the - notary server + key_server (synapse.config.key.TrustedKeyServer): the notary server that + produced this result response (dict): the json-decoded Server Keys response object + """ + perspective_name = key_server.server_name + perspective_keys = key_server.verify_keys - time_added_ms (int): the timestamp to record in server_keys_json + if perspective_keys is None: + # signature checking is disabled on this server + return - Returns: - Deferred[dict[str, FetchKeyResult]]: map from key_id to result object - """ if ( u"signatures" not in response or perspective_name not in response[u"signatures"] @@ -751,6 +750,13 @@ def _process_perspectives_response( verify_signed_json(response, perspective_name, perspective_keys[key_id]) verified = True + if perspective_name == "matrix.org" and key_id == "ed25519:auto": + logger.warning( + "Trusting trusted_key_server responses signed by the " + "compromised matrix.org signing key 'ed25519:auto'. " + "This is a placebo." 
+ ) + if not verified: raise KeyLookupError( "Response not signed with a known key: signed with: %r, known keys: %r" @@ -760,10 +766,6 @@ def _process_perspectives_response( ) ) - return self.process_v2_response( - perspective_name, response, time_added_ms=time_added_ms - ) - class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 18121f4f6c6d..4b1901ce31a5 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,7 +19,7 @@ import canonicaljson import signedjson.key import signedjson.sign -from signedjson.key import get_verify_key +from signedjson.key import encode_verify_key_base64, get_verify_key from twisted.internet import defer @@ -40,7 +40,7 @@ def __init__(self): def get_verify_keys(self): vk = signedjson.key.get_verify_key(self.key) - return {"%s:%s" % (vk.alg, vk.version): vk} + return {"%s:%s" % (vk.alg, vk.version): encode_verify_key_base64(vk)} def get_signed_key(self, server_name, verify_key): key_id = "%s:%s" % (verify_key.alg, verify_key.version) @@ -48,9 +48,7 @@ def get_signed_key(self, server_name, verify_key): "server_name": server_name, "old_verify_keys": {}, "valid_until_ts": time.time() * 1000 + 3600, - "verify_keys": { - key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)} - }, + "verify_keys": {key_id: {"key": encode_verify_key_base64(verify_key)}}, } self.sign_response(res) return res @@ -63,10 +61,18 @@ class KeyringTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.mock_perspective_server = MockPerspectiveServer() self.http_client = Mock() - hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) - keys = self.mock_perspective_server.get_verify_keys() - hs.config.perspectives = {self.mock_perspective_server.server_name: keys} - return hs + + config = self.default_config() + config["trusted_key_servers"] = [ + { + "server_name": self.mock_perspective_server.server_name, + "verify_keys": self.mock_perspective_server.get_verify_keys(), + } + ] + + return self.setup_test_homeserver( + handlers=None, http_client=self.http_client, config=config + ) def check_context(self, _, expected): self.assertEquals( @@ -371,10 +377,18 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.mock_perspective_server = MockPerspectiveServer() self.http_client = Mock() - hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) - keys = self.mock_perspective_server.get_verify_keys() - hs.config.perspectives = {self.mock_perspective_server.server_name: keys} - return hs + + config = self.default_config() + config["trusted_key_servers"] = [ + { + "server_name": self.mock_perspective_server.server_name, + "verify_keys": self.mock_perspective_server.get_verify_keys(), + } + ] + + return self.setup_test_homeserver( + handlers=None, http_client=self.http_client, config=config + ) def test_get_keys_from_perspectives(self): # arbitrarily advance the clock a bit @@ -439,8 +453,7 @@ def post_json(destination, path, data, **kwargs): self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) self.assertEqual( - bytes(res["key_json"]), - canonicaljson.encode_canonical_json(response), + bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) ) def test_invalid_perspectives_responses(self): diff --git a/tests/http/federation/test_matrix_federation_agent.py 
b/tests/http/federation/test_matrix_federation_agent.py index 4153da4da7fa..05880a10489b 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -57,6 +57,7 @@ def setUp(self): # present will not be trusted. We should do better here, though. config_dict = default_config("test", parse=False) config_dict["federation_verify_certificates"] = False + config_dict["trusted_key_servers"] = [] config = HomeServerConfig() config.parse_config_dict(config_dict) From 3719680ee42b72b8480fa76a1455576897b65ef0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 6 Jun 2019 17:34:07 +0100 Subject: [PATCH 152/231] Add ability to perform password reset via email without trusting the identity server (#5377) Sends password reset emails from the homeserver instead of proxying to the identity server. This is now the default behaviour for security reasons. If you wish to continue proxying password reset requests to the identity server you must now enable the email.trust_identity_server_for_password_resets option. This PR is a culmination of 3 smaller PRs which have each been separately reviewed: * #5308 * #5345 * #5368 --- changelog.d/5377.feature | 1 + docs/sample_config.yaml | 60 +++- synapse/api/errors.py | 9 + synapse/app/homeserver.py | 1 + synapse/config/emailconfig.py | 153 ++++++++- synapse/handlers/auth.py | 64 +++- synapse/handlers/identity.py | 13 +- synapse/push/mailer.py | 85 +++-- synapse/push/pusher.py | 4 +- synapse/python_dependencies.py | 2 +- synapse/res/templates/password_reset.html | 9 + synapse/res/templates/password_reset.txt | 7 + .../res/templates/password_reset_failure.html | 6 + .../res/templates/password_reset_success.html | 6 + synapse/rest/client/v2_alpha/account.py | 243 ++++++++++++++- synapse/storage/_base.py | 6 +- synapse/storage/prepare_database.py | 2 +- synapse/storage/registration.py | 290 +++++++++++++++++- .../delta/55/track_threepid_validations.sql | 31 ++ tests/utils.py | 1 - 20 files changed, 922 insertions(+), 71 deletions(-) create mode 100644 changelog.d/5377.feature create mode 100644 synapse/res/templates/password_reset.html create mode 100644 synapse/res/templates/password_reset.txt create mode 100644 synapse/res/templates/password_reset_failure.html create mode 100644 synapse/res/templates/password_reset_success.html create mode 100644 synapse/storage/schema/delta/55/track_threepid_validations.sql diff --git a/changelog.d/5377.feature b/changelog.d/5377.feature new file mode 100644 index 000000000000..6aae41847a3e --- /dev/null +++ b/changelog.d/5377.feature @@ -0,0 +1 @@ +Add ability to perform password reset via email without trusting the identity server. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index a2e815ea529e..ea73306fb995 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1065,10 +1065,8 @@ password_config: -# Enable sending emails for notification events or expiry notices -# Defining a custom URL for Riot is only needed if email notifications -# should contain links to a self-hosted installation of Riot; when set -# the "app_name" setting is ignored. 
+# Enable sending emails for password resets, notification events or +# account expiry notices # # If your SMTP server requires authentication, the optional smtp_user & # smtp_pass variables should be used @@ -1076,22 +1074,64 @@ password_config: #email: # enable_notifs: false # smtp_host: "localhost" -# smtp_port: 25 +# smtp_port: 25 # SSL: 465, STARTTLS: 587 # smtp_user: "exampleusername" # smtp_pass: "examplepassword" # require_transport_security: False # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix -# # if template_dir is unset, uses the example templates that are part of -# # the Synapse distribution. +# +# # Enable email notifications by default +# notif_for_new_users: True +# +# # Defining a custom URL for Riot is only needed if email notifications +# # should contain links to a self-hosted installation of Riot; when set +# # the "app_name" setting is ignored +# riot_base_url: "http://localhost/riot" +# +# # Enable sending password reset emails via the configured, trusted +# # identity servers +# # +# # IMPORTANT! This will give a malicious or overtaken identity server +# # the ability to reset passwords for your users! Make absolutely sure +# # that you want to do this! It is strongly recommended that password +# # reset emails be sent by the homeserver instead +# # +# # If this option is set to false and SMTP options have not been +# # configured, resetting user passwords via email will be disabled +# #trust_identity_server_for_password_resets: false +# +# # Configure the time that a validation email or text message code +# # will expire after sending +# # +# # This is currently used for password resets +# #validation_token_lifetime: 1h +# +# # Template directory. All template files should be stored within this +# # directory +# # # #template_dir: res/templates +# +# # Templates for email notifications +# # # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt -# # Templates for account expiry notices. +# +# # Templates for account expiry notices +# # # expiry_template_html: notice_expiry.html # expiry_template_text: notice_expiry.txt -# notif_for_new_users: True -# riot_base_url: "http://localhost/riot" +# +# # Templates for password reset emails sent by the homeserver +# # +# #password_reset_template_html: password_reset.html +# #password_reset_template_text: password_reset.txt +# +# # Templates for password reset success and failure pages that a user +# # will see after attempting to reset their password +# # +# #password_reset_template_success_html: password_reset_success.html +# #password_reset_template_failure_html: password_reset_failure.html #password_providers: diff --git a/synapse/api/errors.py b/synapse/api/errors.py index e91697049ce2..66201d6efe41 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -339,6 +339,15 @@ def __init__(self): ) +class ThreepidValidationError(SynapseError): + """An error raised when there was a problem authorising an event.""" + + def __init__(self, *args, **kwargs): + if "errcode" not in kwargs: + kwargs["errcode"] = Codes.FORBIDDEN + super(ThreepidValidationError, self).__init__(*args, **kwargs) + + class IncompatibleRoomVersionError(SynapseError): """A server is trying to join a room whose version it does not support. 
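Before moving on to the wiring, it may help to see the new email options together. The sketch below expresses a minimal "send reset mail from the homeserver" configuration as a Python dict, in the style of the test helpers later in this series; every key comes from the sample config above, but the host names, credentials and base URL are illustrative placeholders.

# Sketch only: a minimal configuration for sending password reset mail from
# the homeserver itself, using the options documented in the sample config
# above. All values here are illustrative placeholders, not defaults.
config = {
    # Required for local password resets: used to build the links embedded
    # in the reset emails.
    "public_baseurl": "https://matrix.example.com/",
    "email": {
        "smtp_host": "mail.example.com",
        "smtp_port": 587,  # SSL: 465, STARTTLS: 587
        "smtp_user": "synapse",
        "smtp_pass": "changeme",
        "require_transport_security": True,
        "notif_from": "Your Friendly %(app)s Home Server <noreply@example.com>",
        # Leaving this false (the default) keeps reset mail on the
        # homeserver rather than proxying it to an identity server.
        "trust_identity_server_for_password_resets": False,
        "validation_token_lifetime": "1h",
    },
}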
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 1045d28949e2..df524a23dd52 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -176,6 +176,7 @@ def _configure_named_resource(self, name, compress=False): resources.update({ "/_matrix/client/api/v1": client_resource, + "/_synapse/password_reset": client_resource, "/_matrix/client/r0": client_resource, "/_matrix/client/unstable": client_resource, "/_matrix/client/v2_alpha": client_resource, diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 8400471f408e..ae0425290623 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -50,6 +50,11 @@ def read_config(self, config): else: self.email_app_name = "Matrix" + # TODO: Rename notif_from to something more generic, or have a separate + # from for password resets, message notifications, etc? + # Currently the email section is a bit bogged down with settings for + # multiple functions. Would be good to split it out into separate + # sections and only put the common ones under email: self.email_notif_from = email_config.get("notif_from", None) if self.email_notif_from is not None: # make sure it's valid @@ -74,7 +79,28 @@ def read_config(self, config): "account_validity", {}, ).get("renew_at") - if self.email_enable_notifs or account_validity_renewal_enabled: + email_trust_identity_server_for_password_resets = email_config.get( + "trust_identity_server_for_password_resets", False, + ) + self.email_password_reset_behaviour = ( + "remote" if email_trust_identity_server_for_password_resets else "local" + ) + if self.email_password_reset_behaviour == "local" and email_config == {}: + logger.warn( + "User password resets have been disabled due to lack of email config" + ) + self.email_password_reset_behaviour = "off" + + # Get lifetime of a validation token in milliseconds + self.email_validation_token_lifetime = self.parse_duration( + email_config.get("validation_token_lifetime", "1h") + ) + + if ( + self.email_enable_notifs + or account_validity_renewal_enabled + or self.email_password_reset_behaviour == "local" + ): # make sure we can import the required deps import jinja2 import bleach @@ -82,6 +108,67 @@ def read_config(self, config): jinja2 bleach + if self.email_password_reset_behaviour == "local": + required = [ + "smtp_host", + "smtp_port", + "notif_from", + ] + + missing = [] + for k in required: + if k not in email_config: + missing.append(k) + + if (len(missing) > 0): + raise RuntimeError( + "email.password_reset_behaviour is set to 'local' " + "but required keys are missing: %s" % + (", ".join(["email." 
+ k for k in missing]),) + ) + + # Templates for password reset emails + self.email_password_reset_template_html = email_config.get( + "password_reset_template_html", "password_reset.html", + ) + self.email_password_reset_template_text = email_config.get( + "password_reset_template_text", "password_reset.txt", + ) + self.email_password_reset_failure_template = email_config.get( + "password_reset_failure_template", "password_reset_failure.html", + ) + # This template does not support any replaceable variables, so we will + # read it from the disk once during setup + email_password_reset_success_template = email_config.get( + "password_reset_success_template", "password_reset_success.html", + ) + + # Check templates exist + for f in [self.email_password_reset_template_html, + self.email_password_reset_template_text, + self.email_password_reset_failure_template, + email_password_reset_success_template]: + p = os.path.join(self.email_template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find template file %s" % (p, )) + + # Retrieve content of web templates + filepath = os.path.join( + self.email_template_dir, + email_password_reset_success_template, + ) + self.email_password_reset_success_html_content = self.read_file( + filepath, + "email.password_reset_template_success_html", + ) + + if config.get("public_baseurl") is None: + raise RuntimeError( + "email.password_reset_behaviour is set to 'local' but no " + "public_baseurl is set. This is necessary to generate password " + "reset links" + ) + if self.email_enable_notifs: required = [ "smtp_host", @@ -121,10 +208,6 @@ def read_config(self, config): self.email_riot_base_url = email_config.get( "riot_base_url", None ) - else: - self.email_enable_notifs = False - # Not much point setting defaults for the rest: it would be an - # error for them to be used. if account_validity_renewal_enabled: self.email_expiry_template_html = email_config.get( @@ -141,10 +224,8 @@ def read_config(self, config): def default_config(self, config_dir_path, server_name, **kwargs): return """ - # Enable sending emails for notification events or expiry notices - # Defining a custom URL for Riot is only needed if email notifications - # should contain links to a self-hosted installation of Riot; when set - # the "app_name" setting is ignored. + # Enable sending emails for password resets, notification events or + # account expiry notices # # If your SMTP server requires authentication, the optional smtp_user & # smtp_pass variables should be used @@ -152,20 +233,62 @@ def default_config(self, config_dir_path, server_name, **kwargs): #email: # enable_notifs: false # smtp_host: "localhost" - # smtp_port: 25 + # smtp_port: 25 # SSL: 465, STARTTLS: 587 # smtp_user: "exampleusername" # smtp_pass: "examplepassword" # require_transport_security: False # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix - # # if template_dir is unset, uses the example templates that are part of - # # the Synapse distribution. + # + # # Enable email notifications by default + # notif_for_new_users: True + # + # # Defining a custom URL for Riot is only needed if email notifications + # # should contain links to a self-hosted installation of Riot; when set + # # the "app_name" setting is ignored + # riot_base_url: "http://localhost/riot" + # + # # Enable sending password reset emails via the configured, trusted + # # identity servers + # # + # # IMPORTANT! 
This will give a malicious or overtaken identity server + # # the ability to reset passwords for your users! Make absolutely sure + # # that you want to do this! It is strongly recommended that password + # # reset emails be sent by the homeserver instead + # # + # # If this option is set to false and SMTP options have not been + # # configured, resetting user passwords via email will be disabled + # #trust_identity_server_for_password_resets: false + # + # # Configure the time that a validation email or text message code + # # will expire after sending + # # + # # This is currently used for password resets + # #validation_token_lifetime: 1h + # + # # Template directory. All template files should be stored within this + # # directory + # # # #template_dir: res/templates + # + # # Templates for email notifications + # # # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt - # # Templates for account expiry notices. + # + # # Templates for account expiry notices + # # # expiry_template_html: notice_expiry.html # expiry_template_text: notice_expiry.txt - # notif_for_new_users: True - # riot_base_url: "http://localhost/riot" + # + # # Templates for password reset emails sent by the homeserver + # # + # #password_reset_template_html: password_reset.html + # #password_reset_template_text: password_reset.txt + # + # # Templates for password reset success and failure pages that a user + # # will see after attempting to reset their password + # # + # #password_reset_template_success_html: password_reset_success.html + # #password_reset_template_failure_html: password_reset_failure.html """ diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index aa5d89a9ac90..7f8ddc99c6bd 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -162,7 +162,7 @@ def validate_user_via_ui_auth(self, requester, request_body, clientip): defer.returnValue(params) @defer.inlineCallbacks - def check_auth(self, flows, clientdict, clientip): + def check_auth(self, flows, clientdict, clientip, password_servlet=False): """ Takes a dictionary sent by the client in the login / registration protocol and handles the User-Interactive Auth flow. @@ -186,6 +186,16 @@ def check_auth(self, flows, clientdict, clientip): clientip (str): The IP address of the client. + password_servlet (bool): Whether the request originated from + PasswordRestServlet. + XXX: This is a temporary hack to distinguish between checking + for threepid validations locally (in the case of password + resets) and using the identity server (in the case of binding + a 3PID during registration). Once we start using the + homeserver for both tasks, this distinction will no longer be + necessary. + + Returns: defer.Deferred[dict, dict, str]: a deferred tuple of (creds, params, session_id). 
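To make the new code path concrete, here is a sketch of the user-interactive auth body a client submits to the password endpoint once its email session has been validated. The field names follow _check_threepid below; the secret, session id and password values are invented for illustration.

# Illustrative only: the JSON body POSTed to /_matrix/client/r0/account/password
# after email validation. "m.login.email.identity" is the value of
# LoginType.EMAIL_IDENTITY that the flow above matches on.
body = {
    "auth": {
        "type": "m.login.email.identity",
        "threepid_creds": {
            "client_secret": "d0nttellanyone",  # same secret sent at requestToken time
            "sid": "abc123",                    # session id returned by requestToken
        },
    },
    "new_password": "correct horse battery staple",
}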
@@ -241,7 +251,9 @@ def check_auth(self, flows, clientdict, clientip): if 'type' in authdict: login_type = authdict['type'] try: - result = yield self._check_auth_dict(authdict, clientip) + result = yield self._check_auth_dict( + authdict, clientip, password_servlet=password_servlet, + ) if result: creds[login_type] = result self._save_session(session) @@ -351,7 +363,7 @@ def get_session_data(self, session_id, key, default=None): return sess.setdefault('serverdict', {}).get(key, default) @defer.inlineCallbacks - def _check_auth_dict(self, authdict, clientip): + def _check_auth_dict(self, authdict, clientip, password_servlet=False): """Attempt to validate the auth dict provided by a client Args: @@ -369,7 +381,13 @@ def _check_auth_dict(self, authdict, clientip): login_type = authdict['type'] checker = self.checkers.get(login_type) if checker is not None: - res = yield checker(authdict, clientip) + # XXX: Temporary workaround for having Synapse handle password resets + # See AuthHandler.check_auth for further details + res = yield checker( + authdict, + clientip=clientip, + password_servlet=password_servlet, + ) defer.returnValue(res) # build a v1-login-style dict out of the authdict and fall back to the @@ -383,7 +401,7 @@ def _check_auth_dict(self, authdict, clientip): defer.returnValue(canonical_id) @defer.inlineCallbacks - def _check_recaptcha(self, authdict, clientip): + def _check_recaptcha(self, authdict, clientip, **kwargs): try: user_response = authdict["response"] except KeyError: @@ -429,20 +447,20 @@ def _check_recaptcha(self, authdict, clientip): defer.returnValue(True) raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) - def _check_email_identity(self, authdict, _): - return self._check_threepid('email', authdict) + def _check_email_identity(self, authdict, **kwargs): + return self._check_threepid('email', authdict, **kwargs) - def _check_msisdn(self, authdict, _): + def _check_msisdn(self, authdict, **kwargs): return self._check_threepid('msisdn', authdict) - def _check_dummy_auth(self, authdict, _): + def _check_dummy_auth(self, authdict, **kwargs): return defer.succeed(True) - def _check_terms_auth(self, authdict, _): + def _check_terms_auth(self, authdict, **kwargs): return defer.succeed(True) @defer.inlineCallbacks - def _check_threepid(self, medium, authdict): + def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs): if 'threepid_creds' not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) @@ -451,7 +469,29 @@ def _check_threepid(self, medium, authdict): identity_handler = self.hs.get_handlers().identity_handler logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) - threepid = yield identity_handler.threepid_from_creds(threepid_creds) + if ( + not password_servlet + or self.hs.config.email_password_reset_behaviour == "remote" + ): + threepid = yield identity_handler.threepid_from_creds(threepid_creds) + elif self.hs.config.email_password_reset_behaviour == "local": + row = yield self.store.get_threepid_validation_session( + medium, + threepid_creds["client_secret"], + sid=threepid_creds["sid"], + ) + + threepid = { + "medium": row["medium"], + "address": row["address"], + "validated_at": row["validated_at"], + } if row else None + + if row: + # Valid threepid returned, delete from the db + yield self.store.delete_threepid_session(threepid_creds["sid"]) + else: + raise SynapseError(400, "Password resets are not enabled on this homeserver") if not threepid: raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 22469486d7ed..04caf657934d 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -247,7 +247,14 @@ def try_unbind_threepid_with_id_server(self, mxid, threepid, id_server): defer.returnValue(changed) @defer.inlineCallbacks - def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs): + def requestEmailToken( + self, + id_server, + email, + client_secret, + send_attempt, + next_link=None, + ): if not self._should_trust_id_server(id_server): raise SynapseError( 400, "Untrusted ID server '%s'" % id_server, @@ -259,7 +266,9 @@ def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwa 'client_secret': client_secret, 'send_attempt': send_attempt, } - params.update(kwargs) + + if next_link: + params.update({'next_link': next_link}) try: data = yield self.http_client.post_json_get_json( diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index c269bcf4a402..4bc9eb731319 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -80,10 +80,10 @@ class Mailer(object): - def __init__(self, hs, app_name, notif_template_html, notif_template_text): + def __init__(self, hs, app_name, template_html, template_text): self.hs = hs - self.notif_template_html = notif_template_html - self.notif_template_text = notif_template_text + self.template_html = template_html + self.template_text = template_text self.sendmail = self.hs.get_sendmail() self.store = self.hs.get_datastore() @@ -94,21 +94,48 @@ def __init__(self, hs, app_name, notif_template_html, notif_template_text): logger.info("Created Mailer for app_name %s" % app_name) @defer.inlineCallbacks - def send_notification_mail(self, app_id, user_id, email_address, - push_actions, reason): - try: - from_string = self.hs.config.email_notif_from % { - "app": self.app_name - } - except TypeError: - from_string = self.hs.config.email_notif_from + def send_password_reset_mail( + self, + email_address, + token, + client_secret, + sid, + ): + """Send an email with a password reset link to a user + + Args: + email_address (str): Email address we're sending the password + reset to + token (str): Unique token generated by the server to verify + password reset email was received + client_secret (str): Unique token generated by the client to + group together multiple email sending attempts + sid (str): The generated session ID + """ + if email.utils.parseaddr(email_address)[1] == '': + raise RuntimeError("Invalid 'to' email address") + + link = ( + self.hs.config.public_baseurl + + 
"_synapse/password_reset/email/submit_token" + "?token=%s&client_secret=%s&sid=%s" % + (token, client_secret, sid) + ) - raw_from = email.utils.parseaddr(from_string)[1] - raw_to = email.utils.parseaddr(email_address)[1] + template_vars = { + "link": link, + } - if raw_to == '': - raise RuntimeError("Invalid 'to' address") + yield self.send_email( + email_address, + "[%s] Password Reset Email" % self.hs.config.server_name, + template_vars, + ) + @defer.inlineCallbacks + def send_notification_mail(self, app_id, user_id, email_address, + push_actions, reason): + """Send email regarding a user's room notifications""" rooms_in_order = deduped_ordered_list( [pa['room_id'] for pa in push_actions] ) @@ -176,14 +203,36 @@ def _fetch_room_state(room_id): "reason": reason, } - html_text = self.notif_template_html.render(**template_vars) + yield self.send_email( + email_address, + "[%s] %s" % (self.app_name, summary_text), + template_vars, + ) + + @defer.inlineCallbacks + def send_email(self, email_address, subject, template_vars): + """Send an email with the given information and template text""" + try: + from_string = self.hs.config.email_notif_from % { + "app": self.app_name + } + except TypeError: + from_string = self.hs.config.email_notif_from + + raw_from = email.utils.parseaddr(from_string)[1] + raw_to = email.utils.parseaddr(email_address)[1] + + if raw_to == '': + raise RuntimeError("Invalid 'to' address") + + html_text = self.template_html.render(**template_vars) html_part = MIMEText(html_text, "html", "utf8") - plain_text = self.notif_template_text.render(**template_vars) + plain_text = self.template_text.render(**template_vars) text_part = MIMEText(plain_text, "plain", "utf8") multipart_msg = MIMEMultipart('alternative') - multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text) + multipart_msg['Subject'] = subject multipart_msg['From'] = from_string multipart_msg['To'] = email_address multipart_msg['Date'] = email.utils.formatdate() diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 14bc7823cf3f..aff85daeb5b5 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -70,8 +70,8 @@ def _create_email_pusher(self, _hs, pusherdict): mailer = Mailer( hs=self.hs, app_name=app_name, - notif_template_html=self.notif_template_html, - notif_template_text=self.notif_template_text, + template_html=self.notif_template_html, + template_text=self.notif_template_text, ) self.mailers[app_name] = mailer return EmailPusher(self.hs, pusherdict, mailer) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f64baa4d5896..c78f2cb15e0c 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -77,7 +77,7 @@ ] CONDITIONAL_REQUIREMENTS = { - "email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"], + "email": ["Jinja2>=2.9", "bleach>=1.4.2"], "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"], # we use execute_batch, which arrived in psycopg 2.7. diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html new file mode 100644 index 000000000000..4fa7b367341a --- /dev/null +++ b/synapse/res/templates/password_reset.html @@ -0,0 +1,9 @@ + + +
+<p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
+
+{{ link }}
+
+<p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
+ + diff --git a/synapse/res/templates/password_reset.txt b/synapse/res/templates/password_reset.txt new file mode 100644 index 000000000000..f0deff59a75f --- /dev/null +++ b/synapse/res/templates/password_reset.txt @@ -0,0 +1,7 @@ +A password reset request has been received for your Matrix account. If this +was you, please click the link below to confirm resetting your password: + +{{ link }} + +If this was not you, please disregard this email and contact your server +administrator. Thank you. diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html new file mode 100644 index 000000000000..0b132cf8db94 --- /dev/null +++ b/synapse/res/templates/password_reset_failure.html @@ -0,0 +1,6 @@ + + + +
+<p>{{ failure_reason }}. Your password has not been reset.</p>
+ + diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html new file mode 100644 index 000000000000..7b6fa5e6f03f --- /dev/null +++ b/synapse/res/templates/password_reset_success.html @@ -0,0 +1,6 @@ + + + +
+<p>Your password was successfully reset. You may now close this window.</p>
+ + diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ca35dc3c8395..e4c63b69b96f 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -15,19 +15,25 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import re from six.moves import http_client +import jinja2 + from twisted.internet import defer from synapse.api.constants import LoginType -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, SynapseError, ThreepidValidationError +from synapse.http.server import finish_request from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, + parse_string, ) from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.stringutils import random_string from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -41,17 +47,42 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): def __init__(self, hs): super(EmailPasswordRequestTokenRestServlet, self).__init__() self.hs = hs + self.datastore = hs.get_datastore() + self.config = hs.config self.identity_handler = hs.get_handlers().identity_handler + if self.config.email_password_reset_behaviour == "local": + from synapse.push.mailer import Mailer, load_jinja2_templates + templates = load_jinja2_templates( + config=hs.config, + template_html_name=hs.config.email_password_reset_template_html, + template_text_name=hs.config.email_password_reset_template_text, + ) + self.mailer = Mailer( + hs=self.hs, + app_name=self.config.email_app_name, + template_html=templates[0], + template_text=templates[1], + ) + @defer.inlineCallbacks def on_POST(self, request): + if self.config.email_password_reset_behaviour == "off": + raise SynapseError(400, "Password resets have been disabled on this server") + body = parse_json_object_from_request(request) assert_params_in_dict(body, [ - 'id_server', 'client_secret', 'email', 'send_attempt' + 'client_secret', 'email', 'send_attempt' ]) - if not check_3pid_allowed(self.hs, "email", body['email']): + # Extract params from body + client_secret = body["client_secret"] + email = body["email"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param + + if not check_3pid_allowed(self.hs, "email", email): raise SynapseError( 403, "Your email domain is not authorized on this server", @@ -59,15 +90,100 @@ def on_POST(self, request): ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( - 'email', body['email'] + 'email', email, ) if existingUid is None: raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - ret = yield self.identity_handler.requestEmailToken(**body) + if self.config.email_password_reset_behaviour == "remote": + if 'id_server' not in body: + raise SynapseError(400, "Missing 'id_server' param in body") + + # Have the identity server handle the password reset flow + ret = yield self.identity_handler.requestEmailToken( + body["id_server"], email, client_secret, send_attempt, next_link, + ) + else: + # Send password reset emails from Synapse + sid = yield self.send_password_reset( + email, client_secret, send_attempt, next_link, + ) + + # Wrap the session id in a JSON object + ret = {"sid": sid} + defer.returnValue((200, ret)) + @defer.inlineCallbacks + def send_password_reset( + self, + email, + client_secret, + send_attempt, + 
next_link=None, + ): + """Send a password reset email + + Args: + email (str): The user's email address + client_secret (str): The provided client secret + send_attempt (int): Which send attempt this is + + Returns: + The new session_id upon success + + Raises: + SynapseError is an error occurred when sending the email + """ + # Check that this email/client_secret/send_attempt combo is new or + # greater than what we've seen previously + session = yield self.datastore.get_threepid_validation_session( + "email", client_secret, address=email, validated=False, + ) + + # Check to see if a session already exists and that it is not yet + # marked as validated + if session and session.get("validated_at") is None: + session_id = session['session_id'] + last_send_attempt = session['last_send_attempt'] + + # Check that the send_attempt is higher than previous attempts + if send_attempt <= last_send_attempt: + # If not, just return a success without sending an email + defer.returnValue(session_id) + else: + # An non-validated session does not exist yet. + # Generate a session id + session_id = random_string(16) + + # Generate a new validation token + token = random_string(32) + + # Send the mail with the link containing the token, client_secret + # and session_id + try: + yield self.mailer.send_password_reset_mail( + email, token, client_secret, session_id, + ) + except Exception: + logger.exception( + "Error sending a password reset email to %s", email, + ) + raise SynapseError( + 500, "An error was encountered when sending the password reset email" + ) + + token_expires = (self.hs.clock.time_msec() + + self.config.email_validation_token_lifetime) + + yield self.datastore.start_or_continue_validation_session( + "email", email, session_id, client_secret, send_attempt, + next_link, token, token_expires, + ) + + defer.returnValue(session_id) + class MsisdnPasswordRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/password/msisdn/requestToken$") @@ -80,6 +196,9 @@ def __init__(self, hs): @defer.inlineCallbacks def on_POST(self, request): + if not self.config.email_password_reset_behaviour == "off": + raise SynapseError(400, "Password resets have been disabled on this server") + body = parse_json_object_from_request(request) assert_params_in_dict(body, [ @@ -107,6 +226,118 @@ def on_POST(self, request): defer.returnValue((200, ret)) +class PasswordResetSubmitTokenServlet(RestServlet): + """Handles 3PID validation token submission""" + PATTERNS = [ + re.compile("^/_synapse/password_reset/(?P[^/]*)/submit_token/*$"), + ] + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(PasswordResetSubmitTokenServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.config = hs.config + self.clock = hs.get_clock() + self.datastore = hs.get_datastore() + + @defer.inlineCallbacks + def on_GET(self, request, medium): + if medium != "email": + raise SynapseError( + 400, + "This medium is currently not supported for password resets", + ) + + sid = parse_string(request, "sid") + client_secret = parse_string(request, "client_secret") + token = parse_string(request, "token") + + # Attempt to validate a 3PID sesssion + try: + # Mark the session as valid + next_link = yield self.datastore.validate_threepid_session( + sid, + client_secret, + token, + self.clock.time_msec(), + ) + + # Perform a 302 redirect if next_link is set + if next_link: + if next_link.startswith("file:///"): + logger.warn( + "Not redirecting to next_link as it is a local 
file: address" + ) + else: + request.setResponseCode(302) + request.setHeader("Location", next_link) + finish_request(request) + defer.returnValue(None) + + # Otherwise show the success template + html = self.config.email_password_reset_success_html_content + request.setResponseCode(200) + except ThreepidValidationError as e: + # Show a failure page with a reason + html = self.load_jinja2_template( + self.config.email_template_dir, + self.config.email_password_reset_failure_template, + template_vars={ + "failure_reason": e.msg, + } + ) + request.setResponseCode(e.code) + + request.write(html.encode('utf-8')) + finish_request(request) + defer.returnValue(None) + + def load_jinja2_template(self, template_dir, template_filename, template_vars): + """Loads a jinja2 template with variables to insert + + Args: + template_dir (str): The directory where templates are stored + template_filename (str): The name of the template in the template_dir + template_vars (Dict): Dictionary of keys in the template + alongside their values to insert + + Returns: + str containing the contents of the rendered template + """ + loader = jinja2.FileSystemLoader(template_dir) + env = jinja2.Environment(loader=loader) + + template = env.get_template(template_filename) + return template.render(**template_vars) + + @defer.inlineCallbacks + def on_POST(self, request, medium): + if medium != "email": + raise SynapseError( + 400, + "This medium is currently not supported for password resets", + ) + + body = parse_json_object_from_request(request) + assert_params_in_dict(body, [ + 'sid', 'client_secret', 'token', + ]) + + valid, _ = yield self.datastore.validate_threepid_validation_token( + body['sid'], + body['client_secret'], + body['token'], + self.clock.time_msec(), + ) + response_code = 200 if valid else 400 + + defer.returnValue((response_code, {"success": valid})) + + class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") @@ -144,6 +375,7 @@ def on_POST(self, request): result, params, _ = yield self.auth_handler.check_auth( [[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]], body, self.hs.get_ip_from_request(request), + password_servlet=True, ) if LoginType.EMAIL_IDENTITY in result: @@ -417,6 +649,7 @@ def on_GET(self, request): def register_servlets(hs, http_server): EmailPasswordRequestTokenRestServlet(hs).register(http_server) MsisdnPasswordRequestTokenRestServlet(hs).register(http_server) + PasswordResetSubmitTokenServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) EmailThreepidRequestTokenRestServlet(hs).register(http_server) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 52891bb9eb75..ae891aa332a4 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -588,6 +588,10 @@ def _simple_insert(self, table, values, or_ignore=False, desc="_simple_insert"): Args: table : string giving the table name values : dict of new column names and values for them + or_ignore : bool stating whether an exception should be raised + when a conflicting row already exists. If True, False will be + returned by the function instead + desc : string giving a description of the transaction Returns: bool: Whether the row was inserted or not. 
Only useful when @@ -1228,8 +1232,8 @@ def _simple_select_one_txn(txn, table, keyvalues, retcols, allow_none=False): ) txn.execute(select_sql, list(keyvalues.values())) - row = txn.fetchone() + if not row: if allow_none: return None diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c1711bc8bd5f..23a4baa4841d 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 54 +SCHEMA_VERSION = 55 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 4cf159ba817f..9b41cbd757ef 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -17,17 +17,20 @@ import re +from six import iterkeys from six.moves import range from twisted.internet import defer from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, StoreError +from synapse.api.errors import Codes, StoreError, ThreepidValidationError from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore from synapse.types import UserID from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 + class RegistrationWorkerStore(SQLBaseStore): def __init__(self, db_conn, hs): @@ -422,7 +425,7 @@ def get_3pid_guest_access_token(self, medium, address): defer.returnValue(None) @defer.inlineCallbacks - def get_user_id_by_threepid(self, medium, address): + def get_user_id_by_threepid(self, medium, address, require_verified=False): """Returns user id from threepid Args: @@ -595,6 +598,11 @@ def __init__(self, db_conn, hs): "user_threepids_grandfather", self._bg_user_threepids_grandfather, ) + # Create a background job for culling expired 3PID validity tokens + hs.get_clock().looping_call( + self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS, + ) + @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id=None): """Adds an access token for the given user. @@ -963,7 +971,6 @@ def _bg_user_threepids_grandfather(self, progress, batch_size): We do this by grandfathering in existing user threepids assuming that they used one of the server configured trusted identity servers. """ - id_servers = set(self.config.trusted_third_party_id_servers) def _bg_user_threepids_grandfather_txn(txn): @@ -984,3 +991,280 @@ def _bg_user_threepids_grandfather_txn(txn): yield self._end_background_update("user_threepids_grandfather") defer.returnValue(1) + + def get_threepid_validation_session( + self, + medium, + client_secret, + address=None, + sid=None, + validated=None, + ): + """Gets a session_id and last_send_attempt (if available) for a + client_secret/medium/(address|session_id) combo + + Args: + medium (str|None): The medium of the 3PID + address (str|None): The address of the 3PID + sid (str|None): The ID of the validation session + client_secret (str|None): A unique string provided by the client to + help identify this validation attempt + validated (bool|None): Whether sessions should be filtered by + whether they have been validated already or not. None to + perform no filtering + + Returns: + deferred {str, int}|None: A dict containing the + latest session_id and send_attempt count for this 3PID. 
+ Otherwise None if there hasn't been a previous attempt + """ + keyvalues = { + "medium": medium, + "client_secret": client_secret, + } + if address: + keyvalues["address"] = address + if sid: + keyvalues["session_id"] = sid + + assert(address or sid) + + def get_threepid_validation_session_txn(txn): + sql = """ + SELECT address, session_id, medium, client_secret, + last_send_attempt, validated_at + FROM threepid_validation_session WHERE %s + """ % (" AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),) + + if validated is not None: + sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL") + + sql += " LIMIT 1" + + txn.execute(sql, list(keyvalues.values())) + rows = self.cursor_to_dict(txn) + if not rows: + return None + + return rows[0] + + return self.runInteraction( + "get_threepid_validation_session", + get_threepid_validation_session_txn, + ) + + def validate_threepid_session( + self, + session_id, + client_secret, + token, + current_ts, + ): + """Attempt to validate a threepid session using a token + + Args: + session_id (str): The id of a validation session + client_secret (str): A unique string provided by the client to + help identify this validation attempt + token (str): A validation token + current_ts (int): The current unix time in milliseconds. Used for + checking token expiry status + + Returns: + deferred str|None: A str representing a link to redirect the user + to if there is one. + """ + # Insert everything into a transaction in order to run atomically + def validate_threepid_session_txn(txn): + row = self._simple_select_one_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + retcols=["client_secret", "validated_at"], + allow_none=True, + ) + + if not row: + raise ThreepidValidationError(400, "Unknown session_id") + retrieved_client_secret = row["client_secret"] + validated_at = row["validated_at"] + + if retrieved_client_secret != client_secret: + raise ThreepidValidationError( + 400, "This client_secret does not match the provided session_id", + ) + + row = self._simple_select_one_txn( + txn, + table="threepid_validation_token", + keyvalues={"session_id": session_id, "token": token}, + retcols=["expires", "next_link"], + allow_none=True, + ) + + if not row: + raise ThreepidValidationError( + 400, "Validation token not found or has expired", + ) + expires = row["expires"] + next_link = row["next_link"] + + # If the session is already validated, no need to revalidate + if validated_at: + return next_link + + if expires <= current_ts: + raise ThreepidValidationError( + 400, "This token has expired. Please request a new one", + ) + + # Looks good. 
Validate the session + self._simple_update_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + updatevalues={"validated_at": self.clock.time_msec()}, + ) + + return next_link + + # Return next_link if it exists + return self.runInteraction( + "validate_threepid_session_txn", + validate_threepid_session_txn, + ) + + def upsert_threepid_validation_session( + self, + medium, + address, + client_secret, + send_attempt, + session_id, + validated_at=None, + ): + """Upsert a threepid validation session + Args: + medium (str): The medium of the 3PID + address (str): The address of the 3PID + client_secret (str): A unique string provided by the client to + help identify this validation attempt + send_attempt (int): The latest send_attempt on this session + session_id (str): The id of this validation session + validated_at (int|None): The unix timestamp in milliseconds of + when the session was marked as valid + """ + insertion_values = { + "medium": medium, + "address": address, + "client_secret": client_secret, + } + + if validated_at: + insertion_values["validated_at"] = validated_at + + return self._simple_upsert( + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + values={"last_send_attempt": send_attempt}, + insertion_values=insertion_values, + desc="upsert_threepid_validation_session", + ) + + def start_or_continue_validation_session( + self, + medium, + address, + session_id, + client_secret, + send_attempt, + next_link, + token, + token_expires, + ): + """Creates a new threepid validation session if it does not already + exist and associates a new validation token with it + + Args: + medium (str): The medium of the 3PID + address (str): The address of the 3PID + session_id (str): The id of this validation session + client_secret (str): A unique string provided by the client to + help identify this validation attempt + send_attempt (int): The latest send_attempt on this session + next_link (str|None): The link to redirect the user to upon + successful validation + token (str): The validation token + token_expires (int): The timestamp for which after the token + will no longer be valid + """ + def start_or_continue_validation_session_txn(txn): + # Create or update a validation session + self._simple_upsert_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + values={"last_send_attempt": send_attempt}, + insertion_values={ + "medium": medium, + "address": address, + "client_secret": client_secret, + }, + ) + + # Create a new validation token with this session ID + self._simple_insert_txn( + txn, + table="threepid_validation_token", + values={ + "session_id": session_id, + "token": token, + "next_link": next_link, + "expires": token_expires, + }, + ) + + return self.runInteraction( + "start_or_continue_validation_session", + start_or_continue_validation_session_txn, + ) + + def cull_expired_threepid_validation_tokens(self): + """Remove threepid validation tokens with expiry dates that have passed""" + def cull_expired_threepid_validation_tokens_txn(txn, ts): + sql = """ + DELETE FROM threepid_validation_token WHERE + expires < ? + """ + return txn.execute(sql, (ts,)) + + return self.runInteraction( + "cull_expired_threepid_validation_tokens", + cull_expired_threepid_validation_tokens_txn, + self.clock.time_msec(), + ) + + def delete_threepid_session(self, session_id): + """Removes a threepid validation session from the database. 
This can + be done after validation has been performed and whatever action was + waiting on it has been carried out + + Args: + session_id (str): The ID of the session to delete + """ + def delete_threepid_session_txn(txn): + self._simple_delete_txn( + txn, + table="threepid_validation_token", + keyvalues={"session_id": session_id}, + ) + self._simple_delete_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + ) + + return self.runInteraction( + "delete_threepid_session", + delete_threepid_session_txn, + ) diff --git a/synapse/storage/schema/delta/55/track_threepid_validations.sql b/synapse/storage/schema/delta/55/track_threepid_validations.sql new file mode 100644 index 000000000000..a8eced2e0a5d --- /dev/null +++ b/synapse/storage/schema/delta/55/track_threepid_validations.sql @@ -0,0 +1,31 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS threepid_validation_session ( + session_id TEXT PRIMARY KEY, + medium TEXT NOT NULL, + address TEXT NOT NULL, + client_secret TEXT NOT NULL, + last_send_attempt BIGINT NOT NULL, + validated_at BIGINT +); + +CREATE TABLE IF NOT EXISTS threepid_validation_token ( + token TEXT PRIMARY KEY, + session_id TEXT NOT NULL, + next_link TEXT, + expires BIGINT NOT NULL +); + +CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token(session_id); diff --git a/tests/utils.py b/tests/utils.py index 200c1ceabe0f..b2817cf22c4e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -131,7 +131,6 @@ def default_config(name, parse=False): "password_providers": [], "worker_replication_url": "", "worker_app": None, - "email_enable_notifs": False, "block_non_admin_invites": False, "federation_domain_whitelist": None, "filter_timeline_limit": 5000, From ed872db8df61f5ee019bdfdb68e1e83e9e2b7298 Mon Sep 17 00:00:00 2001 From: "Amber H. 
Brown" Date: Fri, 7 Jun 2019 02:53:47 +1000 Subject: [PATCH 153/231] fix maybe --- .../full_schemas/54/stream_positions.sql | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index 575ab6b354ab..c265fd20e250 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -2,24 +2,6 @@ INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events; INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1); INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; -INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); -INSERT INTO stats_stream_pos (stream_id) VALUES (null); +INSERT INTO user_directory_stream_pos (stream_id) VALUES (0); +INSERT INTO stats_stream_pos (stream_id) VALUES (0); INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); - ---- User dir population - --- Set up staging tables -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_user_directory_createtables', '{}'); - --- Run through each room and update the user directory according to who is in it -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); - --- Insert all users, if search_all_users is on -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); - --- Clean up staging tables -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); From 8acde3dc4749be714beb79a61298f430d7c4e701 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 18:00:06 +0100 Subject: [PATCH 154/231] remove bloat --- INSTALL.md | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index d3a450f40f57..a1ff91a98eef 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -406,20 +406,11 @@ this is required to support the 'password reset' feature. To configure an SMTP server for Synapse, modify the configuration section headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port`` and ``notif_from`` fields filled out. You may also need to set ``smtp_user``, -``smtp_pass``, and ``require_transport_security``.. +``smtp_pass``, and ``require_transport_security``. If Synapse is not configured with an SMTP server, password reset via email will be disabled by default. -Alternatively it is possible delegate the sending of email to the server's -identity server. Doing so is convenient but not recommended, since a malicious -or compromised identity server could theoretically hijack a given user's -account by redirecting mail. - -If you are absolutely certain that you wish to use the server's identity server -for password resets, set ``trust_identity_server_for_password_resets`` to -``true`` under the ``email:`` configuration section. 
- ## Registering a user You will need at least one user on your server in order to use a Matrix From a11865016e4fc8f691ce94ec25e8f40290df8329 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 20:13:47 +0100 Subject: [PATCH 155/231] Set default room version to v4. (#5379) Set default room version to v4. --- changelog.d/5379.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- tests/storage/test_cleanup_extrems.py | 6 ++++++ tests/utils.py | 3 ++- 5 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5379.feature diff --git a/changelog.d/5379.feature b/changelog.d/5379.feature new file mode 100644 index 000000000000..7b64786fe679 --- /dev/null +++ b/changelog.d/5379.feature @@ -0,0 +1 @@ +Set default room version to v4. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ea73306fb995..4d7e6f3eb5ac 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -91,7 +91,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "1" +#default_room_version: "4" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index 334921d42104..7d56e2d141a7 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -36,7 +36,7 @@ # in the list. DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0'] -DEFAULT_ROOM_VERSION = "1" +DEFAULT_ROOM_VERSION = "4" class ServerConfig(Config): diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 6dda66ecd3b9..6aa8b8b3c679 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -25,6 +25,11 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): """Test the background update to clean forward extremities table. """ + def make_homeserver(self, reactor, clock): + # Hack until we understand why test_forked_graph_cleanup fails with v4 + config = self.default_config() + config['default_room_version'] = '1' + return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() @@ -220,6 +225,7 @@ def test_forked_graph_cleanup(self): Where SF* are soft failed, and with them A, B and C marked as extremities. This should resolve to B and C being marked as extremity. """ + # Create the room graph event_id_a = self.create_and_send_event() event_id_b = self.create_and_send_event() diff --git a/tests/utils.py b/tests/utils.py index b2817cf22c4e..f8c7ad2604e0 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -31,6 +31,7 @@ from synapse.api.errors import CodeMessageException, cs_error from synapse.api.room_versions import RoomVersions from synapse.config.homeserver import HomeServerConfig +from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.federation.transport import server as federation_server from synapse.http.server import HttpServer from synapse.server import HomeServer @@ -173,7 +174,7 @@ def default_config(name, parse=False): "use_frozen_dicts": False, # We need a sane default_room_version, otherwise attempts to create # rooms will fail. - "default_room_version": "1", + "default_room_version": DEFAULT_ROOM_VERSION, # disable user directory updates, because they get done in the # background, which upsets the test runner. 
"update_user_directory": False, From 2d1d7b7e6f2bec3b96b0d23993369ce46aad4f32 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 6 Jun 2019 23:54:00 +0100 Subject: [PATCH 156/231] Prevent multiple device list updates from breaking a batch send (#5156) fixes #5153 --- changelog.d/5156.bugfix | 1 + .../sender/per_destination_queue.py | 5 +- synapse/storage/devices.py | 152 ++++++++++++++---- tests/storage/test_devices.py | 69 ++++++++ 4 files changed, 196 insertions(+), 31 deletions(-) create mode 100644 changelog.d/5156.bugfix diff --git a/changelog.d/5156.bugfix b/changelog.d/5156.bugfix new file mode 100644 index 000000000000..e8aa7d8241c1 --- /dev/null +++ b/changelog.d/5156.bugfix @@ -0,0 +1 @@ +Prevent federation device list updates breaking when processing multiple updates at once. \ No newline at end of file diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index fae8bea392b3..564c57203d33 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -349,9 +349,10 @@ def _pop_pending_edus(self, limit): @defer.inlineCallbacks def _get_new_device_messages(self, limit): last_device_list = self._last_device_list_stream_id - # Will return at most 20 entries + + # Retrieve list of new device updates to send to the destination now_stream_id, results = yield self._store.get_devices_by_remote( - self._destination, last_device_list + self._destination, last_device_list, limit=limit, ) edus = [ Edu( diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index fd869b934c7b..d102e07372cc 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from six import iteritems, itervalues +from six import iteritems from canonicaljson import json @@ -72,11 +72,14 @@ def get_devices_by_user(self, user_id): defer.returnValue({d["device_id"]: d for d in devices}) - def get_devices_by_remote(self, destination, from_stream_id): + @defer.inlineCallbacks + def get_devices_by_remote(self, destination, from_stream_id, limit): """Get stream of updates to send to remote servers Returns: - (int, list[dict]): current stream id and list of updates + Deferred[tuple[int, list[dict]]]: + current stream id (ie, the stream id of the last update included in the + response), and the list of updates """ now_stream_id = self._device_list_id_gen.get_current_token() @@ -84,55 +87,131 @@ def get_devices_by_remote(self, destination, from_stream_id): destination, int(from_stream_id) ) if not has_changed: - return (now_stream_id, []) - - return self.runInteraction( + defer.returnValue((now_stream_id, [])) + + # We retrieve n+1 devices from the list of outbound pokes where n is + # our outbound device update limit. We then check if the very last + # device has the same stream_id as the second-to-last device. If so, + # then we ignore all devices with that stream_id and only send the + # devices with a lower stream_id. + # + # If when culling the list we end up with no devices afterwards, we + # consider the device update to be too large, and simply skip the + # stream_id; the rationale being that such a large device list update + # is likely an error. 
+        updates = yield self.runInteraction(
+            "get_devices_by_remote",
+            self._get_devices_by_remote_txn,
+            destination,
+            from_stream_id,
+            now_stream_id,
+            limit + 1,
+        )
+
+        # Return an empty list if there are no updates
+        if not updates:
+            defer.returnValue((now_stream_id, []))
+
+        # if we have exceeded the limit, we need to exclude any results with the
+        # same stream_id as the last row.
+        if len(updates) > limit:
+            stream_id_cutoff = updates[-1][2]
+            now_stream_id = stream_id_cutoff - 1
+        else:
+            stream_id_cutoff = None
+
+        # Perform the equivalent of a GROUP BY
+        #
+        # Iterate through the updates list and copy non-duplicate
+        # (user_id, device_id) entries into a map, with the value being
+        # the max stream_id across each set of duplicate entries
+        #
+        # maps (user_id, device_id) -> stream_id
+        # as long as their stream_id does not match that of the last row
+        query_map = {}
+        for update in updates:
+            if stream_id_cutoff is not None and update[2] >= stream_id_cutoff:
+                # Stop processing updates
+                break
+
+            key = (update[0], update[1])
+            query_map[key] = max(query_map.get(key, 0), update[2])
+
+        # If we didn't find any updates with a stream_id lower than the cutoff, it
+        # means that there are more than limit updates all of which have the same
+        # stream_id.
+
+        # That should only happen if a client is spamming the server with new
+        # devices, in which case E2E isn't going to work well anyway. We'll just
+        # skip that stream_id and return an empty list, and continue with the next
+        # stream_id next time.
+        if not query_map:
+            defer.returnValue((stream_id_cutoff, []))
+
+        results = yield self._get_device_update_edus_by_remote(
+            destination,
+            from_stream_id,
+            query_map,
+        )
+
+        defer.returnValue((now_stream_id, results))
+
     def _get_devices_by_remote_txn(
-        self, txn, destination, from_stream_id, now_stream_id
+        self, txn, destination, from_stream_id, now_stream_id, limit
     ):
+        """Return device update information for a given remote destination
+
+        Args:
+            txn (LoggingTransaction): The transaction to execute
+            destination (str): The host the device updates are intended for
+            from_stream_id (int): The minimum stream_id to filter updates by, exclusive
+            now_stream_id (int): The maximum stream_id to filter updates by, inclusive
+            limit (int): Maximum number of device updates to return
+
+        Returns:
+            List: List of device updates
+        """
         sql = """
-            SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes
+            SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes
             WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
-            GROUP BY user_id, device_id
-            LIMIT 20
+            ORDER BY stream_id
+            LIMIT ?
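+            -- NB: callers bind limit+1 as the LIMIT, so that a run of
+            -- updates sharing one stream_id that straddles the limit can
+            -- be detected and held back for the next batch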
""" - txn.execute(sql, (destination, from_stream_id, now_stream_id, False)) + txn.execute(sql, (destination, from_stream_id, now_stream_id, False, limit)) - # maps (user_id, device_id) -> stream_id - query_map = {(r[0], r[1]): r[2] for r in txn} - if not query_map: - return (now_stream_id, []) + return list(txn) - if len(query_map) >= 20: - now_stream_id = max(stream_id for stream_id in itervalues(query_map)) + @defer.inlineCallbacks + def _get_device_update_edus_by_remote( + self, destination, from_stream_id, query_map, + ): + """Returns a list of device update EDUs as well as E2EE keys - devices = self._get_e2e_device_keys_txn( - txn, + Args: + destination (str): The host the device updates are intended for + from_stream_id (int): The minimum stream_id to filter updates by, exclusive + query_map (Dict[(str, str): int]): Dictionary mapping + user_id/device_id to update stream_id + + Returns: + List[Dict]: List of objects representing an device update EDU + + """ + devices = yield self.runInteraction( + "_get_e2e_device_keys_txn", + self._get_e2e_device_keys_txn, query_map.keys(), include_all_devices=True, include_deleted_devices=True, ) - prev_sent_id_sql = """ - SELECT coalesce(max(stream_id), 0) as stream_id - FROM device_lists_outbound_last_success - WHERE destination = ? AND user_id = ? AND stream_id <= ? - """ - results = [] for user_id, user_devices in iteritems(devices): # The prev_id for the first row is always the last row before # `from_stream_id` - txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id)) - rows = txn.fetchall() - prev_id = rows[0][0] + prev_id = yield self._get_last_device_update_for_remote_user( + destination, user_id, from_stream_id, + ) for device_id, device in iteritems(user_devices): stream_id = query_map[(user_id, device_id)] result = { @@ -156,7 +235,22 @@ def _get_devices_by_remote_txn( results.append(result) - return (now_stream_id, results) + defer.returnValue(results) + + def _get_last_device_update_for_remote_user( + self, destination, user_id, from_stream_id, + ): + def f(txn): + prev_sent_id_sql = """ + SELECT coalesce(max(stream_id), 0) as stream_id + FROM device_lists_outbound_last_success + WHERE destination = ? AND user_id = ? AND stream_id <= ? + """ + txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id)) + rows = txn.fetchall() + return rows[0][0] + + return self.runInteraction("get_last_device_update_for_remote_user", f) def mark_as_sent_devices_by_remote(self, destination, stream_id): """Mark that updates have successfully been sent to the destination. 
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index aef4dfaf57a0..6396ccddb52b 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -71,6 +71,75 @@ def test_get_devices_by_user(self):
             res["device2"],
         )

+    @defer.inlineCallbacks
+    def test_get_devices_by_remote(self):
+        device_ids = ["device_id1", "device_id2"]
+
+        # Add two device updates with a single stream_id
+        yield self.store.add_device_change_to_streams(
+            "user_id", device_ids, ["somehost"],
+        )
+
+        # Get all device updates ever meant for this remote
+        now_stream_id, device_updates = yield self.store.get_devices_by_remote(
+            "somehost", -1, limit=100,
+        )
+
+        # Check original device_ids are contained within these updates
+        self._check_devices_in_updates(device_ids, device_updates)
+
+    @defer.inlineCallbacks
+    def test_get_devices_by_remote_limited(self):
+        # Test breaking the update limit in 1, 101, and 1 device_id segments
+
+        # first add one device
+        device_ids1 = ["device_id0"]
+        yield self.store.add_device_change_to_streams(
+            "user_id", device_ids1, ["someotherhost"],
+        )
+
+        # then add 101
+        device_ids2 = ["device_id" + str(i + 1) for i in range(101)]
+        yield self.store.add_device_change_to_streams(
+            "user_id", device_ids2, ["someotherhost"],
+        )
+
+        # then one more
+        device_ids3 = ["newdevice"]
+        yield self.store.add_device_change_to_streams(
+            "user_id", device_ids3, ["someotherhost"],
+        )
+
+        #
+        # now read them back.
+        #
+
+        # first we should get a single update
+        now_stream_id, device_updates = yield self.store.get_devices_by_remote(
+            "someotherhost", -1, limit=100,
+        )
+        self._check_devices_in_updates(device_ids1, device_updates)
+
+        # Then we should get an empty list back as the 101 devices broke the limit
+        now_stream_id, device_updates = yield self.store.get_devices_by_remote(
+            "someotherhost", now_stream_id, limit=100,
+        )
+        self.assertEqual(len(device_updates), 0)
+
+        # The 101 devices should've been cleared, so we should now just get one device
+        # update
+        now_stream_id, device_updates = yield self.store.get_devices_by_remote(
+            "someotherhost", now_stream_id, limit=100,
+        )
+        self._check_devices_in_updates(device_ids3, device_updates)
+
+    def _check_devices_in_updates(self, expected_device_ids, device_updates):
+        """Check that the specified device ids exist in a list of device update EDUs"""
+        self.assertEqual(len(device_updates), len(expected_device_ids))
+
+        received_device_ids = {update["device_id"] for update in device_updates}
+        self.assertEqual(received_device_ids, set(expected_device_ids))
+
     @defer.inlineCallbacks
     def test_update_device(self):
         yield self.store.store_device("user_id", "device_id", "display_name 1")

From 4f581faa98b162b9949030a167e9a71f81e5e915 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 7 Jun 2019 00:20:17 +0100
Subject: [PATCH 157/231] Automatically retry builds when a buildkite agent is lost (#5380)

Sometimes the build agents get lost or die (error codes -1 and 2).
Retry automatically a maximum of 2 times if this happens.
Error code reference: * -1: Agent was lost * 0: Build successful * 1: There was an error in your code * 2: The build stopped abruptly * 255: The build was cancelled --- .buildkite/pipeline.yml | 66 +++++++++++++++++++++++++++++++++++++++++ changelog.d/5380.misc | 1 + 2 files changed, 67 insertions(+) create mode 100644 changelog.d/5380.misc diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b805b2d83909..719f22b4e107 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -56,6 +56,12 @@ steps: - docker#v3.0.1: image: "python:2.7" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -67,6 +73,12 @@ steps: - docker#v3.0.1: image: "python:3.5" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -78,6 +90,12 @@ steps: - docker#v3.0.1: image: "python:3.6" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -89,6 +107,12 @@ steps: - docker#v3.0.1: image: "python:3.7" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -100,6 +124,12 @@ steps: - docker#v3.0.1: image: "python:2.7" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 2.7 / :postgres: 9.4" env: @@ -111,6 +141,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py27.pg94.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 2.7 / :postgres: 9.5" env: @@ -122,6 +158,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py27.pg95.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.5 / :postgres: 9.4" env: @@ -133,6 +175,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py35.pg94.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.5 / :postgres: 9.5" env: @@ -144,6 +192,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py35.pg95.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.7 / :postgres: 9.5" env: @@ -155,6 +209,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py37.pg95.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.7 / :postgres: 11" env: @@ -166,3 +226,9 @@ steps: run: testenv config: - .buildkite/docker-compose.py37.pg11.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 diff --git a/changelog.d/5380.misc b/changelog.d/5380.misc new file mode 100644 index 000000000000..099bba414cf4 --- /dev/null +++ b/changelog.d/5380.misc @@ -0,0 +1 @@ +Automatically retry buildkite builds (max twice) when an agent is lost. 
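Every step above now carries an identical retry stanza. If the duplication
becomes a maintenance burden, YAML anchors or a small generator script are
the usual remedies; a rough sketch of the latter (hypothetical, assuming
PyYAML is available):

    import yaml

    # Shared retry policy: retry twice on a lost agent (-1) or an abrupt
    # stop (2), mirroring the stanza repeated throughout pipeline.yml.
    RETRY = {
        "retry": {
            "automatic": [
                {"exit_status": -1, "limit": 2},
                {"exit_status": 2, "limit": 2},
            ],
        },
    }

    def with_retry(step):
        # Return a copy of the step with the shared retry policy merged in.
        merged = dict(step)
        merged.update(RETRY)
        return merged

    steps = [with_retry({"command": ["python -m pip install tox"]})]
    print(yaml.safe_dump({"steps": steps}, default_flow_style=False))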
From 7c455a86bc6827687ec1c232a8c1621e528a6d78 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 7 Jun 2019 10:29:32 +0100 Subject: [PATCH 158/231] 1.0.0rc1 --- CHANGES.md | 83 ++++++++++++++++++++++++++++++++++++++++ changelog.d/4338.feature | 1 - changelog.d/5089.bugfix | 1 - changelog.d/5156.bugfix | 1 - changelog.d/5200.bugfix | 1 - changelog.d/5216.misc | 1 - changelog.d/5220.feature | 1 - changelog.d/5221.bugfix | 1 - changelog.d/5223.feature | 1 - changelog.d/5226.misc | 1 - changelog.d/5227.misc | 1 - changelog.d/5230.misc | 1 - changelog.d/5232.misc | 1 - changelog.d/5233.bugfix | 1 - changelog.d/5234.misc | 1 - changelog.d/5235.misc | 1 - changelog.d/5236.misc | 1 - changelog.d/5237.misc | 1 - changelog.d/5244.misc | 1 - changelog.d/5249.feature | 1 - changelog.d/5250.misc | 1 - changelog.d/5251.bugfix | 1 - changelog.d/5256.bugfix | 1 - changelog.d/5257.bugfix | 1 - changelog.d/5258.bugfix | 1 - changelog.d/5260.feature | 1 - changelog.d/5268.bugfix | 1 - changelog.d/5274.bugfix | 1 - changelog.d/5275.bugfix | 1 - changelog.d/5276.feature | 1 - changelog.d/5277.bugfix | 1 - changelog.d/5278.bugfix | 1 - changelog.d/5282.doc | 1 - changelog.d/5283.misc | 1 - changelog.d/5284.misc | 1 - changelog.d/5286.feature | 1 - changelog.d/5287.misc | 1 - changelog.d/5288.misc | 1 - changelog.d/5291.bugfix | 1 - changelog.d/5293.bugfix | 1 - changelog.d/5294.bugfix | 1 - changelog.d/5296.misc | 1 - changelog.d/5299.misc | 1 - changelog.d/5300.bugfix | 1 - changelog.d/5303.misc | 1 - changelog.d/5307.bugfix | 1 - changelog.d/5309.bugfix | 1 - changelog.d/5317.bugfix | 1 - changelog.d/5320.misc | 1 - changelog.d/5321.bugfix | 1 - changelog.d/5324.feature | 1 - changelog.d/5328.misc | 1 - changelog.d/5332.misc | 1 - changelog.d/5333.bugfix | 1 - changelog.d/5334.bugfix | 1 - changelog.d/5335.bugfix | 1 - changelog.d/5340.bugfix | 2 - changelog.d/5341.bugfix | 1 - changelog.d/5342.bugfix | 1 - changelog.d/5343.misc | 1 - changelog.d/5344.misc | 1 - changelog.d/5347.misc | 1 - changelog.d/5348.bugfix | 1 - changelog.d/5352.bugfix | 1 - changelog.d/5353.misc | 2 - changelog.d/5354.bugfix | 2 - changelog.d/5355.bugfix | 1 - changelog.d/5356.misc | 1 - changelog.d/5357.doc | 1 - changelog.d/5359.feature | 1 - changelog.d/5360.feature | 1 - changelog.d/5361.feature | 1 - changelog.d/5362.bugfix | 1 - changelog.d/5369.bugfix | 1 - changelog.d/5370.misc | 1 - changelog.d/5371.feature | 1 - changelog.d/5374.feature | 1 - changelog.d/5377.feature | 1 - changelog.d/5379.feature | 1 - changelog.d/5380.misc | 1 - synapse/__init__.py | 2 +- 81 files changed, 84 insertions(+), 83 deletions(-) delete mode 100644 changelog.d/4338.feature delete mode 100644 changelog.d/5089.bugfix delete mode 100644 changelog.d/5156.bugfix delete mode 100644 changelog.d/5200.bugfix delete mode 100644 changelog.d/5216.misc delete mode 100644 changelog.d/5220.feature delete mode 100644 changelog.d/5221.bugfix delete mode 100644 changelog.d/5223.feature delete mode 100644 changelog.d/5226.misc delete mode 100644 changelog.d/5227.misc delete mode 100644 changelog.d/5230.misc delete mode 100644 changelog.d/5232.misc delete mode 100644 changelog.d/5233.bugfix delete mode 100644 changelog.d/5234.misc delete mode 100644 changelog.d/5235.misc delete mode 100644 changelog.d/5236.misc delete mode 100644 changelog.d/5237.misc delete mode 100644 changelog.d/5244.misc delete mode 100644 changelog.d/5249.feature delete mode 100644 changelog.d/5250.misc delete mode 100644 changelog.d/5251.bugfix delete mode 100644 changelog.d/5256.bugfix 
delete mode 100644 changelog.d/5257.bugfix delete mode 100644 changelog.d/5258.bugfix delete mode 100644 changelog.d/5260.feature delete mode 100644 changelog.d/5268.bugfix delete mode 100644 changelog.d/5274.bugfix delete mode 100644 changelog.d/5275.bugfix delete mode 100644 changelog.d/5276.feature delete mode 100644 changelog.d/5277.bugfix delete mode 100644 changelog.d/5278.bugfix delete mode 100644 changelog.d/5282.doc delete mode 100644 changelog.d/5283.misc delete mode 100644 changelog.d/5284.misc delete mode 100644 changelog.d/5286.feature delete mode 100644 changelog.d/5287.misc delete mode 100644 changelog.d/5288.misc delete mode 100644 changelog.d/5291.bugfix delete mode 100644 changelog.d/5293.bugfix delete mode 100644 changelog.d/5294.bugfix delete mode 100644 changelog.d/5296.misc delete mode 100644 changelog.d/5299.misc delete mode 100644 changelog.d/5300.bugfix delete mode 100644 changelog.d/5303.misc delete mode 100644 changelog.d/5307.bugfix delete mode 100644 changelog.d/5309.bugfix delete mode 100644 changelog.d/5317.bugfix delete mode 100644 changelog.d/5320.misc delete mode 100644 changelog.d/5321.bugfix delete mode 100644 changelog.d/5324.feature delete mode 100644 changelog.d/5328.misc delete mode 100644 changelog.d/5332.misc delete mode 100644 changelog.d/5333.bugfix delete mode 100644 changelog.d/5334.bugfix delete mode 100644 changelog.d/5335.bugfix delete mode 100644 changelog.d/5340.bugfix delete mode 100644 changelog.d/5341.bugfix delete mode 100644 changelog.d/5342.bugfix delete mode 100644 changelog.d/5343.misc delete mode 100644 changelog.d/5344.misc delete mode 100644 changelog.d/5347.misc delete mode 100644 changelog.d/5348.bugfix delete mode 100644 changelog.d/5352.bugfix delete mode 100644 changelog.d/5353.misc delete mode 100644 changelog.d/5354.bugfix delete mode 100644 changelog.d/5355.bugfix delete mode 100644 changelog.d/5356.misc delete mode 100644 changelog.d/5357.doc delete mode 100644 changelog.d/5359.feature delete mode 100644 changelog.d/5360.feature delete mode 100644 changelog.d/5361.feature delete mode 100644 changelog.d/5362.bugfix delete mode 100644 changelog.d/5369.bugfix delete mode 100644 changelog.d/5370.misc delete mode 100644 changelog.d/5371.feature delete mode 100644 changelog.d/5374.feature delete mode 100644 changelog.d/5377.feature delete mode 100644 changelog.d/5379.feature delete mode 100644 changelog.d/5380.misc diff --git a/CHANGES.md b/CHANGES.md index 0ffdf1aaef75..4dea0f6319ae 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,86 @@ +Synapse 1.0.0rc1 (2019-06-07) +============================= + +Features +-------- + +- Synapse now more efficiently collates room statistics. ([\#4338](https://github.com/matrix-org/synapse/issues/4338), [\#5260](https://github.com/matrix-org/synapse/issues/5260), [\#5324](https://github.com/matrix-org/synapse/issues/5324)) +- Add experimental support for relations (aka reactions and edits). ([\#5220](https://github.com/matrix-org/synapse/issues/5220)) +- Ability to configure default room version. ([\#5223](https://github.com/matrix-org/synapse/issues/5223), [\#5249](https://github.com/matrix-org/synapse/issues/5249)) +- Allow configuring a range for the account validity startup job. ([\#5276](https://github.com/matrix-org/synapse/issues/5276)) +- CAS login will now hit the r0 API, not the deprecated v1 one. 
([\#5286](https://github.com/matrix-org/synapse/issues/5286)) +- Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). ([\#5359](https://github.com/matrix-org/synapse/issues/5359)) +- Update /_matrix/client/versions to reference support for r0.5.0. ([\#5360](https://github.com/matrix-org/synapse/issues/5360)) +- Add a script to generate new signing-key files. ([\#5361](https://github.com/matrix-org/synapse/issues/5361)) +- Update upgrade and installation guides ahead of 1.0. ([\#5371](https://github.com/matrix-org/synapse/issues/5371)) +- Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). ([\#5374](https://github.com/matrix-org/synapse/issues/5374)) +- Add ability to perform password reset via email without trusting the identity server. ([\#5377](https://github.com/matrix-org/synapse/issues/5377)) +- Set default room version to v4. ([\#5379](https://github.com/matrix-org/synapse/issues/5379)) + + +Bugfixes +-------- + +- Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. Thanks to @dnaf for this work! ([\#5089](https://github.com/matrix-org/synapse/issues/5089)) +- Prevent federation device list updates breaking when processing multiple updates at once. ([\#5156](https://github.com/matrix-org/synapse/issues/5156)) +- Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. ([\#5200](https://github.com/matrix-org/synapse/issues/5200)) +- Fix race when backfilling in rooms with worker mode. ([\#5221](https://github.com/matrix-org/synapse/issues/5221)) +- Fix appservice timestamp massaging. ([\#5233](https://github.com/matrix-org/synapse/issues/5233)) +- Ensure that server_keys fetched via a notary server are correctly signed. ([\#5251](https://github.com/matrix-org/synapse/issues/5251)) +- Show the correct error when logging out and access token is missing. ([\#5256](https://github.com/matrix-org/synapse/issues/5256)) +- Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms ([\#5257](https://github.com/matrix-org/synapse/issues/5257)) +- Fix error when downloading thumbnail with missing width/height parameter. ([\#5258](https://github.com/matrix-org/synapse/issues/5258)) +- Fix schema update for account validity. ([\#5268](https://github.com/matrix-org/synapse/issues/5268)) +- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291)) +- Fix "db txn 'update_presence' from sentinel context" log messages. ([\#5275](https://github.com/matrix-org/synapse/issues/5275)) +- Fix dropped logcontexts during high outbound traffic. ([\#5277](https://github.com/matrix-org/synapse/issues/5277)) +- Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. ([\#5293](https://github.com/matrix-org/synapse/issues/5293)) +- Fix performance problems with the rooms stats background update. 
([\#5294](https://github.com/matrix-org/synapse/issues/5294)) +- Fix noisy 'no key for server' logs. ([\#5300](https://github.com/matrix-org/synapse/issues/5300)) +- Fix bug where a notary server would sometimes forget old keys. ([\#5307](https://github.com/matrix-org/synapse/issues/5307)) +- Prevent users from setting huge displaynames and avatar URLs. ([\#5309](https://github.com/matrix-org/synapse/issues/5309)) +- Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. ([\#5317](https://github.com/matrix-org/synapse/issues/5317)) +- Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. ([\#5321](https://github.com/matrix-org/synapse/issues/5321)) +- Fix various problems which made the signing-key notary server time out for some requests. ([\#5333](https://github.com/matrix-org/synapse/issues/5333)) +- Fix bug which would make certain operations (such as room joins) block for 20 minutes while attemoting to fetch verification keys. ([\#5334](https://github.com/matrix-org/synapse/issues/5334)) +- Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. ([\#5335](https://github.com/matrix-org/synapse/issues/5335), [\#5340](https://github.com/matrix-org/synapse/issues/5340)) +- Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. ([\#5341](https://github.com/matrix-org/synapse/issues/5341)) +- Fix failure when fetching batches of events during backfill, etc. ([\#5342](https://github.com/matrix-org/synapse/issues/5342)) +- Add a new room version where the timestamps on events are checked against the validity periods on signing keys. ([\#5348](https://github.com/matrix-org/synapse/issues/5348), [\#5354](https://github.com/matrix-org/synapse/issues/5354)) +- Fix room stats and presence background updates to correctly handle missing events. ([\#5352](https://github.com/matrix-org/synapse/issues/5352)) +- Include left members in room summaries' heroes. ([\#5355](https://github.com/matrix-org/synapse/issues/5355)) +- Fix `federation_custom_ca_list` configuration option. ([\#5362](https://github.com/matrix-org/synapse/issues/5362)) +- Fix missing logcontext warnings on shutdown. ([\#5369](https://github.com/matrix-org/synapse/issues/5369)) + + +Improved Documentation +---------------------- + +- Fix docs on resetting the user directory. ([\#5282](https://github.com/matrix-org/synapse/issues/5282)) +- Fix notes about ACME in the MSC1711 faq. ([\#5357](https://github.com/matrix-org/synapse/issues/5357)) + + +Internal Changes +---------------- + +- Synapse will now serve the experimental "room complexity" API endpoint. ([\#5216](https://github.com/matrix-org/synapse/issues/5216)) +- The base classes for the v1 and v2_alpha REST APIs have been unified. ([\#5226](https://github.com/matrix-org/synapse/issues/5226), [\#5328](https://github.com/matrix-org/synapse/issues/5328)) +- Simplifications and comments in do_auth. ([\#5227](https://github.com/matrix-org/synapse/issues/5227)) +- Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. ([\#5230](https://github.com/matrix-org/synapse/issues/5230)) +- Preparatory work for key-validity features. 
([\#5232](https://github.com/matrix-org/synapse/issues/5232), [\#5234](https://github.com/matrix-org/synapse/issues/5234), [\#5235](https://github.com/matrix-org/synapse/issues/5235), [\#5236](https://github.com/matrix-org/synapse/issues/5236), [\#5237](https://github.com/matrix-org/synapse/issues/5237), [\#5244](https://github.com/matrix-org/synapse/issues/5244), [\#5250](https://github.com/matrix-org/synapse/issues/5250), [\#5296](https://github.com/matrix-org/synapse/issues/5296), [\#5299](https://github.com/matrix-org/synapse/issues/5299), [\#5343](https://github.com/matrix-org/synapse/issues/5343), [\#5347](https://github.com/matrix-org/synapse/issues/5347), [\#5356](https://github.com/matrix-org/synapse/issues/5356)) +- Specify the type of reCAPTCHA key to use. ([\#5283](https://github.com/matrix-org/synapse/issues/5283)) +- Improve sample config for monthly active user blocking. ([\#5284](https://github.com/matrix-org/synapse/issues/5284)) +- Remove spurious debug from MatrixFederationHttpClient.get_json. ([\#5287](https://github.com/matrix-org/synapse/issues/5287)) +- Improve logging for logcontext leaks. ([\#5288](https://github.com/matrix-org/synapse/issues/5288)) +- Clarify that the admin change password API logs the user out. ([\#5303](https://github.com/matrix-org/synapse/issues/5303)) +- New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. ([\#5320](https://github.com/matrix-org/synapse/issues/5320)) +- Improve docstrings on MatrixFederationClient. ([\#5332](https://github.com/matrix-org/synapse/issues/5332)) +- Clean up FederationClient.get_events for clarity. ([\#5344](https://github.com/matrix-org/synapse/issues/5344)) +- Various improvements to debug logging. ([\#5353](https://github.com/matrix-org/synapse/issues/5353)) +- Don't run CI build checks until sample config check has passed. ([\#5370](https://github.com/matrix-org/synapse/issues/5370)) +- Automatically retry buildkite builds (max twice) when an agent is lost. ([\#5380](https://github.com/matrix-org/synapse/issues/5380)) + + Synapse 0.99.5.2 (2019-05-30) ============================= diff --git a/changelog.d/4338.feature b/changelog.d/4338.feature deleted file mode 100644 index 01285e965c84..000000000000 --- a/changelog.d/4338.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. diff --git a/changelog.d/5089.bugfix b/changelog.d/5089.bugfix deleted file mode 100644 index 68643cebb7b9..000000000000 --- a/changelog.d/5089.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. Thanks to @dnaf for this work! diff --git a/changelog.d/5156.bugfix b/changelog.d/5156.bugfix deleted file mode 100644 index e8aa7d8241c1..000000000000 --- a/changelog.d/5156.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent federation device list updates breaking when processing multiple updates at once. \ No newline at end of file diff --git a/changelog.d/5200.bugfix b/changelog.d/5200.bugfix deleted file mode 100644 index f346c7b0cc8d..000000000000 --- a/changelog.d/5200.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. 
diff --git a/changelog.d/5216.misc b/changelog.d/5216.misc deleted file mode 100644 index dbfa29475f0a..000000000000 --- a/changelog.d/5216.misc +++ /dev/null @@ -1 +0,0 @@ -Synapse will now serve the experimental "room complexity" API endpoint. diff --git a/changelog.d/5220.feature b/changelog.d/5220.feature deleted file mode 100644 index 747098c16624..000000000000 --- a/changelog.d/5220.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for relations (aka reactions and edits). diff --git a/changelog.d/5221.bugfix b/changelog.d/5221.bugfix deleted file mode 100644 index 03aa363d156c..000000000000 --- a/changelog.d/5221.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix race when backfilling in rooms with worker mode. diff --git a/changelog.d/5223.feature b/changelog.d/5223.feature deleted file mode 100644 index cfdf1ad41ba5..000000000000 --- a/changelog.d/5223.feature +++ /dev/null @@ -1 +0,0 @@ -Ability to configure default room version. diff --git a/changelog.d/5226.misc b/changelog.d/5226.misc deleted file mode 100644 index e1b9dc58a3d9..000000000000 --- a/changelog.d/5226.misc +++ /dev/null @@ -1 +0,0 @@ -The base classes for the v1 and v2_alpha REST APIs have been unified. diff --git a/changelog.d/5227.misc b/changelog.d/5227.misc deleted file mode 100644 index 32bd7b60094e..000000000000 --- a/changelog.d/5227.misc +++ /dev/null @@ -1 +0,0 @@ -Simplifications and comments in do_auth. diff --git a/changelog.d/5230.misc b/changelog.d/5230.misc deleted file mode 100644 index c681bc9748c4..000000000000 --- a/changelog.d/5230.misc +++ /dev/null @@ -1 +0,0 @@ -Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. diff --git a/changelog.d/5232.misc b/changelog.d/5232.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5232.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5233.bugfix b/changelog.d/5233.bugfix deleted file mode 100644 index d71b9621602d..000000000000 --- a/changelog.d/5233.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix appservice timestamp massaging. diff --git a/changelog.d/5234.misc b/changelog.d/5234.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5234.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5235.misc b/changelog.d/5235.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5235.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5236.misc b/changelog.d/5236.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5236.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5237.misc b/changelog.d/5237.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5237.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5244.misc b/changelog.d/5244.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5244.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5249.feature b/changelog.d/5249.feature deleted file mode 100644 index cfdf1ad41ba5..000000000000 --- a/changelog.d/5249.feature +++ /dev/null @@ -1 +0,0 @@ -Ability to configure default room version. 
diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5250.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5251.bugfix b/changelog.d/5251.bugfix deleted file mode 100644 index 9a053204b6dd..000000000000 --- a/changelog.d/5251.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure that server_keys fetched via a notary server are correctly signed. \ No newline at end of file diff --git a/changelog.d/5256.bugfix b/changelog.d/5256.bugfix deleted file mode 100644 index 86316ab5dd89..000000000000 --- a/changelog.d/5256.bugfix +++ /dev/null @@ -1 +0,0 @@ -Show the correct error when logging out and access token is missing. diff --git a/changelog.d/5257.bugfix b/changelog.d/5257.bugfix deleted file mode 100644 index 8334af9b9957..000000000000 --- a/changelog.d/5257.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms diff --git a/changelog.d/5258.bugfix b/changelog.d/5258.bugfix deleted file mode 100644 index fb5d44aedbf2..000000000000 --- a/changelog.d/5258.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error when downloading thumbnail with missing width/height parameter. diff --git a/changelog.d/5260.feature b/changelog.d/5260.feature deleted file mode 100644 index 01285e965c84..000000000000 --- a/changelog.d/5260.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. diff --git a/changelog.d/5268.bugfix b/changelog.d/5268.bugfix deleted file mode 100644 index 1a5a03bf0a0c..000000000000 --- a/changelog.d/5268.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix schema update for account validity. diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix deleted file mode 100644 index 9e14d20289f8..000000000000 --- a/changelog.d/5274.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5275.bugfix b/changelog.d/5275.bugfix deleted file mode 100644 index 45a554642a53..000000000000 --- a/changelog.d/5275.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix "db txn 'update_presence' from sentinel context" log messages. diff --git a/changelog.d/5276.feature b/changelog.d/5276.feature deleted file mode 100644 index 403dee0862e4..000000000000 --- a/changelog.d/5276.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring a range for the account validity startup job. diff --git a/changelog.d/5277.bugfix b/changelog.d/5277.bugfix deleted file mode 100644 index 371aa2e7fbbb..000000000000 --- a/changelog.d/5277.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix dropped logcontexts during high outbound traffic. diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix deleted file mode 100644 index 9e14d20289f8..000000000000 --- a/changelog.d/5278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5282.doc b/changelog.d/5282.doc deleted file mode 100644 index 350e15bc033e..000000000000 --- a/changelog.d/5282.doc +++ /dev/null @@ -1 +0,0 @@ -Fix docs on resetting the user directory. diff --git a/changelog.d/5283.misc b/changelog.d/5283.misc deleted file mode 100644 index 002721e566ef..000000000000 --- a/changelog.d/5283.misc +++ /dev/null @@ -1 +0,0 @@ -Specify the type of reCAPTCHA key to use. 
diff --git a/changelog.d/5284.misc b/changelog.d/5284.misc deleted file mode 100644 index c4d42ca3d9e0..000000000000 --- a/changelog.d/5284.misc +++ /dev/null @@ -1 +0,0 @@ -Improve sample config for monthly active user blocking. diff --git a/changelog.d/5286.feature b/changelog.d/5286.feature deleted file mode 100644 index 81860279a32e..000000000000 --- a/changelog.d/5286.feature +++ /dev/null @@ -1 +0,0 @@ -CAS login will now hit the r0 API, not the deprecated v1 one. diff --git a/changelog.d/5287.misc b/changelog.d/5287.misc deleted file mode 100644 index 1286f1dd08db..000000000000 --- a/changelog.d/5287.misc +++ /dev/null @@ -1 +0,0 @@ -Remove spurious debug from MatrixFederationHttpClient.get_json. diff --git a/changelog.d/5288.misc b/changelog.d/5288.misc deleted file mode 100644 index fbf049ba6aad..000000000000 --- a/changelog.d/5288.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging for logcontext leaks. diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix deleted file mode 100644 index 9e14d20289f8..000000000000 --- a/changelog.d/5291.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5293.bugfix b/changelog.d/5293.bugfix deleted file mode 100644 index aa519a8433ee..000000000000 --- a/changelog.d/5293.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. diff --git a/changelog.d/5294.bugfix b/changelog.d/5294.bugfix deleted file mode 100644 index 5924bda31971..000000000000 --- a/changelog.d/5294.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix performance problems with the rooms stats background update. diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5296.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5299.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5300.bugfix b/changelog.d/5300.bugfix deleted file mode 100644 index 049e93cd5a41..000000000000 --- a/changelog.d/5300.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix noisy 'no key for server' logs. diff --git a/changelog.d/5303.misc b/changelog.d/5303.misc deleted file mode 100644 index f6a7f1f8e37f..000000000000 --- a/changelog.d/5303.misc +++ /dev/null @@ -1 +0,0 @@ -Clarify that the admin change password API logs the user out. diff --git a/changelog.d/5307.bugfix b/changelog.d/5307.bugfix deleted file mode 100644 index 6b152f48544a..000000000000 --- a/changelog.d/5307.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where a notary server would sometimes forget old keys. diff --git a/changelog.d/5309.bugfix b/changelog.d/5309.bugfix deleted file mode 100644 index 97b35272665c..000000000000 --- a/changelog.d/5309.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent users from setting huge displaynames and avatar URLs. diff --git a/changelog.d/5317.bugfix b/changelog.d/5317.bugfix deleted file mode 100644 index 270937521493..000000000000 --- a/changelog.d/5317.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. 
diff --git a/changelog.d/5320.misc b/changelog.d/5320.misc deleted file mode 100644 index 5b4bf0530361..000000000000 --- a/changelog.d/5320.misc +++ /dev/null @@ -1 +0,0 @@ -New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. diff --git a/changelog.d/5321.bugfix b/changelog.d/5321.bugfix deleted file mode 100644 index 943a61956dae..000000000000 --- a/changelog.d/5321.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature deleted file mode 100644 index 01285e965c84..000000000000 --- a/changelog.d/5324.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. diff --git a/changelog.d/5328.misc b/changelog.d/5328.misc deleted file mode 100644 index e1b9dc58a3d9..000000000000 --- a/changelog.d/5328.misc +++ /dev/null @@ -1 +0,0 @@ -The base classes for the v1 and v2_alpha REST APIs have been unified. diff --git a/changelog.d/5332.misc b/changelog.d/5332.misc deleted file mode 100644 index dcfac4eac9fe..000000000000 --- a/changelog.d/5332.misc +++ /dev/null @@ -1 +0,0 @@ -Improve docstrings on MatrixFederationClient. diff --git a/changelog.d/5333.bugfix b/changelog.d/5333.bugfix deleted file mode 100644 index cb05a6dd63b4..000000000000 --- a/changelog.d/5333.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix various problems which made the signing-key notary server time out for some requests. \ No newline at end of file diff --git a/changelog.d/5334.bugfix b/changelog.d/5334.bugfix deleted file mode 100644 index ed141e0918c7..000000000000 --- a/changelog.d/5334.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which would make certain operations (such as room joins) block for 20 minutes while attemoting to fetch verification keys. diff --git a/changelog.d/5335.bugfix b/changelog.d/5335.bugfix deleted file mode 100644 index 7318cbe35e37..000000000000 --- a/changelog.d/5335.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. diff --git a/changelog.d/5340.bugfix b/changelog.d/5340.bugfix deleted file mode 100644 index 931ee904e141..000000000000 --- a/changelog.d/5340.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. - diff --git a/changelog.d/5341.bugfix b/changelog.d/5341.bugfix deleted file mode 100644 index a7aaa95f3949..000000000000 --- a/changelog.d/5341.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. diff --git a/changelog.d/5342.bugfix b/changelog.d/5342.bugfix deleted file mode 100644 index 66a3076292f6..000000000000 --- a/changelog.d/5342.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix failure when fetching batches of events during backfill, etc. diff --git a/changelog.d/5343.misc b/changelog.d/5343.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5343.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5344.misc b/changelog.d/5344.misc deleted file mode 100644 index a20c563bf170..000000000000 --- a/changelog.d/5344.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up FederationClient.get_events for clarity. 
diff --git a/changelog.d/5347.misc b/changelog.d/5347.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5347.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5348.bugfix b/changelog.d/5348.bugfix deleted file mode 100644 index 8d396c7990a7..000000000000 --- a/changelog.d/5348.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add a new room version where the timestamps on events are checked against the validity periods on signing keys. \ No newline at end of file diff --git a/changelog.d/5352.bugfix b/changelog.d/5352.bugfix deleted file mode 100644 index 2ffefe5a6846..000000000000 --- a/changelog.d/5352.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room stats and presence background updates to correctly handle missing events. diff --git a/changelog.d/5353.misc b/changelog.d/5353.misc deleted file mode 100644 index 436245fb11aa..000000000000 --- a/changelog.d/5353.misc +++ /dev/null @@ -1,2 +0,0 @@ -Various improvements to debug logging. - diff --git a/changelog.d/5354.bugfix b/changelog.d/5354.bugfix deleted file mode 100644 index 0c56032b3080..000000000000 --- a/changelog.d/5354.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Add a new room version where the timestamps on events are checked against the validity periods on signing keys. - diff --git a/changelog.d/5355.bugfix b/changelog.d/5355.bugfix deleted file mode 100644 index e1955a7403dd..000000000000 --- a/changelog.d/5355.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include left members in room summaries' heroes. diff --git a/changelog.d/5356.misc b/changelog.d/5356.misc deleted file mode 100644 index 8336bc55dc48..000000000000 --- a/changelog.d/5356.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5357.doc b/changelog.d/5357.doc deleted file mode 100644 index 27cba49641ff..000000000000 --- a/changelog.d/5357.doc +++ /dev/null @@ -1 +0,0 @@ -Fix notes about ACME in the MSC1711 faq. diff --git a/changelog.d/5359.feature b/changelog.d/5359.feature deleted file mode 100644 index 2a0393983485..000000000000 --- a/changelog.d/5359.feature +++ /dev/null @@ -1 +0,0 @@ -Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). diff --git a/changelog.d/5360.feature b/changelog.d/5360.feature deleted file mode 100644 index 01fbb3b06d9e..000000000000 --- a/changelog.d/5360.feature +++ /dev/null @@ -1 +0,0 @@ -Update /_matrix/client/versions to reference support for r0.5.0. diff --git a/changelog.d/5361.feature b/changelog.d/5361.feature deleted file mode 100644 index 10768cdad3c4..000000000000 --- a/changelog.d/5361.feature +++ /dev/null @@ -1 +0,0 @@ -Add a script to generate new signing-key files. diff --git a/changelog.d/5362.bugfix b/changelog.d/5362.bugfix deleted file mode 100644 index 1c8b19182cb6..000000000000 --- a/changelog.d/5362.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `federation_custom_ca_list` configuration option. diff --git a/changelog.d/5369.bugfix b/changelog.d/5369.bugfix deleted file mode 100644 index cc61618f3958..000000000000 --- a/changelog.d/5369.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix missing logcontext warnings on shutdown. diff --git a/changelog.d/5370.misc b/changelog.d/5370.misc deleted file mode 100644 index b0473ef280d7..000000000000 --- a/changelog.d/5370.misc +++ /dev/null @@ -1 +0,0 @@ -Don't run CI build checks until sample config check has passed. 
diff --git a/changelog.d/5371.feature b/changelog.d/5371.feature deleted file mode 100644 index 7f960630e01a..000000000000 --- a/changelog.d/5371.feature +++ /dev/null @@ -1 +0,0 @@ -Update upgrade and installation guides ahead of 1.0. diff --git a/changelog.d/5374.feature b/changelog.d/5374.feature deleted file mode 100644 index 17937637ab8d..000000000000 --- a/changelog.d/5374.feature +++ /dev/null @@ -1 +0,0 @@ -Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). diff --git a/changelog.d/5377.feature b/changelog.d/5377.feature deleted file mode 100644 index 6aae41847a3e..000000000000 --- a/changelog.d/5377.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to perform password reset via email without trusting the identity server. diff --git a/changelog.d/5379.feature b/changelog.d/5379.feature deleted file mode 100644 index 7b64786fe679..000000000000 --- a/changelog.d/5379.feature +++ /dev/null @@ -1 +0,0 @@ -Set default room version to v4. diff --git a/changelog.d/5380.misc b/changelog.d/5380.misc deleted file mode 100644 index 099bba414cf4..000000000000 --- a/changelog.d/5380.misc +++ /dev/null @@ -1 +0,0 @@ -Automatically retry buildkite builds (max twice) when an agent is lost. diff --git a/synapse/__init__.py b/synapse/__init__.py index d0e8d7c21ba7..77a4cfc3a5f7 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ except ImportError: pass -__version__ = "0.99.5.2" +__version__ = "1.0.0rc1" From a46ef1e3a4eea55919d86da322628a0713e6ba2d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:29:35 +0100 Subject: [PATCH 159/231] Handle HttpResponseException when using federation client. Otherwise we just log exceptions everywhere. 
--- synapse/groups/attestations.py | 4 ++-- synapse/handlers/groups_local.py | 4 +--- synapse/handlers/profile.py | 29 ++++++++++++++++------------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index e5dda1975f62..cacc6026fa12 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -42,7 +42,7 @@ from twisted.internet import defer -from synapse.api.errors import RequestSendFailed, SynapseError +from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import get_domain_from_id from synapse.util.logcontext import run_in_background @@ -194,7 +194,7 @@ def _renew_attestation(group_id, user_id): yield self.store.update_attestation_renewal( group_id, user_id, attestation ) - except RequestSendFailed as e: + except (RequestSendFailed, HttpResponseException) as e: logger.warning( "Failed to renew attestation of %r in %r: %s", user_id, group_id, e, diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 02c508acece2..f60ace02e8af 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -49,9 +49,7 @@ def f(self, group_id, *args, **kwargs): def http_response_errback(failure): failure.trap(HttpResponseException) e = failure.value - if e.code == 403: - raise e.to_synapse_error() - return failure + raise e.to_synapse_error() def request_failed_errback(failure): failure.trap(RequestSendFailed) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a5fc6c5dbf82..3e0423339433 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -15,12 +15,15 @@ import logging +from six import raise_from + from twisted.internet import defer from synapse.api.errors import ( AuthError, - CodeMessageException, Codes, + HttpResponseException, + RequestSendFailed, StoreError, SynapseError, ) @@ -85,10 +88,10 @@ def get_profile(self, user_id): ignore_backoff=True, ) defer.returnValue(result) - except CodeMessageException as e: - if e.code != 404: - logger.exception("Failed to get displayname") - raise + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to fetch profile"), e) + except HttpResponseException as e: + raise e.to_synapse_error() @defer.inlineCallbacks def get_profile_from_cache(self, user_id): @@ -142,10 +145,10 @@ def get_displayname(self, target_user): }, ignore_backoff=True, ) - except CodeMessageException as e: - if e.code != 404: - logger.exception("Failed to get displayname") - raise + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to fetch profile"), e) + except HttpResponseException as e: + raise e.to_synapse_error() defer.returnValue(result["displayname"]) @@ -208,10 +211,10 @@ def get_avatar_url(self, target_user): }, ignore_backoff=True, ) - except CodeMessageException as e: - if e.code != 404: - logger.exception("Failed to get avatar_url") - raise + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to fetch profile"), e) + except HttpResponseException as e: + raise e.to_synapse_error() defer.returnValue(result["avatar_url"]) From 8e0cee90d291790a088fb88e4120f84bf3420a65 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 7 Jun 2019 10:31:48 +0100 Subject: [PATCH 160/231] Add a sponsor button (#5382) Add a sponsor button with links to matrixdotorg's 
patreon and liberapay accounts. --- .github/FUNDING.yml | 3 +++ changelog.d/5382.misc | 1 + 2 files changed, 4 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 changelog.d/5382.misc diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 000000000000..c21d66665ee0 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# One username per supported platform and one custom link +patreon: matrixdotorg +custom: https://liberapay.com/matrixdotorg diff --git a/changelog.d/5382.misc b/changelog.d/5382.misc new file mode 100644 index 000000000000..060cbba2a944 --- /dev/null +++ b/changelog.d/5382.misc @@ -0,0 +1 @@ +Add a sponsor button to the repo. From a2419b27fe9df598e9c4f3236fed6c7600fc7c86 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:31:53 +0100 Subject: [PATCH 161/231] Newsfile --- changelog.d/5383.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5383.misc diff --git a/changelog.d/5383.misc b/changelog.d/5383.misc new file mode 100644 index 000000000000..9dd5d1df93c1 --- /dev/null +++ b/changelog.d/5383.misc @@ -0,0 +1 @@ +Don't log non-200 responses from federation queries as exceptions. From 6745b7de6d05ff99d70b2065a99a72efac10a5e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:47:31 +0100 Subject: [PATCH 162/231] Handle failing to talk to master over replication --- synapse/replication/http/_base.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index e81456ab2bb3..0a432a16fa5b 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -17,11 +17,17 @@ import logging import re +from six import raise_from from six.moves import urllib from twisted.internet import defer -from synapse.api.errors import CodeMessageException, HttpResponseException +from synapse.api.errors import ( + CodeMessageException, + HttpResponseException, + RequestSendFailed, + SynapseError, +) from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string @@ -175,6 +181,8 @@ def send_request(**kwargs): # on the master process that we should send to the client. (And # importantly, not stack traces everywhere) raise e.to_synapse_error() + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to talk to master"), e) defer.returnValue(result) From 928d1ccd73ddce5af99539ad800987d2f5bd2942 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:57:39 +0100 Subject: [PATCH 163/231] Fix email notifications for large unnamed rooms. When we try to calculate a description for a room with no name but multiple other users, we threw an exception (due to trying to subscript the result of `dict.values()`). --- synapse/push/presentable_names.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index eef6e18c2e1d..0c66702325ad 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -162,6 +162,17 @@ def calculate_room_name(store, room_state_ids, user_id, fallback_to_members=True def descriptor_from_member_events(member_events): + """Get a description of the room based on the member events. 
+ + Args: + member_events (Iterable[FrozenEvent]) + + Returns: + str + """ + + member_events = list(member_events) + if len(member_events) == 0: return "nobody" elif len(member_events) == 1: From 8182a1cfb523fb1e8d328716111e98be3a1c5c35 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 11:09:08 +0100 Subject: [PATCH 164/231] Refactor email tests --- tests/push/test_email.py | 64 ++++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 9cdde1a9bd32..62b3c2a99d8d 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -15,6 +15,7 @@ import os +import attr import pkg_resources from twisted.internet.defer import Deferred @@ -30,6 +31,13 @@ load_jinja2_templates = None +@attr.s +class _User(object): + "Helper wrapper for user ID and access token" + id = attr.ib() + token = attr.ib() + + class EmailPusherTests(HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None @@ -77,25 +85,32 @@ def sendmail(*args, **kwargs): return hs - def test_sends_email(self): - + def prepare(self, reactor, clock, hs): # Register the user who gets notified - user_id = self.register_user("user", "pass") - access_token = self.login("user", "pass") - - # Register the user who sends the message - other_user_id = self.register_user("otheruser", "pass") - other_access_token = self.login("otheruser", "pass") + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") + + # Register other users + self.others = [ + _User( + id=self.register_user("otheruser1", "pass"), + token=self.login("otheruser1", "pass"), + ), + _User( + id=self.register_user("otheruser2", "pass"), + token=self.login("otheruser2", "pass"), + ), + ] # Register the pusher user_tuple = self.get_success( - self.hs.get_datastore().get_user_by_access_token(access_token) + self.hs.get_datastore().get_user_by_access_token(self.access_token) ) token_id = user_tuple["token_id"] self.get_success( self.hs.get_pusherpool().add_pusher( - user_id=user_id, + user_id=self.user_id, access_token=token_id, kind="email", app_id="m.email", @@ -107,22 +122,27 @@ def test_sends_email(self): ) ) - # Create a room - room = self.helper.create_room_as(user_id, tok=access_token) + def test_simple_sends_email(self): + # Create a simple room with two users + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id, + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) - # Invite the other person - self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) + # The other user sends some messages + self.helper.send(room, body="Hi!", tok=self.others[0].token) + self.helper.send(room, body="There!", tok=self.others[0].token) - # The other user joins - self.helper.join(room=room, user=other_user_id, tok=other_access_token) + # We should get emailed about that message + self._check_for_mail() - # The other user sends some messages - self.helper.send(room, body="Hi!", tok=other_access_token) - self.helper.send(room, body="There!", tok=other_access_token) + def _check_for_mail(self): + "Check that the user receives an email notification" # Get the stream ordering before it gets sent pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + 
self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) ) self.assertEqual(len(pushers), 1) last_stream_ordering = pushers[0]["last_stream_ordering"] @@ -132,7 +152,7 @@ def test_sends_email(self): # It hasn't succeeded yet, so the stream ordering shouldn't have moved pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) ) self.assertEqual(len(pushers), 1) self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"]) @@ -149,7 +169,7 @@ def test_sends_email(self): # The stream ordering has increased pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) ) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering) From 2ebeda48b2e6ba522fe049ee7ef13450f6839e1b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:10:23 +0100 Subject: [PATCH 165/231] Add test --- synapse/push/emailpusher.py | 19 +++++++++++++++++++ synapse/push/pusherpool.py | 30 +++++++++++++++++++++++------- tests/push/test_email.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 70 insertions(+), 8 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index e8ee67401fbd..c89a8438a938 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -114,6 +114,21 @@ def _start_processing(self): run_as_background_process("emailpush.process", self._process) + def _pause_processing(self): + """Used by tests to temporarily pause processing of events. + + Asserts that it's not currently processing. + """ + assert not self._is_processing + self._is_processing = True + + def _resume_processing(self): + """Used by tests to resume processing of events after pausing. 
+ """ + assert self._is_processing + self._is_processing = False + self._start_processing() + @defer.inlineCallbacks def _process(self): # we should never get here if we are already processing @@ -215,6 +230,10 @@ def _unsafe_process(self): @defer.inlineCallbacks def save_last_stream_ordering_and_success(self, last_stream_ordering): + if last_stream_ordering is None: + # This happens if we haven't yet processed anything + return + self.last_stream_ordering = last_stream_ordering yield self.store.update_pusher_last_stream_ordering_and_success( self.app_id, self.email, self.user_id, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 40a7709c0949..63c583565fa0 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -60,6 +60,11 @@ def start(self): def add_pusher(self, user_id, access_token, kind, app_id, app_display_name, device_display_name, pushkey, lang, data, profile_tag=""): + """Creates a new pusher and adds it to the pool + + Returns: + Deferred[EmailPusher|HttpPusher] + """ time_now_msec = self.clock.time_msec() # we try to create the pusher just to validate the config: it @@ -103,7 +108,9 @@ def add_pusher(self, user_id, access_token, kind, app_id, last_stream_ordering=last_stream_ordering, profile_tag=profile_tag, ) - yield self.start_pusher_by_id(app_id, pushkey, user_id) + pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id) + + defer.returnValue(pusher) @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey, @@ -184,7 +191,11 @@ def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids): @defer.inlineCallbacks def start_pusher_by_id(self, app_id, pushkey, user_id): - """Look up the details for the given pusher, and start it""" + """Look up the details for the given pusher, and start it + + Returns: + Deferred[EmailPusher|HttpPusher|None]: The pusher started, if any + """ if not self._should_start_pushers: return @@ -192,13 +203,16 @@ def start_pusher_by_id(self, app_id, pushkey, user_id): app_id, pushkey ) - p = None + pusher_dict = None for r in resultlist: if r['user_name'] == user_id: - p = r + pusher_dict = r - if p: - yield self._start_pusher(p) + pusher = None + if pusher_dict: + pusher = yield self._start_pusher(pusher_dict) + + defer.returnValue(pusher) @defer.inlineCallbacks def _start_pushers(self): @@ -224,7 +238,7 @@ def _start_pusher(self, pusherdict): pusherdict (dict): Returns: - None + Deferred[EmailPusher|HttpPusher] """ try: p = self.pusher_factory.create_pusher(pusherdict) @@ -270,6 +284,8 @@ def _start_pusher(self, pusherdict): p.on_started(have_notifs) + defer.returnValue(p) + @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): appid_pushkey = "%s:%s" % (app_id, pushkey) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 62b3c2a99d8d..c10b65d4b869 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -108,7 +108,7 @@ def prepare(self, reactor, clock, hs): ) token_id = user_tuple["token_id"] - self.get_success( + self.pusher = self.get_success( self.hs.get_pusherpool().add_pusher( user_id=self.user_id, access_token=token_id, @@ -137,6 +137,33 @@ def test_simple_sends_email(self): # We should get emailed about that message self._check_for_mail() + def test_multiple_members_email(self): + # We want to test multiple notifications, so we pause processing of push + # while we send messages. 
+ self.pusher._pause_processing() + + # Create a simple room with multiple other users + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + + for other in self.others: + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=other.id, + ) + self.helper.join(room=room, user=other.id, tok=other.token) + + # The other users send some messages + self.helper.send(room, body="Hi!", tok=self.others[0].token) + self.helper.send(room, body="There!", tok=self.others[1].token) + self.helper.send(room, body="There!", tok=self.others[1].token) + + # Nothing should have happened yet, as we're paused. + assert not self.email_attempts + + self.pusher._resume_processing() + + # We should get emailed about those messages + self._check_for_mail() From a099926fcc1737f5465a6a112496e8da68293fc9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:13:14 +0100 Subject: [PATCH 166/231] Newsfile --- changelog.d/5388.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5388.bugfix diff --git a/changelog.d/5388.bugfix b/changelog.d/5388.bugfix new file mode 100644 index 000000000000..503e8309151d --- /dev/null +++ b/changelog.d/5388.bugfix @@ -0,0 +1 @@ +Fix email notifications for unnamed rooms with multiple people. From 837340bdce7d0175a8f5fae2c4ed34ac1334d431 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:24:07 +0100 Subject: [PATCH 167/231] Only start background group attestation renewals on master --- synapse/groups/attestations.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index e5dda1975f62..469ab8ac7bee 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -132,9 +132,10 @@ def __init__(self, hs): self.is_mine_id = hs.is_mine_id self.attestations = hs.get_groups_attestation_signing() - self._renew_attestations_loop = self.clock.looping_call( - self._start_renew_attestations, 30 * 60 * 1000, - ) + if not hs.config.worker_app: + self._renew_attestations_loop = self.clock.looping_call( + self._start_renew_attestations, 30 * 60 * 1000, + ) @defer.inlineCallbacks def on_renew_attestation(self, group_id, user_id, content): From 2cca90dd40a5411a3b01b57b5f74b472b2de2cfc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:26:59 +0100 Subject: [PATCH 168/231] Newsfile --- changelog.d/5389.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5389.bugfix diff --git a/changelog.d/5389.bugfix b/changelog.d/5389.bugfix new file mode 100644 index 000000000000..dd648e26c837 --- /dev/null +++ b/changelog.d/5389.bugfix @@ -0,0 +1 @@ +Fix exceptions in federation reader worker caused by attempting to renew attestations, which should only happen on master worker. From 95d38afe96bfb38e02de9767603b2655c07a7e0f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:34:52 +0100 Subject: [PATCH 169/231] Don't log exception when failing to fetch remote content. In particular, let's not log stack traces when we stop processing because the response body was too large. 
--- synapse/http/client.py | 13 +++++++++---- synapse/rest/media/v1/media_repository.py | 6 ++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index 77fe68818b70..5c073fff07f8 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -17,7 +17,7 @@ import logging from io import BytesIO -from six import text_type +from six import raise_from, text_type from six.moves import urllib import treq @@ -542,10 +542,15 @@ def get_file(self, url, output_stream, max_size=None, headers=None): length = yield make_deferred_yieldable( _readBodyToFile(response, output_stream, max_size) ) + except SynapseError: + # This can happen e.g. because the body is too large. + raise except Exception as e: - logger.exception("Failed to download body") - raise SynapseError( - 502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN + raise_from( + SynapseError( + 502, ("Failed to download remote body: %s" % e), + ), + e ) defer.returnValue( diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 856967735537..a4929dd5dbd2 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -386,8 +386,10 @@ def _download_remote_file(self, server_name, media_id, file_id): raise SynapseError(502, "Failed to fetch remote media") except SynapseError: - logger.exception("Failed to fetch remote media %s/%s", - server_name, media_id) + logger.warn( + "Failed to fetch remote media %s/%s", + server_name, media_id, + ) raise except NotRetryingDestination: logger.warn("Not retrying destination %r", server_name) From 5009d988da81c328d4f13fc8ceb89f85364a44bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:37:38 +0100 Subject: [PATCH 170/231] Newsfile --- changelog.d/5390.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5390.bugfix diff --git a/changelog.d/5390.bugfix b/changelog.d/5390.bugfix new file mode 100644 index 000000000000..e7b7483cf26b --- /dev/null +++ b/changelog.d/5390.bugfix @@ -0,0 +1 @@ +Fix handling of failures fetching remote content to not log failures as exceptions. From 2decc92e2f1f42323475efe54be9f672388fc713 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Sun, 9 Jun 2019 02:20:23 +0100 Subject: [PATCH 171/231] Liberapay is now officially recognised, update FUNDING.yml (#5386) --- .github/FUNDING.yml | 3 ++- changelog.d/5386.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5386.misc diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index c21d66665ee0..1a57677a0e14 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,4 @@ # One username per supported platform and one custom link patreon: matrixdotorg -custom: https://liberapay.com/matrixdotorg +liberapay: matrixdotorg +custom: https://paypal.me/matrixdotorg diff --git a/changelog.d/5386.misc b/changelog.d/5386.misc new file mode 100644 index 000000000000..060cbba2a944 --- /dev/null +++ b/changelog.d/5386.misc @@ -0,0 +1 @@ +Add a sponsor button to the repo. From c2b6e945e1c455c10e09684a0e43e19937db604c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sun, 9 Jun 2019 14:01:32 +0100 Subject: [PATCH 172/231] Share an SSL context object between SSL connections This involves changing how the info callbacks work. 
--- synapse/crypto/context_factory.py | 149 ++++++++++++++++++------------ 1 file changed, 89 insertions(+), 60 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 59ea087e66d2..aa8b20fe7dd2 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -15,10 +15,12 @@ import logging +from service_identity import VerificationError +from service_identity.pyopenssl import verify_hostname from zope.interface import implementer from OpenSSL import SSL, crypto -from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName +from twisted.internet._sslverify import _defaultCurveName from twisted.internet.abstract import isIPAddress, isIPv6Address from twisted.internet.interfaces import IOpenSSLClientConnectionCreator from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust @@ -70,65 +72,19 @@ def _idnaBytes(text): return idna.encode(text) -def _tolerateErrors(wrapped): - """ - Wrap up an info_callback for pyOpenSSL so that if something goes wrong - the error is immediately logged and the connection is dropped if possible. - This is a copy of twisted.internet._sslverify._tolerateErrors. For - documentation, see the twisted documentation. - """ - - def infoCallback(connection, where, ret): - try: - return wrapped(connection, where, ret) - except: # noqa: E722, taken from the twisted implementation - f = Failure() - logger.exception("Error during info_callback") - connection.get_app_data().failVerification(f) - - return infoCallback +class ClientTLSOptionsFactory(object): + """Factory for Twisted SSLClientConnectionCreators that are used to make connections + to remote servers for federation. + Uses one of two OpenSSL context objects for all connections, depending on whether + we should do SSL certificate verification. -@implementer(IOpenSSLClientConnectionCreator) -class ClientTLSOptionsNoVerify(object): - """ - Client creator for TLS without certificate identity verification. This is a - copy of twisted.internet._sslverify.ClientTLSOptions with the identity - verification left out. For documentation, see the twisted documentation. + get_options decides whether we should do SSL certificate verification and + constructs an SSLClientConnectionCreator factory accordingly. 
""" - def __init__(self, hostname, ctx): - self._ctx = ctx - - if isIPAddress(hostname) or isIPv6Address(hostname): - self._hostnameBytes = hostname.encode('ascii') - self._sendSNI = False - else: - self._hostnameBytes = _idnaBytes(hostname) - self._sendSNI = True - - ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback)) - - def clientConnectionForTLS(self, tlsProtocol): - context = self._ctx - connection = SSL.Connection(context, None) - connection.set_app_data(tlsProtocol) - return connection - - def _identityVerifyingInfoCallback(self, connection, where, ret): - # Literal IPv4 and IPv6 addresses are not permitted - # as host names according to the RFCs - if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: - connection.set_tlsext_host_name(self._hostnameBytes) - - -class ClientTLSOptionsFactory(object): - """Factory for Twisted ClientTLSOptions that are used to make connections - to remote servers for federation.""" - def __init__(self, config): self._config = config - self._options_noverify = CertificateOptions() # Check if we're using a custom list of a CA certificates trust_root = config.federation_ca_trust_root @@ -136,11 +92,13 @@ def __init__(self, config): # Use CA root certs provided by OpenSSL trust_root = platformTrust() - self._options_verify = CertificateOptions(trustRoot=trust_root) + self._verify_ssl_context = CertificateOptions(trustRoot=trust_root).getContext() + self._verify_ssl_context.set_info_callback(self._context_info_cb) - def get_options(self, host): - # Use _makeContext so that we get a fresh OpenSSL CTX each time. + self._no_verify_ssl_context = CertificateOptions().getContext() + self._no_verify_ssl_context.set_info_callback(self._context_info_cb) + def get_options(self, host): # Check if certificate verification has been enabled should_verify = self._config.federation_verify_certificates @@ -151,6 +109,77 @@ def get_options(self, host): should_verify = False break - if should_verify: - return ClientTLSOptions(host, self._options_verify._makeContext()) - return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext()) + ssl_context = ( + self._verify_ssl_context if should_verify else self._no_verify_ssl_context + ) + + return SSLClientConnectionCreator(host, ssl_context, should_verify) + + @staticmethod + def _context_info_cb(ssl_connection, where, ret): + """The 'information callback' for our openssl context object.""" + # we assume that the app_data on the connection object has been set to + # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator) + tls_protocol = ssl_connection.get_app_data() + try: + # ... we further assume that SSLClientConnectionCreator has set the + # 'tls_verifier' attribute to a ConnectionVerifier object. + tls_protocol.tls_verifier.verify_context_info_cb(ssl_connection, where) + except: # noqa: E722, taken from the twisted implementation + logger.exception("Error during info_callback") + f = Failure() + tls_protocol.failVerification(f) + + +@implementer(IOpenSSLClientConnectionCreator) +class SSLClientConnectionCreator(object): + """Creates openssl connection objects for client connections. 
+ + Replaces twisted.internet.ssl.ClientTLSOptions + """ + def __init__(self, hostname, ctx, verify_certs): + self._ctx = ctx + self._verifier = ConnectionVerifier(hostname, verify_certs) + + def clientConnectionForTLS(self, tls_protocol): + context = self._ctx + connection = SSL.Connection(context, None) + + # as per twisted.internet.ssl.ClientTLSOptions, we set the application + # data to our TLSMemoryBIOProtocol... + connection.set_app_data(tls_protocol) + + # ... and we also gut-wrench a 'tls_verifier' attribute into the + # tls_protocol so that the SSL context's info callback has something to + # call to do the cert verification. + setattr(tls_protocol, "tls_verifier", self._verifier) + return connection + + +class ConnectionVerifier(object): + """Set the SNI, and do cert verification + + This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by + the ssl context's info callback. + """ + def __init__(self, hostname, verify_certs): + self._verify_certs = verify_certs + if isIPAddress(hostname) or isIPv6Address(hostname): + self._hostnameBytes = hostname.encode('ascii') + self._sendSNI = False + else: + self._hostnameBytes = _idnaBytes(hostname) + self._sendSNI = True + + self._hostnameASCII = self._hostnameBytes.decode("ascii") + + def verify_context_info_cb(self, ssl_connection, where): + if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: + ssl_connection.set_tlsext_host_name(self._hostnameBytes) + if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs: + try: + verify_hostname(ssl_connection, self._hostnameASCII) + except VerificationError: + f = Failure() + tls_protocol = ssl_connection.get_app_data() + tls_protocol.failVerification(f) From 88d7182adaef8711bf3cc80ff604e566e517b6e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 10 Jun 2019 10:33:00 +0100 Subject: [PATCH 173/231] Improve startup checks for insecure notary configs (#5392) It's not really a problem to trust notary responses signed by the old key so long as we are also doing TLS validation. This commit adds a check to the config parsing code at startup to check that we do not have the insecure matrix.org key without tls validation, and refuses to start without it. This allows us to remove the rather alarming-looking warning which happens at runtime. --- changelog.d/5392.bugfix | 1 + synapse/config/key.py | 27 +++++++++++++++++++++++---- synapse/crypto/keyring.py | 7 ------- 3 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 changelog.d/5392.bugfix diff --git a/changelog.d/5392.bugfix b/changelog.d/5392.bugfix new file mode 100644 index 000000000000..295a7cfce1ce --- /dev/null +++ b/changelog.d/5392.bugfix @@ -0,0 +1 @@ +Remove redundant warning about key server response validation. diff --git a/synapse/config/key.py b/synapse/config/key.py index aba7092ccdbb..424875feae3d 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -41,6 +41,15 @@ you are *sure* you want to do this, set 'accept_keys_insecurely' on the keyserver configuration.""" +RELYING_ON_MATRIX_KEY_ERROR = """\ +Your server is configured to accept key server responses without TLS certificate +validation, and which are only signed by the old (possibly compromised) +matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do, +and you should enable 'federation_verify_certificates' in your configuration. 
+ +If you are *sure* you want to do this, set 'accept_keys_insecurely' on the +trusted_key_server configuration.""" + logger = logging.getLogger(__name__) @@ -340,10 +349,20 @@ def _parse_key_servers(key_servers, federation_verify_certificates): result.verify_keys[key_id] = verify_key if ( - not verify_keys - and not server.get("accept_keys_insecurely") - and not federation_verify_certificates + not federation_verify_certificates and + not server.get("accept_keys_insecurely") ): - raise ConfigError(INSECURE_NOTARY_ERROR) + _assert_keyserver_has_verify_keys(result) yield result + + +def _assert_keyserver_has_verify_keys(trusted_key_server): + if not trusted_key_server.verify_keys: + raise ConfigError(INSECURE_NOTARY_ERROR) + + # also check that they are not blindly checking the old matrix.org key + if trusted_key_server.server_name == "matrix.org" and any( + key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys + ): + raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 96964b0d5062..6f603f19615e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -750,13 +750,6 @@ def _validate_perspectives_response( verify_signed_json(response, perspective_name, perspective_keys[key_id]) verified = True - if perspective_name == "matrix.org" and key_id == "ed25519:auto": - logger.warning( - "Trusting trusted_key_server responses signed by the " - "compromised matrix.org signing key 'ed25519:auto'. " - "This is a placebo." - ) - if not verified: raise KeyLookupError( "Response not signed with a known key: signed with: %r, known keys: %r" From 4914a8882939337cc04d7e3e3162a9401489a437 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 10 Jun 2019 11:34:45 +0100 Subject: [PATCH 174/231] Doc --- synapse/api/auth.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e24d942553f6..a04be32890f3 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -195,6 +195,11 @@ def get_user_by_req( Args: request - An HTTP request with an access_token query parameter. + allow_expired - Whether to allow the request through even if the account is + expired. If true, Synapse will still require the access token to be + provided but won't check if the account it belongs to has expired. This + works thanks to /login delivering access tokens regardless of accounts' + expiration. Returns: defer.Deferred: resolves to a ``synapse.types.Requester`` object Raises: From 028f674cd323cc12f2e03e5c734c77bb4095f457 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 10 Jun 2019 11:35:54 +0100 Subject: [PATCH 175/231] Better wording --- synapse/api/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a04be32890f3..79e2808dc5a7 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -196,7 +196,7 @@ def get_user_by_req( Args: request - An HTTP request with an access_token query parameter. allow_expired - Whether to allow the request through even if the account is - expired. If true, Synapse will still require the access token to be + expired. If true, Synapse will still require an access token to be provided but won't check if the account it belongs to has expired. This works thanks to /login delivering access tokens regardless of accounts' expiration. 
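To illustrate the `allow_expired` flag documented in the two patches above, here is a minimal sketch of a servlet that deliberately lets expired accounts through. The servlet class and endpoint behaviour are hypothetical and not taken from these patches; only the `get_user_by_req(..., allow_expired=True)` call reflects the documented API.

from twisted.internet import defer

class AccountRenewalServlet(object):
    # Hypothetical servlet: lets a user whose account has expired reach a
    # renewal endpoint.
    def __init__(self, hs):
        self.auth = hs.get_auth()

    @defer.inlineCallbacks
    def on_POST(self, request):
        # A valid access token is still required; only the account-expiry
        # check is skipped, so expired users can renew themselves.
        requester = yield self.auth.get_user_by_req(request, allow_expired=True)
        user_id = requester.user.to_string()
        # ... renew the account for user_id here ...
        defer.returnValue((200, {}))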
From ab157e61a27c48b5f90a0c9168d534d760c0c80c Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 17:31:56 +0530 Subject: [PATCH 176/231] - Fix https://github.com/matrix-org/synapse/issues/4130 - Add parser argument "--no-daemonize" Signed-off-by: sohamg --- synctl | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/synctl b/synctl index 07a68e6d85f6..601ca41fc32f 100755 --- a/synctl +++ b/synctl @@ -69,10 +69,14 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile): +def start(configfile, daemonize = True): write("Starting ...") args = SYNAPSE - args.extend(["--daemonize", "-c", configfile]) + + if daemonize: + args.extend(["--daemonize", "-c", configfile]) + else: + args.extend(["-c", configfile]) try: subprocess.check_call(args) @@ -143,12 +147,20 @@ def main(): help="start or stop all the workers in the given directory" " and the main synapse process", ) + parser.add_argument( + "--no-daemonize", + action="store_false", + help="Run synapse in the foreground (for debugging)" + ) options = parser.parse_args() if options.worker and options.all_processes: write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) sys.exit(1) + if options.no_daemonize and options.all_processes: + write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr) + sys.exit(1) configfile = options.configfile @@ -276,7 +288,7 @@ def main(): # Check if synapse is already running if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): abort("synapse.app.homeserver already running") - start(configfile) + start(configfile, bool(options.no_daemonize)) for worker in workers: env = os.environ.copy() From b56a224e22e20258fb72c5d8888be91a61bf9e11 Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 17:54:29 +0530 Subject: [PATCH 177/231] Added changelog file. --- changelog.d/5412.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5412.feature diff --git a/changelog.d/5412.feature b/changelog.d/5412.feature new file mode 100644 index 000000000000..ec1503860a08 --- /dev/null +++ b/changelog.d/5412.feature @@ -0,0 +1 @@ +Add --no-daemonize option to run synapse in the foreground, per issue #4130. Contributed by Soham Gumaste. \ No newline at end of file From 0afcbc65cbbceb78cd65ec21b13d729ba60e2f8c Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 18:28:20 +0530 Subject: [PATCH 178/231] Resolved pep8 extra spacing issue --- synctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synctl b/synctl index 601ca41fc32f..651cf396d7a8 100755 --- a/synctl +++ b/synctl @@ -69,7 +69,7 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile, daemonize = True): +def start(configfile, daemonize=True): write("Starting ...") args = SYNAPSE From 12f49b22ec23c9e7d6f1f3d0dce01304545958a1 Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 18:47:35 +0530 Subject: [PATCH 179/231] Edited description to note that the arg will not work with daemonize set in the config. 
--- synctl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synctl b/synctl index 651cf396d7a8..665eda51323b 100755 --- a/synctl +++ b/synctl @@ -69,7 +69,7 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile, daemonize=True): +def start(configfile, daemonize = True): write("Starting ...") args = SYNAPSE @@ -150,7 +150,8 @@ def main(): parser.add_argument( "--no-daemonize", action="store_false", - help="Run synapse in the foreground (for debugging)" + help="Run synapse in the foreground for debugging. " + "Will work only if the daemonize option is not set in the config." ) options = parser.parse_args() From ca7abb129c7a50066cbb39e6cfd4a198e7022d3b Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 19:09:14 +0530 Subject: [PATCH 180/231] Accidentally reverted the earlier pep8 fix; fixed now --- synctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synctl b/synctl index 665eda51323b..30d751236faa 100755 --- a/synctl +++ b/synctl @@ -69,7 +69,7 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile, daemonize = True): +def start(configfile, daemonize=True): write("Starting ...") args = SYNAPSE From 43badd2cd4315c3f3ed45b0092c4479a43a3eb52 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 14:31:05 +0100 Subject: [PATCH 181/231] Fix key verification when key stored with null valid_until_ms Some keys are stored in the synapse database with a null valid_until_ms, which caused an exception to be thrown when using that key. We fix this by treating nulls as zeroes, i.e. the keys will match verification requests with a minimum_valid_until_ms of zero (i.e. don't validate ts) but will not match requests with a non-zero minimum_valid_until_ms. Fixes #5391. --- synapse/storage/keys.py | 8 ++++++ tests/crypto/test_keyring.py | 50 +++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 5300720dbb87..e3655ad8d759 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -80,6 +80,14 @@ def _get_keys(txn, batch): for row in txn: server_name, key_id, key_bytes, ts_valid_until_ms = row + + if ts_valid_until_ms is None: + # Old keys may be stored with a ts_valid_until_ms of null, + # in which case we treat this as if it was set to `0`, i.e. + # it won't match key requests that define a minimum + # `ts_valid_until_ms`. 
+ ts_valid_until_ms = 0 + res = FetchKeyResult( verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), valid_until_ts=ts_valid_until_ms, diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 4b1901ce31a5..5a355f00cc2f 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,7 +25,11 @@ from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import PerspectivesKeyFetcher, ServerKeyFetcher +from synapse.crypto.keyring import ( + PerspectivesKeyFetcher, + ServerKeyFetcher, + StoreKeyFetcher, +) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -219,6 +223,50 @@ def test_verify_json_for_server(self): # self.assertFalse(d.called) self.get_success(d) + def test_verify_json_for_server_with_null_valid_until_ms(self): + """Tests that we correctly handle key requests for keys we've stored + with a null `ts_valid_until_ms` + """ + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock(return_value=defer.succeed({})) + + kr = keyring.Keyring( + self.hs, key_fetchers=(StoreKeyFetcher(self.hs), mock_fetcher) + ) + + key1 = signedjson.key.generate_signing_key(1) + r = self.hs.datastore.store_server_verify_keys( + "server9", + time.time() * 1000, + [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))], + ) + self.get_success(r) + + json1 = {} + signedjson.sign.sign_json(json1, "server9", key1) + + # should fail immediately on an unsigned object + d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") + self.failureResultOf(d, SynapseError) + + # should fail on a signed object with a non-zero minimum_valid_until_ms, + # as it tries to refetch the keys and fails. + d = _verify_json_for_server( + kr, "server9", json1, 500, "test signed non-zero min" + ) + self.get_failure(d, SynapseError) + + # We expect the keyring tried to refetch the key once. + mock_fetcher.get_keys.assert_called_once_with( + {"server9": {get_key_id(key1): 500}} + ) + + # should succeed on a signed object with a 0 minimum_valid_until_ms + d = _verify_json_for_server( + kr, "server9", json1, 0, "test signed with zero min" + ) + self.get_success(d) + def test_verify_json_dedupes_key_requests(self): """Two requests for the same key should be deduped.""" key1 = signedjson.key.generate_signing_key(1) From 9bc7768ad37c7b3aaed154b4cc54d6fb5eabe596 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 14:41:00 +0100 Subject: [PATCH 182/231] Newsfile --- changelog.d/5415.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5415.bugfix diff --git a/changelog.d/5415.bugfix b/changelog.d/5415.bugfix new file mode 100644 index 000000000000..83629e193da4 --- /dev/null +++ b/changelog.d/5415.bugfix @@ -0,0 +1 @@ +Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. 
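To make the null-handling rule above concrete, here is a small self-contained sketch of the matching behaviour; the helper function is illustrative only and is not code from the patch.

def key_matches_request(ts_valid_until_ms, minimum_valid_until_ms):
    # Mirrors the fix: a null validity timestamp is treated as 0.
    if ts_valid_until_ms is None:
        ts_valid_until_ms = 0
    return ts_valid_until_ms >= minimum_valid_until_ms

# A key stored with a null timestamp matches requests that don't require
# validity (a minimum of 0)...
assert key_matches_request(None, 0)
# ...but not requests with a non-zero minimum, which fall through to a
# re-fetch of the key instead.
assert not key_matches_request(None, 500)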
From d11c634ced532ed5ecdbefb45f0b5ae5cb2f9826 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 15:55:12 +0100 Subject: [PATCH 183/231] clean up impl, and import idna directly --- synapse/crypto/context_factory.py | 26 +++++++++++--------------- synapse/python_dependencies.py | 1 + 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index aa8b20fe7dd2..2f2378263091 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -15,6 +15,7 @@ import logging +import idna from service_identity import VerificationError from service_identity.pyopenssl import verify_hostname from zope.interface import implementer @@ -58,20 +59,6 @@ def getContext(self): return self._context -def _idnaBytes(text): - """ - Convert some text typed by a human into some ASCII bytes. This is a - copy of twisted.internet._idna._idnaBytes. For documentation, see the - twisted documentation. - """ - try: - import idna - except ImportError: - return text.encode("idna") - else: - return idna.encode(text) - - class ClientTLSOptionsFactory(object): """Factory for Twisted SSLClientConnectionCreators that are used to make connections to remote servers for federation. @@ -162,13 +149,21 @@ class ConnectionVerifier(object): This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by the ssl context's info callback. """ + # This code is based on twisted.internet.ssl.ClientTLSOptions. + def __init__(self, hostname, verify_certs): self._verify_certs = verify_certs + if isIPAddress(hostname) or isIPv6Address(hostname): self._hostnameBytes = hostname.encode('ascii') self._sendSNI = False else: + # twisted's ClientTLSOptions falls back to the stdlib impl here if + # idna is not installed, but points out that it lacks support for + # IDNA2008 (http://bugs.python.org/issue17305). + # + # We can rely on having idna. self._hostnameBytes = idna.encode(hostname) self._sendSNI = True self._hostnameASCII = self._hostnameBytes.decode("ascii") @@ -176,6 +171,7 @@ def __init__(self, hostname, verify_certs): def verify_context_info_cb(self, ssl_connection, where): if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: ssl_connection.set_tlsext_host_name(self._hostnameBytes) + if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs: try: verify_hostname(ssl_connection, self._hostnameASCII) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c78f2cb15e0c..db09ff285fbd 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -44,6 +44,7 @@ "canonicaljson>=1.1.3", "signedjson>=1.0.0", "pynacl>=1.2.1", + "idna>=2", "service_identity>=16.0.0", # our logcontext handling relies on the ability to cancel inlineCallbacks # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7. From efe7b3176ecfe81cb7eb94a6882228ba5682278d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 15:58:35 +0100 Subject: [PATCH 184/231] Fix federation connections to literal IP addresses Turns out we need a shiny version of service_identity to enforce this correctly. 
--- synapse/crypto/context_factory.py | 13 ++++++++----- synapse/python_dependencies.py | 4 +++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 2f2378263091..0639c228cb68 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -17,7 +17,7 @@ import idna from service_identity import VerificationError -from service_identity.pyopenssl import verify_hostname +from service_identity.pyopenssl import verify_hostname, verify_ip_address from zope.interface import implementer from OpenSSL import SSL, crypto @@ -156,7 +156,7 @@ def __init__(self, hostname, verify_certs): if isIPAddress(hostname) or isIPv6Address(hostname): self._hostnameBytes = hostname.encode('ascii') - self._sendSNI = False + self._is_ip_address = True else: # twisted's ClientTLSOptions falls back to the stdlib impl here if # idna is not installed, but points out that lacks support for @@ -164,17 +164,20 @@ def __init__(self, hostname, verify_certs): # # We can rely on having idna. self._hostnameBytes = idna.encode(hostname) - self._sendSNI = True + self._is_ip_address = False self._hostnameASCII = self._hostnameBytes.decode("ascii") def verify_context_info_cb(self, ssl_connection, where): - if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: + if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address: ssl_connection.set_tlsext_host_name(self._hostnameBytes) if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs: try: - verify_hostname(ssl_connection, self._hostnameASCII) + if self._is_ip_address: + verify_ip_address(ssl_connection, self._hostnameASCII) + else: + verify_hostname(ssl_connection, self._hostnameASCII) except VerificationError: f = Failure() tls_protocol = ssl_connection.get_app_data() diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index db09ff285fbd..6efd81f204a1 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -45,7 +45,9 @@ "signedjson>=1.0.0", "pynacl>=1.2.1", "idna>=2", - "service_identity>=16.0.0", + + # validating SSL certs for IP addresses requires service_identity 18.1. + "service_identity>=18.1.0", # our logcontext handling relies on the ability to cancel inlineCallbacks # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7. From e01668122126a4b6b7d45e2e24f591bb8546623b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 16:06:25 +0100 Subject: [PATCH 185/231] Tests for SSL certs for federation connections Add some tests for bad certificates for federation and .well-known connections --- tests/http/__init__.py | 126 +++++++++++-- tests/http/ca.crt | 19 ++ tests/http/ca.key | 27 +++ .../test_matrix_federation_agent.py | 169 ++++++++++++++++-- tests/http/server.key | 27 +++ tests/http/server.pem | 81 --------- 6 files changed, 343 insertions(+), 106 deletions(-) create mode 100644 tests/http/ca.crt create mode 100644 tests/http/ca.key create mode 100644 tests/http/server.key delete mode 100644 tests/http/server.pem diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 851fc0eb332e..b03fff0945b4 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -13,28 +13,124 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import os.path +import subprocess + +from zope.interface import implementer from OpenSSL import SSL +from OpenSSL.SSL import Connection +from twisted.internet.interfaces import IOpenSSLServerConnectionCreator + + +def get_test_ca_cert_file(): + """Get the path to the test CA cert + + The keypair is generated with: + + openssl genrsa -out ca.key 2048 + openssl req -new -x509 -key ca.key -days 3650 -out ca.crt \ + -subj '/CN=synapse test CA' + """ + return os.path.join(os.path.dirname(__file__), "ca.crt") + + +def get_test_key_file(): + """get the path to the test key + + The key file is made with: + + openssl genrsa -out server.key 2048 + """ + return os.path.join(os.path.dirname(__file__), "server.key") + + +cert_file_count = 0 + +CONFIG_TEMPLATE = b"""\ +[default] +basicConstraints = CA:FALSE +keyUsage=nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = %(sanentries)b +""" + + +def create_test_cert_file(sanlist): + """build an x509 certificate file + + Args: + sanlist: list[bytes]: a list of subjectAltName values for the cert + + Returns: + str: the path to the file + """ + global cert_file_count + csr_filename = "server.csr" + cnf_filename = "server.%i.cnf" % (cert_file_count,) + cert_filename = "server.%i.crt" % (cert_file_count,) + cert_file_count += 1 + + # first build a CSR + subprocess.run( + [ + "openssl", + "req", + "-new", + "-key", + get_test_key_file(), + "-subj", + "/", + "-out", + csr_filename, + ], + check=True, + ) + # now a config file describing the right SAN entries + sanentries = b",".join(sanlist) + with open(cnf_filename, "wb") as f: + f.write(CONFIG_TEMPLATE % {b"sanentries": sanentries}) -def get_test_cert_file(): - """get the path to the test cert""" + # finally the cert + ca_key_filename = os.path.join(os.path.dirname(__file__), "ca.key") + ca_cert_filename = get_test_ca_cert_file() + subprocess.run( + [ + "openssl", + "x509", + "-req", + "-in", + csr_filename, + "-CA", + ca_cert_filename, + "-CAkey", + ca_key_filename, + "-set_serial", + "1", + "-extfile", + cnf_filename, + "-out", + cert_filename, + ], + check=True, + ) - # the cert file itself is made with: - # - # openssl req -x509 -newkey rsa:4096 -keyout server.pem -out server.pem -days 36500 \ - # -nodes -subj '/CN=testserv' - return os.path.join(os.path.dirname(__file__), 'server.pem') + return cert_filename -class ServerTLSContext(object): - """A TLS Context which presents our test cert.""" +@implementer(IOpenSSLServerConnectionCreator) +class TestServerTLSConnectionFactory(object): + """An SSL connection creator which returns connections which present a certificate + signed by our test CA.""" - def __init__(self): - self.filename = get_test_cert_file() + def __init__(self, sanlist): + """ + Args: + sanlist: list[bytes]: a list of subjectAltName values for the cert + """ + self._cert_file = create_test_cert_file(sanlist) - def getContext(self): + def serverConnectionForTLS(self, tlsProtocol): ctx = SSL.Context(SSL.TLSv1_METHOD) - ctx.use_certificate_file(self.filename) - ctx.use_privatekey_file(self.filename) - return ctx + ctx.use_certificate_file(self._cert_file) + ctx.use_privatekey_file(get_test_key_file()) + return Connection(ctx, None) diff --git a/tests/http/ca.crt b/tests/http/ca.crt new file mode 100644 index 000000000000..730f81e99c02 --- /dev/null +++ b/tests/http/ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCjCCAfKgAwIBAgIJAPwHIHgH/jtjMA0GCSqGSIb3DQEBCwUAMBoxGDAWBgNV +BAMMD3N5bmFwc2UgdGVzdCBDQTAeFw0xOTA2MTAxMTI2NDdaFw0yOTA2MDcxMTI2 
+NDdaMBoxGDAWBgNVBAMMD3N5bmFwc2UgdGVzdCBDQTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOZOXCKuylf9jHzJXpU2nS+XEKrnGPgs2SAhQKrzBxg3 +/d8KT2Zsfsj1i3G7oGu7B0ZKO6qG5AxOPCmSMf9/aiSHFilfSh+r8rCpJyWMev2c +/w/xmhoFHgn+H90NnqlXvWb5y1YZCE3gWaituQSaa93GPKacRqXCgIrzjPUuhfeT +uwFQt4iyUhMNBYEy3aw4IuIHdyBqi4noUhR2ZeuflLJ6PswdJ8mEiAvxCbBGPerq +idhWcZwlo0fKu4u1uu5B8TnTsMg2fJgL6c5olBG90Urt22gA6anfP5W/U1ZdVhmB +T3Rv5SJMkGyMGE6sEUetLFyb2GJpgGD7ePkUCZr+IMMCAwEAAaNTMFEwHQYDVR0O +BBYEFLg7nTCYsvQXWTyS6upLc0YTlIwRMB8GA1UdIwQYMBaAFLg7nTCYsvQXWTyS +6upLc0YTlIwRMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADqx +GX4Ul5OGQlcG+xTt4u3vMCeqGo8mh1AnJ7zQbyRmwjJiNxJVX+/EcqFSTsmkBNoe +xdYITI7Z6dyoiKw99yCZDE7gALcyACEU7r0XY7VY/hebAaX6uLaw1sZKKAIC04lD +KgCu82tG85n60Qyud5SiZZF0q1XVq7lbvOYVdzVZ7k8Vssy5p9XnaLJLMggYeOiX +psHIQjvYGnTTEBZZHzWOrc0WGThd69wxTOOkAbCsoTPEwZL8BGUsdtLWtvhp452O +npvaUBzKg39R5X3KTdhB68XptiQfzbQkd3FtrwNuYPUywlsg55Bxkv85n57+xDO3 +D9YkgUqEp0RGUXQgCsQ= +-----END CERTIFICATE----- diff --git a/tests/http/ca.key b/tests/http/ca.key new file mode 100644 index 000000000000..5c99cae1867f --- /dev/null +++ b/tests/http/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA5k5cIq7KV/2MfMlelTadL5cQqucY+CzZICFAqvMHGDf93wpP +Zmx+yPWLcbuga7sHRko7qobkDE48KZIx/39qJIcWKV9KH6vysKknJYx6/Zz/D/Ga +GgUeCf4f3Q2eqVe9ZvnLVhkITeBZqK25BJpr3cY8ppxGpcKAivOM9S6F95O7AVC3 +iLJSEw0FgTLdrDgi4gd3IGqLiehSFHZl65+Usno+zB0nyYSIC/EJsEY96uqJ2FZx +nCWjR8q7i7W67kHxOdOwyDZ8mAvpzmiUEb3RSu3baADpqd8/lb9TVl1WGYFPdG/l +IkyQbIwYTqwRR60sXJvYYmmAYPt4+RQJmv4gwwIDAQABAoIBAQCFuFG+wYYy+MCt +Y65LLN6vVyMSWAQjdMbM5QHLQDiKU1hQPIhFjBFBVXCVpL9MTde3dDqYlKGsk3BT +ItNs6eoTM2wmsXE0Wn4bHNvh7WMsBhACjeFP4lDCtI6DpvjMkmkidT8eyoIL1Yu5 +aMTYa2Dd79AfXPWYIQrJowfhBBY83KuW5fmYnKKDVLqkT9nf2dgmmQz85RgtNiZC +zFkIsNmPqH1zRbcw0wORfOBrLFvsMc4Tt8EY5Wz3NnH8Zfgf8Q3MgARH1yspz3Vp +B+EYHbsK17xZ+P59KPiX3yefvyYWEUjFF7ymVsVnDxLugYl4pXwWUpm19GxeDvFk +cgBUD5OBAoGBAP7lBdCp6lx6fYtxdxUm3n4MMQmYcac4qZdeBIrvpFMnvOBBuixl +eavcfFmFdwgAr8HyVYiu9ynac504IYvmtYlcpUmiRBbmMHbvLQEYHl7FYFKNz9ej +2ue4oJE3RsPdLsD3xIlc+xN8oT1j0knyorwsHdj0Sv77eZzZS9XZZfJzAoGBAOdO +CibYmoNqK/mqDHkp6PgsnbQGD5/CvPF/BLUWV1QpHxLzUQQeoBOQW5FatHe1H5zi +mbq3emBefVmsCLrRIJ4GQu4vsTMfjcpGLwviWmaK6pHbGPt8IYeEQ2MNyv59EtA2 +pQy4dX7/Oe6NLAR1UEQjXmCuXf+rxnxF3VJd1nRxAoGBANb9eusl9fusgSnVOTjJ +AQ7V36KVRv9hZoG6liBNwo80zDVmms4JhRd1MBkd3mkMkzIF4SkZUnWlwLBSANGM +dX/3eZ5i1AVwgF5Am/f5TNxopDbdT/o1RVT/P8dcFT7s1xuBn+6wU0F7dFBgWqVu +lt4aY85zNrJcj5XBHhqwdDGLAoGBAIksPNUAy9F3m5C6ih8o/aKAQx5KIeXrBUZq +v43tK+kbYfRJHBjHWMOBbuxq0G/VmGPf9q9GtGqGXuxZG+w+rYtJx1OeMQZShjIZ +ITl5CYeahrXtK4mo+fF2PMh3m5UE861LWuKKWhPwpJiWXC5grDNcjlHj1pcTdeip +PjHkuJPhAoGBAIh35DptqqdicOd3dr/+/m2YQywY8aSpMrR0bC06aAkscD7oq4tt +s/jwl0UlHIrEm/aMN7OnGIbpfkVdExfGKYaa5NRlgOwQpShwLufIo/c8fErd2zb8 +K3ptlwBxMrayMXpS3DP78r83Z0B8/FSK2guelzdRJ3ftipZ9io1Gss1C +-----END RSA PRIVATE KEY----- diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 05880a10489b..ecce473b011c 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -17,12 +17,14 @@ from mock import Mock import treq +from service_identity import VerificationError from zope.interface import implementer from twisted.internet import defer from twisted.internet._sslverify import ClientTLSOptions, OpenSSLCertificateOptions from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.web._newclient import ResponseNeverReceived from 
twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS @@ -37,13 +39,29 @@ from synapse.util.caches.ttlcache import TTLCache from synapse.util.logcontext import LoggingContext -from tests.http import ServerTLSContext +from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase from tests.utils import default_config logger = logging.getLogger(__name__) +test_server_connection_factory = None + + +def get_connection_factory(): + # this needs to happen once, but not until we are ready to run the first test + global test_server_connection_factory + if test_server_connection_factory is None: + test_server_connection_factory = TestServerTLSConnectionFactory(sanlist=[ + b'DNS:testserv', + b'DNS:target-server', + b'DNS:xn--bcher-kva.com', + b'IP:1.2.3.4', + b'IP:::1', + ]) + return test_server_connection_factory + class MatrixFederationAgentTests(TestCase): def setUp(self): @@ -53,12 +71,11 @@ def setUp(self): self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) - # for now, we disable cert verification for the test, since the cert we - # present will not be trusted. We should do better here, though. config_dict = default_config("test", parse=False) - config_dict["federation_verify_certificates"] = False - config_dict["trusted_key_servers"] = [] - config = HomeServerConfig() + config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()] + # config_dict["trusted_key_servers"] = [] + + self._config = config = HomeServerConfig() config.parse_config_dict(config_dict) self.agent = MatrixFederationAgent( @@ -77,7 +94,7 @@ def _make_connection(self, client_factory, expected_sni): """ # build the test server - server_tls_protocol = _build_test_server() + server_tls_protocol = _build_test_server(get_connection_factory()) # now, tell the client protocol factory to build the client protocol (it will be a # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an @@ -328,6 +345,88 @@ def test_get_ipv6_address_with_port(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) + def test_get_hostname_bad_cert(self): + """ + Test the behaviour when the certificate on the server doesn't match the hostname + """ + self.mock_resolver.resolve_service.side_effect = lambda _: [] + self.reactor.lookups["testserv1"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix://testserv1/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # No SRV record lookup yet + self.mock_resolver.resolve_service.assert_not_called() + + # there should be an attempt to connect on port 443 for the .well-known + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, '1.2.3.4') + self.assertEqual(port, 443) + + # fonx the connection + client_factory.clientConnectionFailed(None, Exception("nope")) + + # attemptdelay on the hostnameendpoint is 0.3, so takes that long before the + # .well-known request fails. 
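+        # (the 0.4s pump below therefore comfortably exceeds that delay, so
+        # the failed .well-known attempt has been abandoned by the time we
+        # assert that the SRV fallback has begun)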
+ self.reactor.pump((0.4,)) + + # now there should be a SRV lookup + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix._tcp.testserv1" + ) + + # we should fall back to a direct connection + self.assertEqual(len(clients), 2) + (host, port, client_factory, _timeout, _bindAddress) = clients[1] + self.assertEqual(host, '1.2.3.4') + self.assertEqual(port, 8448) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=b'testserv1') + + # there should be no requests + self.assertEqual(len(http_server.requests), 0) + + # ... and the request should have failed + e = self.failureResultOf(test_d, ResponseNeverReceived) + failure_reason = e.value.reasons[0] + self.assertIsInstance(failure_reason.value, VerificationError) + + def test_get_ip_address_bad_cert(self): + """ + Test the behaviour when the server name contains an explicit IP, but + the server cert doesn't cover it + """ + # there will be a getaddrinfo on the IP + self.reactor.lookups["1.2.3.5"] = "1.2.3.5" + + test_d = self._make_get_request(b"matrix://1.2.3.5/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, '1.2.3.5') + self.assertEqual(port, 8448) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=None) + + # there should be no requests + self.assertEqual(len(http_server.requests), 0) + + # ... and the request should have failed + e = self.failureResultOf(test_d, ResponseNeverReceived) + failure_reason = e.value.reasons[0] + self.assertIsInstance(failure_reason.value, VerificationError) + def test_get_no_srv_no_well_known(self): """ Test the behaviour when the server name has no port, no SRV, and no well-known @@ -585,6 +684,49 @@ def test_get_invalid_well_known(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) + def test_get_well_known_unsigned_cert(self): + """Test the behaviour when the .well-known server presents a cert + not signed by a CA + """ + + # we use the same test server as the other tests, but use an agent + # with _well_known_tls_policy left to the default, which will not + # trust it (since the presented cert is signed by a test CA) + + self.mock_resolver.resolve_service.side_effect = lambda _: [] + self.reactor.lookups["testserv"] = "1.2.3.4" + + agent = MatrixFederationAgent( + reactor=self.reactor, + tls_client_options_factory=ClientTLSOptionsFactory(self._config), + _srv_resolver=self.mock_resolver, + _well_known_cache=self.well_known_cache, + ) + + test_d = agent.request(b"GET", b"matrix://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # there should be an attempt to connect on port 443 for the .well-known + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, '1.2.3.4') + self.assertEqual(port, 443) + + http_proto = self._make_connection( + client_factory, expected_sni=b"testserv", + ) + + # there should be no requests + self.assertEqual(len(http_proto.requests), 0) + + # and there should be a SRV lookup instead + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix._tcp.testserv" + ) + def test_get_hostname_srv(self): """ Test the behaviour when there is a single SRV record @@ 
-918,11 +1060,17 @@ def _check_logcontext(context): raise AssertionError("Expected logcontext %s but was %s" % (context, current)) -def _build_test_server(): +def _build_test_server(connection_creator): """Construct a test server This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol + Args: + connection_creator (IOpenSSLServerConnectionCreator): thing to build + SSL connections + sanlist (list[bytes]): list of the SAN entries for the cert returned + by the server + Returns: TLSMemoryBIOProtocol """ @@ -931,7 +1079,7 @@ def _build_test_server(): server_factory.log = _log_request server_tls_factory = TLSMemoryBIOFactory( - ServerTLSContext(), isClient=False, wrappedFactory=server_factory + connection_creator, isClient=False, wrappedFactory=server_factory ) return server_tls_factory.buildProtocol(None) @@ -944,7 +1092,8 @@ def _log_request(request): @implementer(IPolicyForHTTPS) class TrustingTLSPolicyForHTTPS(object): - """An IPolicyForHTTPS which doesn't do any certificate verification""" + """An IPolicyForHTTPS which checks that the certificate belongs to the + right server, but doesn't check the certificate chain.""" def creatorForNetloc(self, hostname, port): certificateOptions = OpenSSLCertificateOptions() diff --git a/tests/http/server.key b/tests/http/server.key new file mode 100644 index 000000000000..c53ee02b21cb --- /dev/null +++ b/tests/http/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvUAWLOE6TEp3FYSfEnJMwYtJg3KIW5BjiAOOvFVOVQfJ5eEa +vzyJ1Z+8DUgLznFnUkAeD9GjPvP7awl3NPJKLQSMkV5Tp+ea4YyV+Aa4R7flROEa +zCGvmleydZw0VqN1atVZ0ikEoglM/APJQd70ec7KSR3QoxaV2/VNCHmyAPdP+0WI +llV54VXX1CZrWSHaCSn1gzo3WjnGbxTOCQE5Z4k5hqJAwLWWhxDv+FX/jD38Sq3H +gMFNpXJv6FYwwaKU8awghHdSY/qlBPE/1rU83vIBFJ3jW6I1WnQDfCQ69of5vshK +N4v4hok56ScwdUnk8lw6xvJx1Uav/XQB9qGh4QIDAQABAoIBAQCHLO5p8hotAgdb +JFZm26N9nxrMPBOvq0ucjEX4ucnwrFaGzynGrNwa7TRqHCrqs0/EjS2ryOacgbL0 +eldeRy26SASLlN+WD7UuI7e+6DXabDzj3RHB+tGuIbPDk+ZCeBDXVTsKBOhdQN1v +KNkpJrJjCtSsMxKiWvCBow353srJKqCDZcF5NIBYBeDBPMoMbfYn5dJ9JhEf+2h4 +0iwpnWDX1Vqf46pCRa0hwEyMXycGeV2CnfJSyV7z52ZHQrvkz8QspSnPpnlCnbOE +UAvc8kZ5e8oZE7W+JfkK38vHbEGM1FCrBmrC/46uUGMRpZfDferGs91RwQVq/F0n +JN9hLzsBAoGBAPh2pm9Xt7a4fWSkX0cDgjI7PT2BvLUjbRwKLV+459uDa7+qRoGE +sSwb2QBqmQ1kbr9JyTS+Ld8dyUTsGHZK+YbTieAxI3FBdKsuFtcYJO/REN0vik+6 +fMaBHPvDHSU2ioq7spZ4JBFskzqs38FvZ0lX7aa3fguMk8GMLnofQ8QxAoGBAML9 +o5sJLN9Tk9bv2aFgnERgfRfNjjV4Wd99TsktnCD04D1GrP2eDSLfpwFlCnguck6b +jxikqcolsNhZH4dgYHqRNj+IljSdl+sYZiygO6Ld0XU+dEFO86N3E9NzZhKcQ1at +85VdwNPCS7JM2fIxEvS9xfbVnsmK6/37ZZ5iI7yxAoGBALw2vRtJGmy60pojfd1A +hibhAyINnlKlFGkSOI7zdgeuRTf6l9BTIRclvTt4hJpFgzM6hMWEbyE94hJoupsZ +bm443o/LCWsox2VI05p6urhD6f9znNWKkiyY78izY+elqksvpjgfqEresaTYAeP5 +LQe9KNSK2VuMUP1j4G04M9BxAoGAWe8ITZJuytZOgrz/YIohqPvj1l2tcIYA1a6C +7xEFSMIIxtpZIWSLZIFJEsCakpHBkPX4iwIveZfmt/JrM1JFTWK6ZZVGyh/BmOIZ +Bg4lU1oBqJTUo+aZQtTCJS29b2n5OPpkNYkXTdP4e9UsVKNDvfPlYZJneUeEzxDr +bqCPIRECgYA544KMwrWxDQZg1dsKWgdVVKx80wEFZAiQr9+0KF6ch6Iu7lwGJHFY +iI6O85paX41qeC/Fo+feIWJVJU2GvG6eBsbO4bmq+KSg4NkABJSYxodgBp9ftNeD +jo1tfw+gudlNe5jXHu7oSX93tqGjR4Cnlgan/KtfkB96yHOumGmOhQ== +-----END RSA PRIVATE KEY----- diff --git a/tests/http/server.pem b/tests/http/server.pem deleted file mode 100644 index 0584cf1a8028..000000000000 --- a/tests/http/server.pem +++ /dev/null @@ -1,81 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCgF43/3lAgJ+p0 -x7Rn8UcL8a4fctvdkikvZrCngw96LkB34Evfq8YGWlOVjU+f9naUJLAKMatmAfEN -r+rMX4VOXmpTwuu6iLtqwreUrRFMESyrmvQxa15p+y85gkY0CFmXMblv6ORbxHTG 
-ncBGwST4WK4Poewcgt6jcISFCESTUKu1zc3cw1ANIDRyDLB5K44KwIe36dcKckyN -Kdtv4BJ+3fcIZIkPJH62zqCypgFF1oiFt40uJzClxgHdJZlKYpgkfnDTckw4Y/Mx -9k8BbE310KAzUNMV9H7I1eEolzrNr66FQj1eN64X/dqO8lTbwCqAd4diCT4sIUk0 -0SVsAUjNd3g8j651hx+Qb1t8fuOjrny8dmeMxtUgIBHoQcpcj76R55Fs7KZ9uar0 -8OFTyGIze51W1jG2K/7/5M1zxIqrA+7lsXu5OR81s7I+Ng/UUAhiHA/z+42/aiNa -qEuk6tqj3rHfLctnCbtZ+JrRNqSSwEi8F0lMA021ivEd2eJV+284OyJjhXOmKHrX -QADHrmS7Sh4syTZvRNm9n+qWID0KdDr2Sji/KnS3Enp44HDQ4xriT6/xhwEGsyuX -oH5aAkdLznulbWkHBbyx1SUQSTLpOqzaioF9m1vRrLsFvrkrY3D253mPJ5eU9HM/ -dilduFcUgj4rz+6cdXUAh+KK/v95zwIDAQABAoICAFG5tJPaOa0ws0/KYx5s3YgL -aIhFalhCNSQtmCDrlwsYcXDA3/rfBchYdDL0YKGYgBBAal3J3WXFt/j0xThvyu2m -5UC9UPl4s7RckrsjXqEmY1d3UxGnbhtMT19cUdpeKN42VCP9EBaIw9Rg07dLAkSF -gNYaIx6q8F0fI4eGIPvTQtUcqur4CfWpaxyNvckdovV6M85/YXfDwbCOnacPDGIX -jfSK3i0MxGMuOHr6o8uzKR6aBUh6WStHWcw7VXXTvzdiFNbckmx3Gb93rf1b/LBw -QFfx+tBKcC62gKroCOzXso/0sL9YTVeSD/DJZOiJwSiz3Dj/3u1IUMbVvfTU8wSi -CYS7Z+jHxwSOCSSNTXm1wO/MtDsNKbI1+R0cohr/J9pOMQvrVh1+2zSDOFvXAQ1S -yvjn+uqdmijRoV2VEGVHd+34C+ci7eJGAhL/f92PohuuFR2shUETgGWzpACZSJwg -j1d90Hs81hj07vWRb+xCeDh00vimQngz9AD8vYvv/S4mqRGQ6TZdfjLoUwSTg0JD -6sQgRXX026gQhLhn687vLKZfHwzQPZkpQdxOR0dTZ/ho/RyGGRJXH4kN4cA2tPr+ -AKYQ29YXGlEzGG7OqikaZcprNWG6UFgEpuXyBxCgp9r4ladZo3J+1Rhgus8ZYatd -uO98q3WEBmP6CZ2n32mBAoIBAQDS/c/ybFTos0YpGHakwdmSfj5OOQJto2y8ywfG -qDHwO0ebcpNnS1+MA+7XbKUQb/3Iq7iJljkkzJG2DIJ6rpKynYts1ViYpM7M/t0T -W3V1gvUcUL62iqkgws4pnpWmubFkqV31cPSHcfIIclnzeQ1aOEGsGHNAvhty0ciC -DnkJACbqApvopFLOR5f6UFTtKExE+hDH0WqgpsCAKJ1L4g6pBzZatI32/CN9JEVU -tDbxLV75hHlFFjUrG7nT1rPyr/gI8Ceh9/2xeXPfjJUR0PrG3U1nwLqUCZkvFzO6 -XpN2+A+/v4v5xqMjKDKDFy1oq6SCMomwv/viw6wl/84TMbolAoIBAQDCPiMecnR8 -REik6tqVzQO/uSe9ZHjz6J15t5xdwaI6HpSwLlIkQPkLTjyXtFpemK5DOYRxrJvQ -remfrZrN2qtLlb/DKpuGPWRsPOvWCrSuNEp48ivUehtclljrzxAFfy0sM+fWeJ48 -nTnR+td9KNhjNtZixzWdAy/mE+jdaMsXVnk66L73Uz+2WsnvVMW2R6cpCR0F2eP/ -B4zDWRqlT2w47sePAB81mFYSQLvPC6Xcgg1OqMubfiizJI49c8DO6Jt+FFYdsxhd -kG52Eqa/Net6rN3ueiS6yXL5TU3Y6g96bPA2KyNCypucGcddcBfqaiVx/o4AH6yT -NrdsrYtyvk/jAoIBAQDHUwKVeeRJJbvdbQAArCV4MI155n+1xhMe1AuXkCQFWGtQ -nlBE4D72jmyf1UKnIbW2Uwv15xY6/ouVWYIWlj9+QDmMaozVP7Uiko+WDuwLRNl8 -k4dn+dzHV2HejbPBG2JLv3lFOx23q1zEwArcaXrExaq9Ayg2fKJ/uVHcFAIiD6Oz -pR1XDY4w1A/uaN+iYFSVQUyDCQLbnEz1hej73CaPZoHh9Pq83vxD5/UbjVjuRTeZ -L55FNzKpc/r89rNvTPBcuUwnxplDhYKDKVNWzn9rSXwrzTY2Tk8J3rh+k4RqevSd -6D47jH1n5Dy7/TRn0ueKHGZZtTUnyEUkbOJo3ayFAoIBAHKDyZaQqaX9Z8p6fwWj -yVsFoK0ih8BcWkLBAdmwZ6DWGJjJpjmjaG/G3ygc9s4gO1R8m12dAnuDnGE8KzDD -gwtbrKM2Alyg4wyA2hTlWOH/CAzH0RlCJ9Fs/d1/xJVJBeuyajLiB3/6vXTS6qnq -I7BSSxAPG8eGcn21LSsjNeB7ZZtaTgNnu/8ZBUYo9yrgkWc67TZe3/ChldYxOOlO -qqHh/BqNWtjxB4VZTp/g4RbgQVInZ2ozdXEv0v/dt0UEk29ANAjsZif7F3RayJ2f -/0TilzCaJ/9K9pKNhaClVRy7Dt8QjYg6BIWCGSw4ApF7pLnQ9gySn95mersCkVzD -YDsCggEAb0E/TORjQhKfNQvahyLfQFm151e+HIoqBqa4WFyfFxe/IJUaLH/JSSFw -VohbQqPdCmaAeuQ8ERL564DdkcY5BgKcax79fLLCOYP5bT11aQx6uFpfl2Dcm6Z9 -QdCRI4jzPftsd5fxLNH1XtGyC4t6vTic4Pji2O71WgWzx0j5v4aeDY4sZQeFxqCV -/q7Ee8hem1Rn5RFHu14FV45RS4LAWl6wvf5pQtneSKzx8YL0GZIRRytOzdEfnGKr -FeUlAj5uL+5/p0ZEgM7gPsEBwdm8scF79qSUn8UWSoXNeIauF9D4BDg8RZcFFxka -KILVFsq3cQC+bEnoM4eVbjEQkGs1RQ== ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIE/jCCAuagAwIBAgIJANFtVaGvJWZlMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV -BAMMCHRlc3RzZXJ2MCAXDTE5MDEyNzIyMDIzNloYDzIxMTkwMTAzMjIwMjM2WjAT -MREwDwYDVQQDDAh0ZXN0c2VydjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC -ggIBAKAXjf/eUCAn6nTHtGfxRwvxrh9y292SKS9msKeDD3ouQHfgS9+rxgZaU5WN -T5/2dpQksAoxq2YB8Q2v6sxfhU5ealPC67qIu2rCt5StEUwRLKua9DFrXmn7LzmC -RjQIWZcxuW/o5FvEdMadwEbBJPhYrg+h7ByC3qNwhIUIRJNQq7XNzdzDUA0gNHIM 
-sHkrjgrAh7fp1wpyTI0p22/gEn7d9whkiQ8kfrbOoLKmAUXWiIW3jS4nMKXGAd0l -mUpimCR+cNNyTDhj8zH2TwFsTfXQoDNQ0xX0fsjV4SiXOs2vroVCPV43rhf92o7y -VNvAKoB3h2IJPiwhSTTRJWwBSM13eDyPrnWHH5BvW3x+46OufLx2Z4zG1SAgEehB -ylyPvpHnkWzspn25qvTw4VPIYjN7nVbWMbYr/v/kzXPEiqsD7uWxe7k5HzWzsj42 -D9RQCGIcD/P7jb9qI1qoS6Tq2qPesd8ty2cJu1n4mtE2pJLASLwXSUwDTbWK8R3Z -4lX7bzg7ImOFc6YoetdAAMeuZLtKHizJNm9E2b2f6pYgPQp0OvZKOL8qdLcSenjg -cNDjGuJPr/GHAQazK5egfloCR0vOe6VtaQcFvLHVJRBJMuk6rNqKgX2bW9GsuwW+ -uStjcPbneY8nl5T0cz92KV24VxSCPivP7px1dQCH4or+/3nPAgMBAAGjUzBRMB0G -A1UdDgQWBBQcQZpzLzTk5KdS/Iz7sGCV7gTd/zAfBgNVHSMEGDAWgBQcQZpzLzTk -5KdS/Iz7sGCV7gTd/zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IC -AQAr/Pgha57jqYsDDX1LyRrVdqoVBpLBeB7x/p9dKYm7S6tBTDFNMZ0SZyQP8VEG -7UoC9/OQ9nCdEMoR7ZKpQsmipwcIqpXHS6l4YOkf5EEq5jpMgvlEesHmBJJeJew/ -FEPDl1bl8d0tSrmWaL3qepmwzA+2lwAAouWk2n+rLiP8CZ3jZeoTXFqYYrUlEqO9 -fHMvuWqTV4KCSyNY+GWCrnHetulgKHlg+W2J1mZnrCKcBhWf9C2DesTJO+JldIeM -ornTFquSt21hZi+k3aySuMn2N3MWiNL8XsZVsAnPSs0zA+2fxjJkShls8Gc7cCvd -a6XrNC+PY6pONguo7rEU4HiwbvnawSTngFFglmH/ImdA/HkaAekW6o82aI8/UxFx -V9fFMO3iKDQdOrg77hI1bx9RlzKNZZinE2/Pu26fWd5d2zqDWCjl8ykGQRAfXgYN -H3BjgyXLl+ao5/pOUYYtzm3ruTXTgRcy5hhL6hVTYhSrf9vYh4LNIeXNKnZ78tyG -TX77/kU2qXhBGCFEUUMqUNV/+ITir2lmoxVjknt19M07aGr8C7SgYt6Rs+qDpMiy -JurgvRh8LpVq4pHx1efxzxCFmo58DMrG40I0+CF3y/niNpOb1gp2wAqByRiORkds -f0ytW6qZ0TpHbD6gOtQLYDnhx3ISuX+QYSekVwQUpffeWQ== ------END CERTIFICATE----- From c413540fb9e4b6ee2ec975a98676ea56d12249c8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 16:21:42 +0100 Subject: [PATCH 186/231] Fix bug sending federation transactions with lots of EDUs If we try and send a transaction with lots of EDUs and we run out of space, we call get_new_device_msgs_for_remote with a limit of 0, which then failed. --- synapse/storage/deviceinbox.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 9b0a99cb490e..4ea0deea4ff8 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -138,6 +138,10 @@ def get_new_device_msgs_for_remote( if not has_changed or last_stream_id == current_stream_id: return defer.succeed(([], current_stream_id)) + if limit <= 0: + # This can happen if we run out of room for EDUs in the transaction. + return defer.succeed(([], last_stream_id)) + def get_new_messages_for_remote_destination_txn(txn): sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" From 8d0bd9bb6054911589f82f71b5886bcfcade0de3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 16:23:07 +0100 Subject: [PATCH 187/231] fix build fails --- MANIFEST.in | 4 +++- changelog.d/5417.bugfix | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5417.bugfix diff --git a/MANIFEST.in b/MANIFEST.in index ad1523e38706..2c59c7bdc29a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -18,8 +18,10 @@ recursive-include docs * recursive-include scripts * recursive-include scripts-dev * recursive-include synapse *.pyi -recursive-include tests *.pem recursive-include tests *.py +include tests/http/ca.crt +include tests/http/ca.key +include tests/http/server.key recursive-include synapse/res * recursive-include synapse/static *.css diff --git a/changelog.d/5417.bugfix b/changelog.d/5417.bugfix new file mode 100644 index 000000000000..54be963a4e8e --- /dev/null +++ b/changelog.d/5417.bugfix @@ -0,0 +1 @@ +Fix excessive memory using with default `federation_verify_certificates: true` configuration. 
\ No newline at end of file From 1fb6f686165442c12f0c1a723de49884325e1486 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 16:26:36 +0100 Subject: [PATCH 188/231] Newsfile --- changelog.d/5418.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5418.bugfix diff --git a/changelog.d/5418.bugfix b/changelog.d/5418.bugfix new file mode 100644 index 000000000000..018f0df2a6bd --- /dev/null +++ b/changelog.d/5418.bugfix @@ -0,0 +1 @@ +Fix bug where attemptint to send transactions with large number of EDUs can fail. From 48748c00c416e2fd4fcf7dbc41ce72c02e6fdf6b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 16:28:45 +0100 Subject: [PATCH 189/231] Update changelog.d/5418.bugfix Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5418.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5418.bugfix b/changelog.d/5418.bugfix index 018f0df2a6bd..3fd4d2a88215 100644 --- a/changelog.d/5418.bugfix +++ b/changelog.d/5418.bugfix @@ -1 +1 @@ -Fix bug where attemptint to send transactions with large number of EDUs can fail. +Fix bug where attempting to send transactions with large number of EDUs can fail. From 19780a521ec1d200bbc1d25bf5041f8fc5691b40 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 17:41:10 +0100 Subject: [PATCH 190/231] fix CI on python 2.7 --- tests/http/__init__.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index b03fff0945b4..126826fd3f9c 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -70,7 +70,7 @@ def create_test_cert_file(sanlist): cert_file_count += 1 # first build a CSR - subprocess.run( + subprocess.check_call( [ "openssl", "req", @@ -81,8 +81,7 @@ def create_test_cert_file(sanlist): "/", "-out", csr_filename, - ], - check=True, + ] ) # now a config file describing the right SAN entries @@ -93,7 +92,7 @@ def create_test_cert_file(sanlist): # finally the cert ca_key_filename = os.path.join(os.path.dirname(__file__), "ca.key") ca_cert_filename = get_test_ca_cert_file() - subprocess.run( + subprocess.check_call( [ "openssl", "x509", @@ -110,8 +109,7 @@ def create_test_cert_file(sanlist): cnf_filename, "-out", cert_filename, - ], - check=True, + ] ) return cert_filename From 81b8fdedf2e2504555b76ebbedfac88521d3c93f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 17:51:11 +0100 Subject: [PATCH 191/231] rename gutwrenched attr --- synapse/crypto/context_factory.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 0639c228cb68..2bc5cc38073e 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -110,8 +110,10 @@ def _context_info_cb(ssl_connection, where, ret): tls_protocol = ssl_connection.get_app_data() try: # ... we further assume that SSLClientConnectionCreator has set the - # 'tls_verifier' attribute to a ConnectionVerifier object. - tls_protocol.tls_verifier.verify_context_info_cb(ssl_connection, where) + # '_synapse_tls_verifier' attribute to a ConnectionVerifier object. 
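+        # (that attribute is attached to the protocol in
+        # SSLClientConnectionCreator.clientConnectionForTLS, via the
+        # setattr call renamed later in this patch)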
+ tls_protocol._synapse_tls_verifier.verify_context_info_cb( + ssl_connection, where + ) except: # noqa: E722, taken from the twisted implementation logger.exception("Error during info_callback") f = Failure() @@ -124,6 +126,7 @@ class SSLClientConnectionCreator(object): Replaces twisted.internet.ssl.ClientTLSOptions """ + def __init__(self, hostname, ctx, verify_certs): self._ctx = ctx self._verifier = ConnectionVerifier(hostname, verify_certs) @@ -136,10 +139,10 @@ def clientConnectionForTLS(self, tls_protocol): # data to our TLSMemoryBIOProtocol... connection.set_app_data(tls_protocol) - # ... and we also gut-wrench a 'tls_verifier' attribute into the + # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the # tls_protocol so that the SSL context's info callback has something to # call to do the cert verification. - setattr(tls_protocol, "tls_verifier", self._verifier) + setattr(tls_protocol, "_synapse_tls_verifier", self._verifier) return connection @@ -149,13 +152,14 @@ class ConnectionVerifier(object): This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by the ssl context's info callback. """ + # This code is based on twisted.internet.ssl.ClientTLSOptions. def __init__(self, hostname, verify_certs): self._verify_certs = verify_certs if isIPAddress(hostname) or isIPv6Address(hostname): - self._hostnameBytes = hostname.encode('ascii') + self._hostnameBytes = hostname.encode("ascii") self._is_ip_address = True else: # twisted's ClientTLSOptions falls back to the stdlib impl here if From db74c4fc6ce2982a4e563c98b3affca3169b3f18 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 17:55:01 +0100 Subject: [PATCH 192/231] fix ci on py2, again --- tests/http/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 126826fd3f9c..2d5dba646477 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -50,7 +50,7 @@ def get_test_key_file(): [default] basicConstraints = CA:FALSE keyUsage=nonRepudiation, digitalSignature, keyEncipherment -subjectAltName = %(sanentries)b +subjectAltName = %(sanentries)s """ From 01674479651cd12c995581911cfb58e5d5493495 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 18:17:43 +0100 Subject: [PATCH 193/231] 1.0.0rc2 --- CHANGES.md | 11 +++++++++++ changelog.d/5392.bugfix | 1 - changelog.d/5415.bugfix | 1 - changelog.d/5417.bugfix | 1 - synapse/__init__.py | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5392.bugfix delete mode 100644 changelog.d/5415.bugfix delete mode 100644 changelog.d/5417.bugfix diff --git a/CHANGES.md b/CHANGES.md index 4dea0f6319ae..523cdb1153d7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.0.0rc2 (2019-06-10) +============================= + +Bugfixes +-------- + +- Remove redundant warning about key server response validation. ([\#5392](https://github.com/matrix-org/synapse/issues/5392)) +- Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. ([\#5415](https://github.com/matrix-org/synapse/issues/5415)) +- Fix excessive memory using with default `federation_verify_certificates: true` configuration. 
([\#5417](https://github.com/matrix-org/synapse/issues/5417)) + + Synapse 1.0.0rc1 (2019-06-07) ============================= diff --git a/changelog.d/5392.bugfix b/changelog.d/5392.bugfix deleted file mode 100644 index 295a7cfce1ce..000000000000 --- a/changelog.d/5392.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove redundant warning about key server response validation. diff --git a/changelog.d/5415.bugfix b/changelog.d/5415.bugfix deleted file mode 100644 index 83629e193da4..000000000000 --- a/changelog.d/5415.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. diff --git a/changelog.d/5417.bugfix b/changelog.d/5417.bugfix deleted file mode 100644 index 54be963a4e8e..000000000000 --- a/changelog.d/5417.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix excessive memory using with default `federation_verify_certificates: true` configuration. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 77a4cfc3a5f7..8dc07fe73c1a 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ except ImportError: pass -__version__ = "1.0.0rc1" +__version__ = "1.0.0rc2" From 49e01e5710fdbd9bb8da24844718eb2f5d6ee5c7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 23:09:31 +0100 Subject: [PATCH 194/231] Fix defaults on checking threepids --- synapse/handlers/auth.py | 1 + synapse/storage/registration.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7f8ddc99c6bd..a0cf37a9f9e0 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -479,6 +479,7 @@ def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs): medium, threepid_creds["client_secret"], sid=threepid_creds["sid"], + validated=True, ) threepid = { diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9b41cbd757ef..1dd1182e82cb 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -998,7 +998,7 @@ def get_threepid_validation_session( client_secret, address=None, sid=None, - validated=None, + validated=True, ): """Gets a session_id and last_send_attempt (if available) for a client_secret/medium/(address|session_id) combo From 94dac0f3e55938916ec76a2183d6703af6ea4362 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 10 Jun 2019 23:33:59 +0100 Subject: [PATCH 195/231] add monthly active users to phonehome stats (#5252) * add monthly active users to phonehome stats --- changelog.d/5252.feature | 1 + synapse/app/homeserver.py | 1 + synapse/storage/__init__.py | 44 ++++++++++++++++++++++++------------- 3 files changed, 31 insertions(+), 15 deletions(-) create mode 100644 changelog.d/5252.feature diff --git a/changelog.d/5252.feature b/changelog.d/5252.feature new file mode 100644 index 000000000000..44115b0382ef --- /dev/null +++ b/changelog.d/5252.feature @@ -0,0 +1 @@ +Add monthly active users to phonehome stats. 
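The storage hunk below collapses the existing daily count and the new monthly count into one parameterised helper, so both phone-home figures run the same GROUP BY query over `user_ips` with different "seen since" timestamps. A condensed, self-contained sketch of that helper, assuming only the schema visible in the diff (`txn` is a DB-API cursor, e.g. sqlite3's):

    # Sketch of the shared _count_users helper added in the hunk below:
    # count distinct users whose last_seen falls inside the window. The
    # daily and monthly stats differ only in the timestamp passed in.
    DAY_MS = 1000 * 60 * 60 * 24

    def _count_users(txn, time_from):
        sql = """
            SELECT COALESCE(count(*), 0) FROM (
                SELECT user_id FROM user_ips
                WHERE last_seen > ?
                GROUP BY user_id
            ) u
        """
        txn.execute(sql, (time_from,))
        (count,) = txn.fetchone()
        return count

    # daily:   _count_users(txn, now_ms - DAY_MS)
    # monthly: _count_users(txn, now_ms - 30 * DAY_MS)
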
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index df524a23dd52..811b547dd3a3 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -541,6 +541,7 @@ def phone_stats_home(): stats["total_room_count"] = room_count stats["daily_active_users"] = yield hs.get_datastore().count_daily_users() + stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users() stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 71316f7d093e..0ca6f6121fe0 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -279,23 +279,37 @@ def count_daily_users(self): """ Counts the number of users who used this homeserver in the last 24 hours. """ + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + return self.runInteraction("count_daily_users", self._count_users, yesterday,) - def _count_users(txn): - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) - - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT user_id FROM user_ips - WHERE last_seen > ? - GROUP BY user_id - ) u - """ - - txn.execute(sql, (yesterday,)) - count, = txn.fetchone() - return count + def count_monthly_users(self): + """ + Counts the number of users who used this homeserver in the last 30 days. + Note this method is intended for phonehome metrics only and is different + from the mau figure in synapse.storage.monthly_active_users which, + amongst other things, includes a 3 day grace period before a user counts. + """ + thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + return self.runInteraction( + "count_monthly_users", + self._count_users, + thirty_days_ago, + ) - return self.runInteraction("count_users", _count_users) + def _count_users(self, txn, time_from): + """ + Returns number of users seen in the past time_from period + """ + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT user_id FROM user_ips + WHERE last_seen > ? + GROUP BY user_id + ) u + """ + txn.execute(sql, (time_from,)) + count, = txn.fetchone() + return count def count_r30_users(self): """ From 6bac9ca6d70fc5bf9a828379a7abbd6e9d064137 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 00:06:39 +0100 Subject: [PATCH 196/231] 1.0.0rc3 --- CHANGES.md | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 523cdb1153d7..f4a3ab71ca66 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.0.0rc3 (2019-06-10) +============================= + +Security: Fix authentication bug introduced in 1.0.0rc1. Please upgrade to rc3 immediately + + Synapse 1.0.0rc2 (2019-06-10) ============================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 8dc07fe73c1a..9c75a0a27fd5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ except ImportError: pass -__version__ = "1.0.0rc2" +__version__ = "1.0.0rc3" From 2ddc13577c93505b887880fa715def9addeafafe Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 11 Jun 2019 00:25:07 +0100 Subject: [PATCH 197/231] Don't warn user about password reset disabling through config code (#5387) Moves the warning about password resets being disabled to the point where a user actually tries to reset their password. 
Is this an appropriate place for it to happen? Also removed the disabling of msisdn password resets when you don't have an email config, as that just doesn't make sense. Also change the error a user receives upon disabled passwords to specify that only email-based password reset is disabled. --- changelog.d/5387.bugfix | 1 + synapse/config/emailconfig.py | 11 +++++------ synapse/rest/client/v2_alpha/account.py | 19 +++++++++++++++---- 3 files changed, 21 insertions(+), 10 deletions(-) create mode 100644 changelog.d/5387.bugfix diff --git a/changelog.d/5387.bugfix b/changelog.d/5387.bugfix new file mode 100644 index 000000000000..2c6c94efc463 --- /dev/null +++ b/changelog.d/5387.bugfix @@ -0,0 +1 @@ +Warn about disabling email-based password resets when a reset occurs, and remove warning when someone attempts a phone-based reset. diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index ae0425290623..86018dfcce27 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -19,15 +19,12 @@ # This file can't be called email.py because if it is, we cannot: import email.utils -import logging import os import pkg_resources from ._base import Config, ConfigError -logger = logging.getLogger(__name__) - class EmailConfig(Config): def read_config(self, config): @@ -85,10 +82,12 @@ def read_config(self, config): self.email_password_reset_behaviour = ( "remote" if email_trust_identity_server_for_password_resets else "local" ) + self.password_resets_were_disabled_due_to_email_config = False if self.email_password_reset_behaviour == "local" and email_config == {}: - logger.warn( - "User password resets have been disabled due to lack of email config" - ) + # We cannot warn the user this has happened here + # Instead do so when a user attempts to reset their password + self.password_resets_were_disabled_due_to_email_config = True + self.email_password_reset_behaviour = "off" # Get lifetime of a validation token in milliseconds diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e4c63b69b96f..7cfd7ae7dc11 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -68,7 +68,13 @@ def __init__(self, hs): @defer.inlineCallbacks def on_POST(self, request): if self.config.email_password_reset_behaviour == "off": - raise SynapseError(400, "Password resets have been disabled on this server") + if self.config.password_resets_were_disabled_due_to_email_config: + logger.warn( + "User password resets have been disabled due to lack of email config" + ) + raise SynapseError( + 400, "Email-based password resets have been disabled on this server", + ) body = parse_json_object_from_request(request) @@ -196,9 +202,6 @@ def __init__(self, hs): @defer.inlineCallbacks def on_POST(self, request): - if not self.config.email_password_reset_behaviour == "off": - raise SynapseError(400, "Password resets have been disabled on this server") - body = parse_json_object_from_request(request) assert_params_in_dict(body, [ @@ -251,6 +254,14 @@ def on_GET(self, request, medium): 400, "This medium is currently not supported for password resets", ) + if self.config.email_password_reset_behaviour == "off": + if self.config.password_resets_were_disabled_due_to_email_config: + logger.warn( + "User password resets have been disabled due to lack of email config" + ) + raise SynapseError( + 400, "Email-based password resets have been disabled on this server", + ) sid = parse_string(request, "sid") client_secret 
= parse_string(request, "client_secret") From 10383e6e6fefe29b007d11220841c17ad9cfc3e1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 11:31:12 +0100 Subject: [PATCH 198/231] Change password reset links to /_matrix. --- synapse/app/homeserver.py | 1 - synapse/push/mailer.py | 2 +- .../res/templates/password_reset_success.html | 2 +- synapse/rest/client/v2_alpha/account.py | 9 +- tests/rest/client/v2_alpha/test_account.py | 241 ++++++++++++++++++ tests/unittest.py | 12 + 6 files changed, 260 insertions(+), 7 deletions(-) create mode 100644 tests/rest/client/v2_alpha/test_account.py diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index df524a23dd52..1045d28949e2 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -176,7 +176,6 @@ def _configure_named_resource(self, name, compress=False): resources.update({ "/_matrix/client/api/v1": client_resource, - "/_synapse/password_reset": client_resource, "/_matrix/client/r0": client_resource, "/_matrix/client/unstable": client_resource, "/_matrix/client/v2_alpha": client_resource, diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 4bc9eb731319..099f9545ab14 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -117,7 +117,7 @@ def send_password_reset_mail( link = ( self.hs.config.public_baseurl + - "_synapse/password_reset/email/submit_token" + "_matrix/client/unstable/password_reset/email/submit_token" "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid) ) diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html index 7b6fa5e6f03f..7324d66d1e94 100644 --- a/synapse/res/templates/password_reset_success.html +++ b/synapse/res/templates/password_reset_success.html @@ -1,6 +1,6 @@ -
 <html>
 <head></head>
 <body>
-Your password was successfully reset. You may now close this window.
+Your email has now been validated, please return to your client to reset your password. You may now close this window.
 </body>
 </html>
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e4c63b69b96f..7025f486e125 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import re from six.moves import http_client @@ -228,9 +227,11 @@ def on_POST(self, request): class PasswordResetSubmitTokenServlet(RestServlet): """Handles 3PID validation token submission""" - PATTERNS = [ - re.compile("^/_synapse/password_reset/(?P[^/]*)/submit_token/*$"), - ] + PATTERNS = client_patterns( + "/password_reset/(?P[^/]*)/submit_token/*$", + releases=(), + unstable=True, + ) def __init__(self, hs): """ diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py new file mode 100644 index 000000000000..0d1c0868ce51 --- /dev/null +++ b/tests/rest/client/v2_alpha/test_account.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from email.parser import Parser + +import pkg_resources + +import synapse.rest.admin +from synapse.api.constants import LoginType +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import account, register + +from tests import unittest + + +class PasswordResetTestCase(unittest.HomeserverTestCase): + + servlets = [ + account.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + register.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + # Email config. 
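+        # (the stub sendmail below records every outgoing message in
+        # self.email_attempts, so the tests can fish the reset link out of
+        # the captured mail instead of talking to a real SMTP server)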
+ self.email_attempts = [] + + def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs): + self.email_attempts.append(msg) + return + + config["email"] = { + "enable_notifs": False, + "template_dir": os.path.abspath( + pkg_resources.resource_filename("synapse", "res/templates") + ), + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "notif_from": "test@example.com", + } + config["public_baseurl"] = "https://example.com" + + hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + return hs + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + def test_basic_password_reset(self): + """Test basic password reset flow + """ + old_password = "monkey" + new_password = "kangeroo" + + user_id = self.register_user("kermit", old_password) + self.login("kermit", old_password) + + email = "test@example.com" + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address=email, + validated_at=0, + added_at=0, + ) + ) + + client_secret = "foobar" + session_id = self._request_token(email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + link = self._get_link_from_email() + + self._validate_token(link) + + self._reset_password(new_password, session_id, client_secret) + + # Assert we can log in with the new password + self.login("kermit", new_password) + + # Assert we can't log in with the old password + self.attempt_wrong_password_login("kermit", old_password) + + def test_cant_reset_password_without_clicking_link(self): + """Test that we do actually need to click the link in the email + """ + old_password = "monkey" + new_password = "kangeroo" + + user_id = self.register_user("kermit", old_password) + self.login("kermit", old_password) + + email = "test@example.com" + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address=email, + validated_at=0, + added_at=0, + ) + ) + + client_secret = "foobar" + session_id = self._request_token(email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + + # Attempt to reset password without clicking the link + self._reset_password( + new_password, session_id, client_secret, expected_code=401, + ) + + # Assert we can log in with the old password + self.login("kermit", old_password) + + # Assert we can't log in with the new password + self.attempt_wrong_password_login("kermit", new_password) + + def test_no_valid_token(self): + """Test that we do actually need to request a token and can't just + make a session up. 
+ """ + old_password = "monkey" + new_password = "kangeroo" + + user_id = self.register_user("kermit", old_password) + self.login("kermit", old_password) + + email = "test@example.com" + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address=email, + validated_at=0, + added_at=0, + ) + ) + + client_secret = "foobar" + session_id = "weasle" + + # Attempt to reset password without even requesting an email + self._reset_password( + new_password, session_id, client_secret, expected_code=401, + ) + + # Assert we can log in with the old password + self.login("kermit", old_password) + + # Assert we can't log in with the new password + self.attempt_wrong_password_login("kermit", new_password) + + def _request_token(self, email, client_secret): + request, channel = self.make_request( + "POST", + b"account/password/email/requestToken", + {"client_secret": client_secret, "email": email, "send_attempt": 1}, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + return channel.json_body["sid"] + + def _validate_token(self, link): + # Remove the host + path = link.replace("https://example.com", "") + + request, channel = self.make_request("GET", path, shorthand=False) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + def _get_link_from_email(self): + assert self.email_attempts, "No emails have been sent" + + raw_msg = self.email_attempts[-1].decode("UTF-8") + mail = Parser().parsestr(raw_msg) + + text = None + for part in mail.walk(): + if part.get_content_type() == "text/plain": + text = part.get_payload(decode=True).decode("UTF-8") + break + + if not text: + self.fail("Could not find text portion of email to parse") + + match = re.search(r"https://example.com\S+", text) + assert match, "Could not find link in email" + + return match.group(0) + + def _reset_password( + self, new_password, session_id, client_secret, expected_code=200 + ): + request, channel = self.make_request( + "POST", + b"account/password", + { + "new_password": new_password, + "auth": { + "type": LoginType.EMAIL_IDENTITY, + "threepid_creds": { + "client_secret": client_secret, + "sid": session_id, + }, + }, + }, + ) + self.render(request) + self.assertEquals(expected_code, channel.code, channel.result) diff --git a/tests/unittest.py b/tests/unittest.py index 26204470b167..7dbb64af5999 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -441,3 +441,15 @@ def login(self, username, password, device_id=None): access_token = channel.json_body["access_token"] return access_token + + def attempt_wrong_password_login(self, username, password): + """Attempts to login as the user with the given password, asserting + that the attempt *fails*. + """ + body = {"type": "m.login.password", "user": username, "password": password} + + request, channel = self.make_request( + "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8') + ) + self.render(request) + self.assertEqual(channel.code, 403, channel.result) From 453aaaadc0579a715b779cf3e7eeb6872d054396 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 11:33:14 +0100 Subject: [PATCH 199/231] Newsfile --- changelog.d/5424.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5424.misc diff --git a/changelog.d/5424.misc b/changelog.d/5424.misc new file mode 100644 index 000000000000..b92b50e3178d --- /dev/null +++ b/changelog.d/5424.misc @@ -0,0 +1 @@ +Move password reset links to /_matrix/client/unstable namespace. 
From 426218323b8475a71b3c58d7d291f0046faa62ab Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 11 Jun 2019 12:17:43 +0100 Subject: [PATCH 200/231] Neilj/improve federation docs (#5419) Add FAQ questions to federate.md. Add a health warning making it clear that the 1711 upgrade FAQ is now out of date. --- INSTALL.md | 27 +++++------ changelog.d/5419.doc | 1 + docs/MSC1711_certificates_FAQ.md | 17 +++++++ docs/federate.md | 77 ++++++++++++++++++++++++++++++-- 4 files changed, 106 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5419.doc diff --git a/INSTALL.md b/INSTALL.md index a1ff91a98eef..2df686b19b07 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,14 +1,14 @@ -* [Installing Synapse](#installing-synapse) - * [Installing from source](#installing-from-source) - * [Platform-Specific Instructions](#platform-specific-instructions) - * [Troubleshooting Installation](#troubleshooting-installation) - * [Prebuilt packages](#prebuilt-packages) -* [Setting up Synapse](#setting-up-synapse) - * [TLS certificates](#tls-certificates) - * [Email](#email) - * [Registering a user](#registering-a-user) - * [Setting up a TURN server](#setting-up-a-turn-server) - * [URL previews](#url-previews) +- [Installing Synapse](#installing-synapse) + - [Installing from source](#installing-from-source) + - [Platform-Specific Instructions](#platform-specific-instructions) + - [Troubleshooting Installation](#troubleshooting-installation) + - [Prebuilt packages](#prebuilt-packages) +- [Setting up Synapse](#setting-up-synapse) + - [TLS certificates](#tls-certificates) + - [Email](#email) + - [Registering a user](#registering-a-user) + - [Setting up a TURN server](#setting-up-a-turn-server) + - [URL previews](#url-previews) # Installing Synapse @@ -395,8 +395,9 @@ To configure Synapse to expose an HTTPS port, you will need to edit instance, if using certbot, use `fullchain.pem` as your certificate, not `cert.pem`). -For those of you upgrading your TLS certificate for Synapse 1.0 compliance, -please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100). +For a more detailed guide to configuring your server for federation, see +[federate.md](docs/federate.md) + ## Email diff --git a/changelog.d/5419.doc b/changelog.d/5419.doc new file mode 100644 index 000000000000..74cf5eea8bab --- /dev/null +++ b/changelog.d/5419.doc @@ -0,0 +1 @@ +Expand the federation guide to include relevant content from the MSC1711 FAQ diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 599462bdcb73..7f9a23ff3148 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -1,5 +1,22 @@ # MSC1711 Certificates FAQ +## Historical Note +This document was originally written to guide server admins through the upgrade +path towards Synapse 1.0. Specifically, +[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md) +required that all servers present valid TLS certificates on their federation +API. Admins were encouraged to achieve compliance from version 0.99.0 (released +in February 2019) ahead of version 1.0 (released June 2019) enforcing the +certificate checks. + +Much of what follows is now outdated since most admins will have already +upgraded, however it may be of use to those with old installs returning to the +project. + +If you are setting up a server from scratch you almost certainly should look at +the [installation guide](INSTALL.md) instead. 
+ +## Introduction The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It supports the r0.1 release of the server to server specification, but is compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well diff --git a/docs/federate.md b/docs/federate.md index b7fc09661ce3..6d6bb85e15a9 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -14,9 +14,9 @@ up and will work provided you set the ``server_name`` to match your machine's public DNS hostname, and provide Synapse with a TLS certificate which is valid for your ``server_name``. -Once you have completed the steps necessary to federate, you should be able to -join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a -room for Synapse admins.) +Once federation has been configured, you should be able to join a room over +federation. A good place to start is ``#synapse:matrix.org`` - a room for +Synapse admins. ## Delegation @@ -98,6 +98,77 @@ _matrix._tcp.``. In our example, we would expect this: Note that the target of a SRV record cannot be an alias (CNAME record): it has to point directly to the server hosting the synapse instance. +### Delegation FAQ +#### When do I need a SRV record or .well-known URI? + +If your homeserver listens on the default federation port (8448), and your +`server_name` points to the host that your homeserver runs on, you do not need an SRV +record or `.well-known/matrix/server` URI. + +For instance, if you registered `example.com` and pointed its DNS A record at a +fresh server, you could install Synapse on that host, +giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled, +it would automatically generate a valid TLS certificate for you via Let's Encrypt +and no SRV record or .well-known URI would be needed. + +This is the common case, although you can add an SRV record or +`.well-known/matrix/server` URI for completeness if you wish. + +**However**, if your server does not listen on port 8448, or if your `server_name` +does not point to the host that your homeserver runs on, you will need to let +other servers know how to find it. The way to do this is via .well-known or an +SRV record. + +#### I have created a .well-known URI. Do I still need an SRV record? + +As of Synapse 0.99, Synapse will first check for the existence of a .well-known +URI and follow any delegation it suggests. It will only then check for the +existence of an SRV record. + +That means that the SRV record will often be redundant. However, you should +remember that there may still be older versions of Synapse in the federation +which do not understand .well-known URIs, so if you removed your SRV record +you would no longer be able to federate with them. + +It is therefore best to leave the SRV record in place for now. Synapse 0.34 and +earlier will follow the SRV record (and not care about the invalid +certificate). Synapse 0.99 and later will follow the .well-known URI, with the +correct certificate chain. + +#### Can I manage my own certificates rather than having Synapse renew certificates itself? + +Yes, you are welcome to manage your certificates yourself. Synapse will only +attempt to obtain certificates from Let's Encrypt if you configure it to do +so.The only requirement is that there is a valid TLS cert present for +federation end points. + +#### Do you still recommend against using a reverse proxy on the federation port? + +We no longer actively recommend against using a reverse proxy. 
Many admins will +find it easier to direct federation traffic to a reverse proxy and manage their +own TLS certificates, and this is a supported configuration. + +See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a +reverse proxy. + +#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy? + +Practically speaking, this is no longer necessary. + +If you are using a reverse proxy for all of your TLS traffic, then you can set +`no_tls: True` in the Synapse config. In that case, the only reason Synapse +needs the certificate is to populate a legacy `tls_fingerprints` field in the +federation API. This is ignored by Synapse 0.99.0 and later, and the only time +pre-0.99 Synapses will check it is when attempting to fetch the server keys - +and generally this is delegated via `matrix.org`, which will be running a modern +version of Synapse. + +#### Do I need the same certificate for the client and federation port? + +No. There is nothing stopping you from using different certificates, +particularly if you are using a reverse proxy. However, Synapse will use the +same certificate on any ports where TLS is configured. + ## Troubleshooting You can use the [federation tester]( From a766c41d258b48ca1690723c1aa51684baa05e6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 11:46:32 +0100 Subject: [PATCH 201/231] Bump bleach version so that tests can run on old deps. --- synapse/python_dependencies.py | 2 +- tests/push/test_email.py | 6 ------ tests/push/test_http.py | 6 ------ tests/rest/client/test_consent.py | 6 ------ tests/rest/client/v2_alpha/test_register.py | 6 ------ 5 files changed, 1 insertion(+), 25 deletions(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 6efd81f204a1..7dfa78dadb8b 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -80,7 +80,7 @@ ] CONDITIONAL_REQUIREMENTS = { - "email": ["Jinja2>=2.9", "bleach>=1.4.2"], + "email": ["Jinja2>=2.9", "bleach>=1.4.3"], "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"], # we use execute_batch, which arrived in psycopg 2.7. 
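Alongside the bleach floor bump, the hunks below delete the per-module import guards that previously skipped these suites when the optional "email" dependencies were absent. Condensed, the removed pattern looked like this (a sketch assembled from the deletions that follow):

    # Condensed form of the guard removed below: optionally import the
    # mailer (which needs the "email" extra, i.e. Jinja2 and bleach) and
    # skip the whole TestCase if the optional deps are missing.
    from tests.unittest import HomeserverTestCase

    try:
        from synapse.push.mailer import load_jinja2_templates
    except ImportError:
        load_jinja2_templates = None

    class EmailPusherTests(HomeserverTestCase):
        skip = "No Jinja installed" if not load_jinja2_templates else None
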
diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 9cdde1a9bd32..9bc5f07de181 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -24,15 +24,9 @@ from tests.unittest import HomeserverTestCase -try: - from synapse.push.mailer import load_jinja2_templates -except Exception: - load_jinja2_templates = None - class EmailPusherTests(HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/push/test_http.py b/tests/push/test_http.py index aba618b2be04..22c3f73ef31e 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -23,15 +23,9 @@ from tests.unittest import HomeserverTestCase -try: - from synapse.push.mailer import load_jinja2_templates -except Exception: - load_jinja2_templates = None - class HTTPPusherTests(HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 88f8f1abdc90..efc5a99db33c 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -23,14 +23,8 @@ from tests import unittest from tests.server import render -try: - from synapse.push.mailer import load_jinja2_templates -except Exception: - load_jinja2_templates = None - class ConsentResourceTestCase(unittest.HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 0cb6a363d64c..e9d8f3c7343c 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -30,11 +30,6 @@ from tests import unittest -try: - from synapse.push.mailer import load_jinja2_templates -except ImportError: - load_jinja2_templates = None - class RegisterRestServletTestCase(unittest.HomeserverTestCase): @@ -307,7 +302,6 @@ def test_manual_expire(self): class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ register.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, From 97174780ce726962ca1beb3788b62f16e9fad270 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 17:10:01 +0100 Subject: [PATCH 202/231] 1.0.0 --- CHANGES.md | 21 +++++++++++++++++++++ changelog.d/5418.bugfix | 1 - changelog.d/5419.doc | 1 - changelog.d/5424.misc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 28 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5418.bugfix delete mode 100644 changelog.d/5419.doc delete mode 100644 changelog.d/5424.misc diff --git a/CHANGES.md b/CHANGES.md index f4a3ab71ca66..1b827c80791e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.0.0 (2019-06-11) +========================== + +Bugfixes +-------- + +- Fix bug where attempting to send transactions with large number of EDUs can fail. 
([\#5418](https://github.com/matrix-org/synapse/issues/5418)) + + +Improved Documentation +---------------------- + +- Expand the federation guide to include relevant content from the MSC1711 FAQ ([\#5419](https://github.com/matrix-org/synapse/issues/5419)) + + +Internal Changes +---------------- + +- Move password reset links to /_matrix/client/unstable namespace. ([\#5424](https://github.com/matrix-org/synapse/issues/5424)) + + Synapse 1.0.0rc3 (2019-06-10) ============================= diff --git a/changelog.d/5418.bugfix b/changelog.d/5418.bugfix deleted file mode 100644 index 3fd4d2a88215..000000000000 --- a/changelog.d/5418.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where attempting to send transactions with large number of EDUs can fail. diff --git a/changelog.d/5419.doc b/changelog.d/5419.doc deleted file mode 100644 index 74cf5eea8bab..000000000000 --- a/changelog.d/5419.doc +++ /dev/null @@ -1 +0,0 @@ -Expand the federation guide to include relevant content from the MSC1711 FAQ diff --git a/changelog.d/5424.misc b/changelog.d/5424.misc deleted file mode 100644 index b92b50e3178d..000000000000 --- a/changelog.d/5424.misc +++ /dev/null @@ -1 +0,0 @@ -Move password reset links to /_matrix/client/unstable namespace. diff --git a/debian/changelog b/debian/changelog index 6a1a72c0e391..ef4edd7ac062 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.0.0) stable; urgency=medium + + * New synapse release 1.0.0. + + -- Synapse Packaging team Tue, 11 Jun 2019 17:09:53 +0100 + matrix-synapse-py3 (0.99.5.2) stable; urgency=medium * New synapse release 0.99.5.2. diff --git a/synapse/__init__.py b/synapse/__init__.py index 9c75a0a27fd5..5bc24863d9c5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ except ImportError: pass -__version__ = "1.0.0rc3" +__version__ = "1.0.0" From 09e9a26b7181e36af7e2a4a0795d68f962742738 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Jun 2019 21:31:59 +1000 Subject: [PATCH 203/231] Remove Python 2.7 support. 
(#5425) * remove 2.7 from CI and publishing * fill out classifiers and also make it not be installed on 3.5 * some minor bumps so that the old deps work on python 3.5 --- .buildkite/docker-compose.py27.pg94.yaml | 21 ------- .buildkite/docker-compose.py27.pg95.yaml | 21 ------- .buildkite/pipeline.yml | 57 +------------------ .circleci/config.yml | 70 ------------------------ changelog.d/5425.removal | 1 + setup.py | 10 ++++ synapse/__init__.py | 7 +++ synapse/python_dependencies.py | 4 +- tox.ini | 4 +- 9 files changed, 25 insertions(+), 170 deletions(-) delete mode 100644 .buildkite/docker-compose.py27.pg94.yaml delete mode 100644 .buildkite/docker-compose.py27.pg95.yaml create mode 100644 changelog.d/5425.removal diff --git a/.buildkite/docker-compose.py27.pg94.yaml b/.buildkite/docker-compose.py27.pg94.yaml deleted file mode 100644 index 2d4b9eadd99b..000000000000 --- a/.buildkite/docker-compose.py27.pg94.yaml +++ /dev/null @@ -1,21 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:9.4 - environment: - POSTGRES_PASSWORD: postgres - - testenv: - image: python:2.7 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app - volumes: - - ..:/app diff --git a/.buildkite/docker-compose.py27.pg95.yaml b/.buildkite/docker-compose.py27.pg95.yaml deleted file mode 100644 index c6a41f1da0f9..000000000000 --- a/.buildkite/docker-compose.py27.pg95.yaml +++ /dev/null @@ -1,21 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:9.5 - environment: - POSTGRES_PASSWORD: postgres - - testenv: - image: python:2.7 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app - volumes: - - ..:/app diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 719f22b4e107..8eddf8b93199 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -48,13 +48,13 @@ steps: - command: - "python -m pip install tox" - - "tox -e py27,codecov" - label: ":python: 2.7 / SQLite" + - "tox -e py35-old,codecov" + label: ":python: 3.5 / SQLite / Old Deps" env: TRIAL_FLAGS: "-j 2" plugins: - docker#v3.0.1: - image: "python:2.7" + image: "python:3.5" propagate-environment: true retry: automatic: @@ -114,57 +114,6 @@ steps: - exit_status: 2 limit: 2 - - command: - - "python -m pip install tox" - - "tox -e py27-old,codecov" - label: ":python: 2.7 / SQLite / Old Deps" - env: - TRIAL_FLAGS: "-j 2" - plugins: - - docker#v3.0.1: - image: "python:2.7" - propagate-environment: true - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 2.7 / :postgres: 9.4" - env: - TRIAL_FLAGS: "-j 4" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'" - plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py27.pg94.yaml - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 2.7 / :postgres: 9.5" - env: - TRIAL_FLAGS: "-j 4" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'" - plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py27.pg95.yaml - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - label: ":python: 3.5 / :postgres: 9.4" env: TRIAL_FLAGS: "-j 4" 
diff --git a/.circleci/config.yml b/.circleci/config.yml index 137747dae3f2..3c2b32c015cb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,72 +4,18 @@ jobs: machine: true steps: - checkout - - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 . - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 . - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - run: docker push matrixdotorg/synapse:${CIRCLE_TAG} - - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2 - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3 dockerhubuploadlatest: machine: true steps: - checkout - - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 . - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 . - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - run: docker push matrixdotorg/synapse:latest - - run: docker push matrixdotorg/synapse:latest-py2 - run: docker push matrixdotorg/synapse:latest-py3 - sytestpy2: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs - sytestpy2postgres: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: POSTGRES=1 /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs - sytestpy2merged: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: bash .circleci/merge_base_branch.sh - - run: /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs - sytestpy2postgresmerged: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: bash .circleci/merge_base_branch.sh - - run: POSTGRES=1 /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs sytestpy3: docker: @@ -126,14 +72,6 @@ workflows: version: 2 build: jobs: - - sytestpy2: - filters: - branches: - only: /develop|master|release-.*/ - - sytestpy2postgres: - filters: - branches: - only: /develop|master|release-.*/ - sytestpy3: filters: branches: @@ -142,14 +80,6 @@ workflows: filters: branches: only: /develop|master|release-.*/ - - sytestpy2merged: - filters: - branches: - ignore: /develop|master|release-.*/ - - sytestpy2postgresmerged: - filters: - branches: - ignore: /develop|master|release-.*/ - sytestpy3merged: filters: branches: diff --git a/changelog.d/5425.removal b/changelog.d/5425.removal new file mode 100644 index 000000000000..30022ee63d5c --- /dev/null +++ b/changelog.d/5425.removal @@ -0,0 +1 @@ +Python 2.7 is no longer a supported platform. Synapse now requires Python 3.5+ to run. 
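The `setup.py` hunk below adds `python_requires='~=3.5'`. Under PEP 440 the compatible-release specifier `~=3.5` expands to `>=3.5, <4`, which is what stops pip from installing new releases onto Python 2.7. A quick demonstration with the `packaging` library (pulled in here purely for illustration):

```python
# PEP 440 semantics of the python_requires value added below.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=3.5")  # compatible release: >=3.5, <4
assert "3.5.0" in spec and "3.7.3" in spec
assert "2.7.16" not in spec and "4.0" not in spec
```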
diff --git a/setup.py b/setup.py index 55663e9cac28..3492cdc5a0e2 100755 --- a/setup.py +++ b/setup.py @@ -102,6 +102,16 @@ def exec_file(path_segments): include_package_data=True, zip_safe=False, long_description=long_description, + python_requires='~=3.5', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Topic :: Communications :: Chat', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + ], scripts=["synctl"] + glob.glob("scripts/*"), cmdclass={'test': TestCommand}, ) diff --git a/synapse/__init__.py b/synapse/__init__.py index 5bc24863d9c5..0c0154678935 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,6 +17,13 @@ """ This is a reference implementation of a Matrix home server. """ +import sys + +# Check that we're not running on an unsupported Python version. +if sys.version_info < (3, 5): + print("Synapse requires Python 3.5 or above.") + sys.exit(1) + try: from twisted.internet import protocol from twisted.internet.protocol import Factory diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 7dfa78dadb8b..11ace2bfb17f 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -44,7 +44,7 @@ "canonicaljson>=1.1.3", "signedjson>=1.0.0", "pynacl>=1.2.1", - "idna>=2", + "idna>=2.5", # validating SSL certs for IP addresses requires service_identity 18.1. "service_identity>=18.1.0", @@ -65,7 +65,7 @@ "sortedcontainers>=1.4.4", "psutil>=2.0.0", "pymacaroons>=0.13.0", - "msgpack>=0.5.0", + "msgpack>=0.5.2", "phonenumbers>=8.2.0", "six>=1.10", # prometheus_client 0.4.0 changed the format of counter metrics diff --git a/tox.ini b/tox.ini index 543b232ae724..0c4d562766da 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = packaging, py27, py36, pep8, check_isort +envlist = packaging, py35, py36, py37, pep8, check_isort [base] deps = @@ -79,7 +79,7 @@ usedevelop=true # A test suite for the oldest supported versions of Python libraries, to catch # any uses of APIs not available in them. -[testenv:py27-old] +[testenv:py35-old] skip_install=True deps = # Old automat version for Twisted From 6312d6cc7c5bc80984758a70e2c368d8b4fb3bfd Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 13 Jun 2019 22:40:52 +1000 Subject: [PATCH 204/231] Expose statistics on extrems to prometheus (#5384) --- changelog.d/5384.feature | 1 + scripts/generate_signing_key.py | 2 +- synapse/metrics/__init__.py | 112 ++++++++++++++++++---- synapse/storage/events.py | 44 ++++++--- tests/storage/test_cleanup_extrems.py | 128 ++++++++++---------------- tests/storage/test_event_metrics.py | 97 +++++++++++++++++++ tests/unittest.py | 61 +++++++++++- 7 files changed, 331 insertions(+), 114 deletions(-) create mode 100644 changelog.d/5384.feature create mode 100644 tests/storage/test_event_metrics.py diff --git a/changelog.d/5384.feature b/changelog.d/5384.feature new file mode 100644 index 000000000000..9497f521c832 --- /dev/null +++ b/changelog.d/5384.feature @@ -0,0 +1 @@ +Statistics on forward extremities per room are now exposed via Prometheus. 
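The centrepiece of this patch is the new `BucketCollector` in `synapse/metrics/__init__.py`, which exports a point-in-time snapshot with histogram semantics rather than incrementally bumping counters. Condensed from the `synapse/storage/events.py` hunk further down, registering one looks like this (the bare dict stands in for the data source, which the real code refreshes with a looping call):

```python
# Condensed from this patch: expose a point-in-time distribution.
# current_counts maps "forward extremity count" -> "number of rooms
# with that many extremities".
from synapse.metrics import BucketCollector

current_counts = {}

BucketCollector(
    "synapse_forward_extremities",
    lambda: current_counts,
    buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"],
)
```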
diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py index ba3ba9739574..36e9140b5017 100755 --- a/scripts/generate_signing_key.py +++ b/scripts/generate_signing_key.py @@ -16,7 +16,7 @@ import argparse import sys -from signedjson.key import write_signing_keys, generate_signing_key +from signedjson.key import generate_signing_key, write_signing_keys from synapse.util.stringutils import random_string diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index ef48984fdd27..539c35352868 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -25,7 +25,7 @@ import attr from prometheus_client import Counter, Gauge, Histogram -from prometheus_client.core import REGISTRY, GaugeMetricFamily +from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily from twisted.internet import reactor @@ -40,7 +40,6 @@ class RegistryProxy(object): - @staticmethod def collect(): for metric in REGISTRY.collect(): @@ -63,10 +62,7 @@ def collect(self): try: calls = self.caller() except Exception: - logger.exception( - "Exception running callback for LaterGauge(%s)", - self.name, - ) + logger.exception("Exception running callback for LaterGauge(%s)", self.name) yield g return @@ -116,9 +112,7 @@ def __init__(self, name, desc, labels, sub_metrics): # Create a class which have the sub_metrics values as attributes, which # default to 0 on initialization. Used to pass to registered callbacks. self._metrics_class = attr.make_class( - "_MetricsEntry", - attrs={x: attr.ib(0) for x in sub_metrics}, - slots=True, + "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True ) # Counts number of in flight blocks for a given set of label values @@ -157,7 +151,9 @@ def collect(self): Note: may be called by a separate thread. """ - in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels) + in_flight = GaugeMetricFamily( + self.name + "_total", self.desc, labels=self.labels + ) metrics_by_key = {} @@ -179,7 +175,9 @@ def collect(self): yield in_flight for name in self.sub_metrics: - gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels) + gauge = GaugeMetricFamily( + "_".join([self.name, name]), "", labels=self.labels + ) for key, metrics in six.iteritems(metrics_by_key): gauge.add_metric(key, getattr(metrics, name)) yield gauge @@ -193,12 +191,75 @@ def _register_with_collector(self): all_gauges[self.name] = self +@attr.s(hash=True) +class BucketCollector(object): + """ + Like a Histogram, but allows buckets to be point-in-time instead of + incrementally added to. + + Args: + name (str): Base name of metric to be exported to Prometheus. + data_collector (callable -> dict): A synchronous callable that + returns a dict mapping bucket to number of items in the + bucket. If these buckets are not the same as the buckets + given to this class, they will be remapped into them. + buckets (list[float]): List of floats/ints of the buckets to + give to Prometheus. +Inf is ignored, if given. + + """ + + name = attr.ib() + data_collector = attr.ib() + buckets = attr.ib() + + def collect(self): + + # Fetch the data -- this must be synchronous! 
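+        # (Prometheus calls collect() inline during a scrape, so the
+        # callable must return a plain dict, not a Deferred.)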
+ data = self.data_collector() + + buckets = {} + + res = [] + for x in data.keys(): + for i, bound in enumerate(self.buckets): + if x <= bound: + buckets[bound] = buckets.get(bound, 0) + data[x] + break + + for i in self.buckets: + res.append([i, buckets.get(i, 0)]) + + res.append(["+Inf", sum(data.values())]) + + metric = HistogramMetricFamily( + self.name, + "", + buckets=res, + sum_value=sum([x * y for x, y in data.items()]), + ) + yield metric + + def __attrs_post_init__(self): + self.buckets = [float(x) for x in self.buckets if x != "+Inf"] + if self.buckets != sorted(self.buckets): + raise ValueError("Buckets not sorted") + + self.buckets = tuple(self.buckets) + + if self.name in all_gauges.keys(): + logger.warning("%s already registered, reregistering" % (self.name,)) + REGISTRY.unregister(all_gauges.pop(self.name)) + + REGISTRY.register(self) + all_gauges[self.name] = self + + # # Detailed CPU metrics # -class CPUMetrics(object): +class CPUMetrics(object): def __init__(self): ticks_per_sec = 100 try: @@ -237,13 +298,28 @@ def collect(self): "python_gc_time", "Time taken to GC (sec)", ["gen"], - buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50, - 5.00, 7.50, 15.00, 30.00, 45.00, 60.00], + buckets=[ + 0.0025, + 0.005, + 0.01, + 0.025, + 0.05, + 0.10, + 0.25, + 0.50, + 1.00, + 2.50, + 5.00, + 7.50, + 15.00, + 30.00, + 45.00, + 60.00, + ], ) class GCCounts(object): - def collect(self): cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"]) for n, m in enumerate(gc.get_count()): @@ -279,9 +355,7 @@ def collect(self): events_processed_counter = Counter("synapse_federation_client_events_processed", "") event_processing_loop_counter = Counter( - "synapse_event_processing_loop_count", - "Event processing loop iterations", - ["name"], + "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"] ) event_processing_loop_room_count = Counter( @@ -311,7 +385,6 @@ def collect(self): class ReactorLastSeenMetric(object): - def collect(self): cm = GaugeMetricFamily( "python_twisted_reactor_last_seen", @@ -325,7 +398,6 @@ def collect(self): def runUntilCurrentTimer(func): - @functools.wraps(func) def f(*args, **kwargs): now = reactor.seconds() diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f9162be9b90a..1578403f7976 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -17,7 +17,7 @@ import itertools import logging -from collections import OrderedDict, deque, namedtuple +from collections import Counter as c_counter, OrderedDict, deque, namedtuple from functools import wraps from six import iteritems, text_type @@ -33,6 +33,7 @@ from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.metrics import BucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.state import StateResolutionStore from synapse.storage.background_updates import BackgroundUpdateStore @@ -220,13 +221,38 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() + # Collect metrics on the number of forward extremities that exist. 
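+        # (Maps "number of forward extremities" -> "number of rooms
+        # with that many"; refreshed by the looping call below.)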
+ self._current_forward_extremities_amount = {} + + BucketCollector( + "synapse_forward_extremities", + lambda: self._current_forward_extremities_amount, + buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"] + ) + + # Read the extrems every 60 minutes + hs.get_clock().looping_call(self._read_forward_extremities, 60 * 60 * 1000) + + @defer.inlineCallbacks + def _read_forward_extremities(self): + def fetch(txn): + txn.execute( + """ + select count(*) c from event_forward_extremities + group by room_id + """ + ) + return txn.fetchall() + + res = yield self.runInteraction("read_forward_extremities", fetch) + self._current_forward_extremities_amount = c_counter(list(x[0] for x in res)) + @defer.inlineCallbacks def persist_events(self, events_and_contexts, backfilled=False): """ @@ -568,17 +594,11 @@ def _get_events_which_are_prevs_txn(txn, batch): ) txn.execute(sql, batch) - results.extend( - r[0] - for r in txn - if not json.loads(r[1]).get("soft_failed") - ) + results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed")) for chunk in batch_iter(event_ids, 100): yield self.runInteraction( - "_get_events_which_are_prevs", - _get_events_which_are_prevs_txn, - chunk, + "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk ) defer.returnValue(results) @@ -640,9 +660,7 @@ def _get_prevs_before_rejected_txn(txn, batch): for chunk in batch_iter(event_ids, 100): yield self.runInteraction( - "_get_prevs_before_rejected", - _get_prevs_before_rejected_txn, - chunk, + "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk ) defer.returnValue(existing_prevs) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 6aa8b8b3c679..f4c81ef77dda 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -15,7 +15,6 @@ import os.path -from synapse.api.constants import EventTypes from synapse.storage import prepare_database from synapse.types import Requester, UserID @@ -23,17 +22,12 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): - """Test the background update to clean forward extremities table. """ - def make_homeserver(self, reactor, clock): - # Hack until we understand why test_forked_graph_cleanup fails with v4 - config = self.default_config() - config['default_room_version'] = '1' - return self.setup_test_homeserver(config=config) + Test the background update to clean forward extremities table. + """ def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() - self.event_creator = homeserver.get_event_creation_handler() self.room_creator = homeserver.get_room_creation_handler() # Create a test user and room @@ -42,56 +36,6 @@ def prepare(self, reactor, clock, homeserver): info = self.get_success(self.room_creator.create_room(self.requester, {})) self.room_id = info["room_id"] - def create_and_send_event(self, soft_failed=False, prev_event_ids=None): - """Create and send an event. - - Args: - soft_failed (bool): Whether to create a soft failed event or not - prev_event_ids (list[str]|None): Explicitly set the prev events, - or if None just use the default - - Returns: - str: The new event's ID. 
- """ - prev_events_and_hashes = None - if prev_event_ids: - prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] - - event, context = self.get_success( - self.event_creator.create_event( - self.requester, - { - "type": EventTypes.Message, - "room_id": self.room_id, - "sender": self.user.to_string(), - "content": {"body": "", "msgtype": "m.text"}, - }, - prev_events_and_hashes=prev_events_and_hashes, - ) - ) - - if soft_failed: - event.internal_metadata.soft_failed = True - - self.get_success( - self.event_creator.send_nonmember_event(self.requester, event, context) - ) - - return event.event_id - - def add_extremity(self, event_id): - """Add the given event as an extremity to the room. - """ - self.get_success( - self.store._simple_insert( - table="event_forward_extremities", - values={"room_id": self.room_id, "event_id": event_id}, - desc="test_add_extremity", - ) - ) - - self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) - def run_background_update(self): """Re run the background update to clean up the extremities. """ @@ -131,10 +75,16 @@ def test_soft_failed_extremities_handled_correctly(self): """ # Create the room graph - event_id_1 = self.create_and_send_event() - event_id_2 = self.create_and_send_event(True, [event_id_1]) - event_id_3 = self.create_and_send_event(True, [event_id_2]) - event_id_4 = self.create_and_send_event(False, [event_id_3]) + event_id_1 = self.create_and_send_event(self.room_id, self.user) + event_id_2 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_1] + ) + event_id_3 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_2] + ) + event_id_4 = self.create_and_send_event( + self.room_id, self.user, False, [event_id_3] + ) # Check the latest events are as expected latest_event_ids = self.get_success( @@ -154,12 +104,16 @@ def test_basic_cleanup(self): Where SF* are soft failed, and with extremities of A and B """ # Create the room graph - event_id_a = self.create_and_send_event() - event_id_sf1 = self.create_and_send_event(True, [event_id_a]) - event_id_b = self.create_and_send_event(False, [event_id_sf1]) + event_id_a = self.create_and_send_event(self.room_id, self.user) + event_id_sf1 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a] + ) + event_id_b = self.create_and_send_event( + self.room_id, self.user, False, [event_id_sf1] + ) # Add the new extremity and check the latest events are as expected - self.add_extremity(event_id_a) + self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) @@ -185,13 +139,19 @@ def test_chain_of_fail_cleanup(self): Where SF* are soft failed, and with extremities of A and B """ # Create the room graph - event_id_a = self.create_and_send_event() - event_id_sf1 = self.create_and_send_event(True, [event_id_a]) - event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) - event_id_b = self.create_and_send_event(False, [event_id_sf2]) + event_id_a = self.create_and_send_event(self.room_id, self.user) + event_id_sf1 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a] + ) + event_id_sf2 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_sf1] + ) + event_id_b = self.create_and_send_event( + self.room_id, self.user, False, [event_id_sf2] + ) # Add the new extremity and check the latest events are as expected - self.add_extremity(event_id_a) + self.add_extremity(self.room_id, event_id_a) latest_event_ids = 
self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) @@ -227,16 +187,26 @@ def test_forked_graph_cleanup(self): """ # Create the room graph - event_id_a = self.create_and_send_event() - event_id_b = self.create_and_send_event() - event_id_sf1 = self.create_and_send_event(True, [event_id_a]) - event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) - event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) - self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 - event_id_c = self.create_and_send_event(False, [event_id_sf3]) + event_id_a = self.create_and_send_event(self.room_id, self.user) + event_id_b = self.create_and_send_event(self.room_id, self.user) + event_id_sf1 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a] + ) + event_id_sf2 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a, event_id_b] + ) + event_id_sf3 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_sf1] + ) + self.create_and_send_event( + self.room_id, self.user, True, [event_id_sf2, event_id_sf3] + ) # SF4 + event_id_c = self.create_and_send_event( + self.room_id, self.user, False, [event_id_sf3] + ) # Add the new extremity and check the latest events are as expected - self.add_extremity(event_id_a) + self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py new file mode 100644 index 000000000000..20a068f1fcc6 --- /dev/null +++ b/tests/storage/test_event_metrics.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.metrics import REGISTRY +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class ExtremStatisticsTestCase(HomeserverTestCase): + def test_exposed_to_prometheus(self): + """ + Forward extremity counts are exposed via Prometheus. 
+ """ + room_creator = self.hs.get_room_creation_handler() + + user = UserID("alice", "test") + requester = Requester(user, None, False, None, None) + + # Real events, forward extremities + events = [(3, 2), (6, 2), (4, 6)] + + for event_count, extrems in events: + info = self.get_success(room_creator.create_room(requester, {})) + room_id = info["room_id"] + + last_event = None + + # Make a real event chain + for i in range(event_count): + ev = self.create_and_send_event(room_id, user, False, last_event) + last_event = [ev] + + # Sprinkle in some extremities + for i in range(extrems): + ev = self.create_and_send_event(room_id, user, False, last_event) + + # Let it run for a while, then pull out the statistics from the + # Prometheus client registry + self.reactor.advance(60 * 60 * 1000) + self.pump(1) + + items = list( + filter( + lambda x: x.name == "synapse_forward_extremities", + list(REGISTRY.collect()), + ) + ) + + # Check the values are what we want + buckets = {} + _count = 0 + _sum = 0 + + for i in items[0].samples: + if i[0].endswith("_bucket"): + buckets[i[1]['le']] = i[2] + elif i[0].endswith("_count"): + _count = i[2] + elif i[0].endswith("_sum"): + _sum = i[2] + + # 3 buckets, 2 with 2 extrems, 1 with 6 extrems (bucketed as 7), and + # +Inf which is all + self.assertEqual( + buckets, + { + 1.0: 0, + 2.0: 2, + 3.0: 0, + 5.0: 0, + 7.0: 1, + 10.0: 0, + 15.0: 0, + 20.0: 0, + 50.0: 0, + 100.0: 0, + 200.0: 0, + 500.0: 0, + "+Inf": 3, + }, + ) + # 3 rooms, with 10 total events + self.assertEqual(_count, 3) + self.assertEqual(_sum, 10) diff --git a/tests/unittest.py b/tests/unittest.py index 7dbb64af5999..b6dc7932ce5c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -27,11 +27,12 @@ from twisted.internet.defer import Deferred from twisted.trial import unittest +from synapse.api.constants import EventTypes from synapse.config.homeserver import HomeServerConfig from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest from synapse.server import HomeServer -from synapse.types import UserID, create_requester +from synapse.types import Requester, UserID, create_requester from synapse.util.logcontext import LoggingContext from tests.server import get_clock, make_request, render, setup_test_homeserver @@ -442,6 +443,64 @@ def login(self, username, password, device_id=None): access_token = channel.json_body["access_token"] return access_token + def create_and_send_event( + self, room_id, user, soft_failed=False, prev_event_ids=None + ): + """ + Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. 
+ """ + event_creator = self.hs.get_event_creation_handler() + secrets = self.hs.get_secrets() + requester = Requester(user, None, False, None, None) + + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + event_creator.create_event( + requester, + { + "type": EventTypes.Message, + "room_id": room_id, + "sender": user.to_string(), + "content": {"body": secrets.token_hex(), "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + event_creator.send_nonmember_event(requester, event, context) + ) + + return event.event_id + + def add_extremity(self, room_id, event_id): + """ + Add the given event as an extremity to the room. + """ + self.get_success( + self.hs.get_datastore()._simple_insert( + table="event_forward_extremities", + values={"room_id": room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.hs.get_datastore().get_latest_event_ids_in_room.invalidate((room_id,)) + def attempt_wrong_password_login(self, username, password): """Attempts to login as the user with the given password, asserting that the attempt *fails*. From 5c15039e065d710459dac9e558c8ec94edf7b6c4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 13 Jun 2019 13:52:08 +0100 Subject: [PATCH 205/231] Clean up code for sending federation EDUs. (#5381) This code confused the hell out of me today. Split _get_new_device_messages into its two (unrelated) parts. --- changelog.d/5381.misc | 1 + .../sender/per_destination_queue.py | 40 ++++++++++++------- 2 files changed, 27 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5381.misc diff --git a/changelog.d/5381.misc b/changelog.d/5381.misc new file mode 100644 index 000000000000..bbf70a044577 --- /dev/null +++ b/changelog.d/5381.misc @@ -0,0 +1 @@ +Clean up code for sending federation EDUs. 
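The reworked loop below shares a single per-transaction EDU budget between the two fetchers, keeping two slots free for presence and read receipts; the 100-EDU ceiling comes from `MAX_EDUS_PER_TRANSACTION`. A standalone toy model of that arithmetic (queue depths invented for illustration):

```python
# Toy model of the EDU budgeting in the change below; the fetchers and
# queue depths are made up, the arithmetic mirrors the patch.
MAX_EDUS_PER_TRANSACTION = 100

def get_device_update_edus(limit):
    return ["device-update"] * min(limit, 3)  # pretend 3 updates queued

def get_to_device_message_edus(limit):
    return ["to-device"] * min(limit, 5)  # pretend 5 messages queued

limit = MAX_EDUS_PER_TRANSACTION - 2  # reserve presence + read-receipt slots
device_update_edus = get_device_update_edus(limit)
limit -= len(device_update_edus)
to_device_edus = get_to_device_message_edus(limit)
pending_edus = device_update_edus + to_device_edus

assert len(pending_edus) <= MAX_EDUS_PER_TRANSACTION - 2
```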
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 564c57203d33..22a2735405df 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -189,11 +189,21 @@ def _transaction_transmission_loop(self): pending_pdus = [] while True: - device_message_edus, device_stream_id, dev_list_id = ( - # We have to keep 2 free slots for presence and rr_edus - yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2) + # We have to keep 2 free slots for presence and rr_edus + limit = MAX_EDUS_PER_TRANSACTION - 2 + + device_update_edus, dev_list_id = ( + yield self._get_device_update_edus(limit) + ) + + limit -= len(device_update_edus) + + to_device_edus, device_stream_id = ( + yield self._get_to_device_message_edus(limit) ) + pending_edus = device_update_edus + to_device_edus + # BEGIN CRITICAL SECTION # # In order to avoid a race condition, we need to make sure that @@ -208,10 +218,6 @@ def _transaction_transmission_loop(self): # We can only include at most 50 PDUs per transactions pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:] - pending_edus = [] - - # We can only include at most 100 EDUs per transactions - # rr_edus and pending_presence take at most one slot each pending_edus.extend(self._get_rr_edus(force_flush=False)) pending_presence = self._pending_presence self._pending_presence = {} @@ -232,7 +238,6 @@ def _transaction_transmission_loop(self): ) ) - pending_edus.extend(device_message_edus) pending_edus.extend( self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus)) ) @@ -272,10 +277,13 @@ def _transaction_transmission_loop(self): sent_edus_by_type.labels(edu.edu_type).inc() # Remove the acknowledged device messages from the database # Only bother if we actually sent some device messages - if device_message_edus: + if to_device_edus: yield self._store.delete_device_msgs_for_remote( self._destination, device_stream_id ) + + # also mark the device updates as sent + if device_update_edus: logger.info( "Marking as sent %r %r", self._destination, dev_list_id ) @@ -347,7 +355,7 @@ def _pop_pending_edus(self, limit): return pending_edus @defer.inlineCallbacks - def _get_new_device_messages(self, limit): + def _get_device_update_edus(self, limit): last_device_list = self._last_device_list_stream_id # Retrieve list of new device updates to send to the destination @@ -366,15 +374,19 @@ def _get_new_device_messages(self, limit): assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs" + defer.returnValue((edus, now_stream_id)) + + @defer.inlineCallbacks + def _get_to_device_message_edus(self, limit): last_device_stream_id = self._last_device_stream_id to_device_stream_id = self._store.get_to_device_stream_token() contents, stream_id = yield self._store.get_new_device_msgs_for_remote( self._destination, last_device_stream_id, to_device_stream_id, - limit - len(edus), + limit, ) - edus.extend( + edus = [ Edu( origin=self._server_name, destination=self._destination, @@ -382,6 +394,6 @@ def _get_new_device_messages(self, limit): content=content, ) for content in contents - ) + ] - defer.returnValue((edus, stream_id, now_stream_id)) + defer.returnValue((edus, stream_id)) From b59a4eba644d123fce03809ead2121e9e0da6645 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 13 Jun 2019 14:49:25 +0100 Subject: [PATCH 206/231] Updates to the federation_client script (#5447) 
* py3 fixes for federation_client * .well-known support for federation_client --- changelog.d/5447.misc | 1 + scripts-dev/federation_client.py | 43 ++++++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5447.misc diff --git a/changelog.d/5447.misc b/changelog.d/5447.misc new file mode 100644 index 000000000000..dd520684044a --- /dev/null +++ b/changelog.d/5447.misc @@ -0,0 +1 @@ +Update federation_client dev script to support `.well-known` and work with python3. diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index e0287c8c6ccf..41e7b244187a 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -21,7 +21,8 @@ import base64 import json import sys -from urlparse import urlparse, urlunparse + +from six.moves.urllib import parse as urlparse import nacl.signing import requests @@ -145,7 +146,7 @@ def request_json(method, origin_name, origin_key, destination, path, content): for key, sig in signed_json["signatures"][origin_name].items(): header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig) - authorization_headers.append(bytes(header)) + authorization_headers.append(header.encode("ascii")) print("Authorization: %s" % header, file=sys.stderr) dest = "matrix://%s%s" % (destination, path) @@ -250,7 +251,7 @@ def read_args_from_config(args): class MatrixConnectionAdapter(HTTPAdapter): @staticmethod - def lookup(s): + def lookup(s, skip_well_known=False): if s[-1] == ']': # ipv6 literal (with no port) return s, 8448 @@ -263,19 +264,51 @@ def lookup(s): raise ValueError("Invalid host:port '%s'" % s) return out[0], port + # try a .well-known lookup + if not skip_well_known: + well_known = MatrixConnectionAdapter.get_well_known(s) + if well_known: + return MatrixConnectionAdapter.lookup( + well_known, skip_well_known=True + ) + try: srv = srvlookup.lookup("matrix", "tcp", s)[0] return srv.host, srv.port except Exception: return s, 8448 + @staticmethod + def get_well_known(server_name): + uri = "https://%s/.well-known/matrix/server" % (server_name, ) + print("fetching %s" % (uri, ), file=sys.stderr) + + try: + resp = requests.get(uri) + if resp.status_code != 200: + print("%s gave %i" % (uri, resp.status_code), file=sys.stderr) + return None + + parsed_well_known = resp.json() + if not isinstance(parsed_well_known, dict): + raise Exception("not a dict") + if "m.server" not in parsed_well_known: + raise Exception("Missing key 'm.server'") + new_name = parsed_well_known['m.server'] + print("well-known lookup gave %s" % (new_name, ), file=sys.stderr) + return new_name + + except Exception as e: + print("Invalid response from %s: %s" % (uri, e, ), file=sys.stderr) + return None + def get_connection(self, url, proxies=None): - parsed = urlparse(url) + parsed = urlparse.urlparse(url) (host, port) = self.lookup(parsed.netloc) netloc = "%s:%d" % (host, port) print("Connecting to %s" % (netloc,), file=sys.stderr) - url = urlunparse( + url = urlparse.urlunparse( ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) ) return super(MatrixConnectionAdapter, self).get_connection(url, proxies) From 4f68188d0bbdb1966250375d34125572eb82a117 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 13 Jun 2019 16:42:36 +0100 Subject: [PATCH 207/231] Change to absolute path for contrib/docker because this file is reproduced on dockerhub and relative paths don't work --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/docker/README.md b/docker/README.md index df5d0151e23b..5a596eecb977 100644 --- a/docker/README.md +++ b/docker/README.md @@ -14,7 +14,7 @@ This image is designed to run either with an automatically generated configuration file or with a custom configuration that requires manual editing. An easy way to make use of this image is via docker-compose. See the -[contrib/docker](../contrib/docker) section of the synapse project for +[contrib/docker](https://github.com/matrix-org/synapse/tree/master/contrib/docker) section of the synapse project for examples. ### Without Compose (harder) From a10c8dae85d3706afbab588e1004350aa5b49539 Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 14 Jun 2019 21:09:33 +1000 Subject: [PATCH 208/231] fix prometheus rendering error --- synapse/metrics/__init__.py | 2 +- tests/storage/test_event_metrics.py | 61 +++++++++++------------------ 2 files changed, 24 insertions(+), 39 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 539c35352868..0d3ae1a43d8d 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -227,7 +227,7 @@ def collect(self): break for i in self.buckets: - res.append([i, buckets.get(i, 0)]) + res.append([str(i), buckets.get(i, 0)]) res.append(["+Inf", sum(data.values())]) diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 20a068f1fcc6..1655fcdafce5 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from prometheus_client.exposition import generate_latest + from synapse.metrics import REGISTRY from synapse.types import Requester, UserID @@ -52,46 +54,29 @@ def test_exposed_to_prometheus(self): self.reactor.advance(60 * 60 * 1000) self.pump(1) - items = list( + items = set( filter( - lambda x: x.name == "synapse_forward_extremities", - list(REGISTRY.collect()), + lambda x: b"synapse_forward_extremities_" in x, + generate_latest(REGISTRY).split(b"\n"), ) ) - # Check the values are what we want - buckets = {} - _count = 0 - _sum = 0 - - for i in items[0].samples: - if i[0].endswith("_bucket"): - buckets[i[1]['le']] = i[2] - elif i[0].endswith("_count"): - _count = i[2] - elif i[0].endswith("_sum"): - _sum = i[2] + expected = set([ + b'synapse_forward_extremities_bucket{le="1.0"} 0.0', + b'synapse_forward_extremities_bucket{le="2.0"} 2.0', + b'synapse_forward_extremities_bucket{le="3.0"} 0.0', + b'synapse_forward_extremities_bucket{le="5.0"} 0.0', + b'synapse_forward_extremities_bucket{le="7.0"} 1.0', + b'synapse_forward_extremities_bucket{le="10.0"} 0.0', + b'synapse_forward_extremities_bucket{le="15.0"} 0.0', + b'synapse_forward_extremities_bucket{le="20.0"} 0.0', + b'synapse_forward_extremities_bucket{le="50.0"} 0.0', + b'synapse_forward_extremities_bucket{le="100.0"} 0.0', + b'synapse_forward_extremities_bucket{le="200.0"} 0.0', + b'synapse_forward_extremities_bucket{le="500.0"} 0.0', + b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', + b'synapse_forward_extremities_count 3.0', + b'synapse_forward_extremities_sum 10.0', + ]) - # 3 buckets, 2 with 2 extrems, 1 with 6 extrems (bucketed as 7), and - # +Inf which is all - self.assertEqual( - buckets, - { - 1.0: 0, - 2.0: 2, - 3.0: 0, - 5.0: 0, - 7.0: 1, - 10.0: 0, - 15.0: 0, - 20.0: 0, - 50.0: 0, - 100.0: 0, - 200.0: 0, - 500.0: 0, - "+Inf": 3, - }, - ) - # 3 rooms, with 10 total events - self.assertEqual(_count, 3) - 
self.assertEqual(_sum, 10) + self.assertEqual(items, expected) From b2a6f90a672174c0c0f815b1e0843c02455b774d Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 14 Jun 2019 21:10:21 +1000 Subject: [PATCH 209/231] changelog --- changelog.d/5458.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5458.feature diff --git a/changelog.d/5458.feature b/changelog.d/5458.feature new file mode 100644 index 000000000000..9497f521c832 --- /dev/null +++ b/changelog.d/5458.feature @@ -0,0 +1 @@ +Statistics on forward extremities per room are now exposed via Prometheus. From d8db29c4818829df7a887cedf40b1e2ac49631e7 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 14 Jun 2019 13:03:46 +0100 Subject: [PATCH 210/231] Use python3 in the demo --- demo/start.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/demo/start.sh b/demo/start.sh index c4a1328a6f9e..5c3a8fe61f66 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -21,7 +21,7 @@ for port in 8080 8081 8082; do pushd demo/$port #rm $DIR/etc/$port.config - python -m synapse.app.homeserver \ + python3 -m synapse.app.homeserver \ --generate-config \ -H "localhost:$https_port" \ --config-path "$DIR/etc/$port.config" \ @@ -55,7 +55,7 @@ for port in 8080 8081 8082; do echo "report_stats: false" >> $DIR/etc/$port.config fi - python -m synapse.app.homeserver \ + python3 -m synapse.app.homeserver \ --config-path "$DIR/etc/$port.config" \ -D \ -vv \ From d0530382eeff053547304532167c0e4654af172c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Jun 2019 13:18:24 +0100 Subject: [PATCH 211/231] Track deactivated accounts in the database (#5378) --- changelog.d/5378.misc | 1 + synapse/handlers/deactivate_account.py | 4 + synapse/storage/registration.py | 114 ++++++++++++++++++ .../delta/55/users_alter_deactivated.sql | 19 +++ tests/rest/client/v2_alpha/test_account.py | 45 +++++++ 5 files changed, 183 insertions(+) create mode 100644 changelog.d/5378.misc create mode 100644 synapse/storage/schema/delta/55/users_alter_deactivated.sql diff --git a/changelog.d/5378.misc b/changelog.d/5378.misc new file mode 100644 index 000000000000..365e49d63402 --- /dev/null +++ b/changelog.d/5378.misc @@ -0,0 +1 @@ +Track deactivated accounts in the database. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 6a91f7698e66..b29089d82c57 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017, 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -114,6 +115,9 @@ def deactivate_account(self, user_id, erase_data, id_server=None): # parts users from rooms (if it isn't already running) self._start_user_parting() + # Mark the user as deactivated. + yield self.store.set_user_deactivated_status(user_id, True) + defer.returnValue(identity_server_supports_unbinding) def _start_user_parting(self): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 1dd1182e82cb..4c5751b57f50 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import re from six import iterkeys @@ -31,6 +32,8 @@ THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 +logger = logging.getLogger(__name__) + class RegistrationWorkerStore(SQLBaseStore): def __init__(self, db_conn, hs): @@ -598,11 +601,75 @@ def __init__(self, db_conn, hs): "user_threepids_grandfather", self._bg_user_threepids_grandfather, ) + self.register_background_update_handler( + "users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag, + ) + # Create a background job for culling expired 3PID validity tokens hs.get_clock().looping_call( self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS, ) + @defer.inlineCallbacks + def _backgroud_update_set_deactivated_flag(self, progress, batch_size): + """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1 + for each of them. + """ + + last_user = progress.get("user_id", "") + + def _backgroud_update_set_deactivated_flag_txn(txn): + txn.execute( + """ + SELECT + users.name, + COUNT(access_tokens.token) AS count_tokens, + COUNT(user_threepids.address) AS count_threepids + FROM users + LEFT JOIN access_tokens ON (access_tokens.user_id = users.name) + LEFT JOIN user_threepids ON (user_threepids.user_id = users.name) + WHERE password_hash IS NULL OR password_hash = '' + AND users.name > ? + GROUP BY users.name + ORDER BY users.name ASC + LIMIT ?; + """, + (last_user, batch_size), + ) + + rows = self.cursor_to_dict(txn) + + if not rows: + return True + + rows_processed_nb = 0 + + for user in rows: + if not user["count_tokens"] and not user["count_threepids"]: + self.set_user_deactivated_status_txn(txn, user["user_id"], True) + rows_processed_nb += 1 + + logger.info("Marked %d rows as deactivated", rows_processed_nb) + + self._background_update_progress_txn( + txn, "users_set_deactivated_flag", {"user_id": rows[-1]["user_id"]} + ) + + if batch_size > len(rows): + return True + else: + return False + + end = yield self.runInteraction( + "users_set_deactivated_flag", + _backgroud_update_set_deactivated_flag_txn, + ) + + if end: + yield self._end_background_update("users_set_deactivated_flag") + + defer.returnValue(batch_size) + @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id=None): """Adds an access token for the given user. @@ -1268,3 +1335,50 @@ def delete_threepid_session_txn(txn): "delete_threepid_session", delete_threepid_session_txn, ) + + def set_user_deactivated_status_txn(self, txn, user_id, deactivated): + self._simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"deactivated": 1 if deactivated else 0}, + ) + self._invalidate_cache_and_stream( + txn, self.get_user_deactivated_status, (user_id,), + ) + + @defer.inlineCallbacks + def set_user_deactivated_status(self, user_id, deactivated): + """Set the `deactivated` property for the provided user to the provided value. + + Args: + user_id (str): The ID of the user to set the status for. + deactivated (bool): The value to set for `deactivated`. + """ + + yield self.runInteraction( + "set_user_deactivated_status", + self.set_user_deactivated_status_txn, + user_id, deactivated, + ) + + @cachedInlineCallbacks() + def get_user_deactivated_status(self, user_id): + """Retrieve the value for the `deactivated` property for the provided user. + + Args: + user_id (str): The ID of the user to retrieve the status for. + + Returns: + defer.Deferred(bool): The requested value. 
+ """ + + res = yield self._simple_select_one_onecol( + table="users", + keyvalues={"name": user_id}, + retcol="deactivated", + desc="get_user_deactivated_status", + ) + + # Convert the integer into a boolean. + defer.returnValue(res == 1) diff --git a/synapse/storage/schema/delta/55/users_alter_deactivated.sql b/synapse/storage/schema/delta/55/users_alter_deactivated.sql new file mode 100644 index 000000000000..dabdde489bf2 --- /dev/null +++ b/synapse/storage/schema/delta/55/users_alter_deactivated.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE users ADD deactivated SMALLINT DEFAULT 0 NOT NULL; + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('users_set_deactivated_flag', '{}'); diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 0d1c0868ce51..a60a4a3b875b 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import os import re from email.parser import Parser @@ -239,3 +240,47 @@ def _reset_password( ) self.render(request) self.assertEquals(expected_code, channel.code, channel.result) + + +class DeactivateTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + account.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def test_deactivate_account(self): + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + + request_data = json.dumps({ + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "test", + }, + "erase": False, + }) + request, channel = self.make_request( + "POST", + "account/deactivate", + request_data, + access_token=tok, + ) + self.render(request) + self.assertEqual(request.code, 200) + + store = self.hs.get_datastore() + + # Check that the user has been marked as deactivated. + self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id))) + + # Check that this access token has been invalidated. 
+ request, channel = self.make_request("GET", "account/whoami") + self.render(request) + self.assertEqual(request.code, 401) From 3ed595e327aee6d45ed0371c98e828d724c26b2d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 14:07:32 +0100 Subject: [PATCH 212/231] Prometheus histograms are cumalative --- synapse/metrics/__init__.py | 1 - synapse/storage/events.py | 3 ++- tests/storage/test_event_metrics.py | 20 ++++++++++---------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 0d3ae1a43d8d..8aee14a8a86a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -224,7 +224,6 @@ def collect(self): for i, bound in enumerate(self.buckets): if x <= bound: buckets[bound] = buckets.get(bound, 0) + data[x] - break for i in self.buckets: res.append([str(i), buckets.get(i, 0)]) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 1578403f7976..f631fb173345 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -228,7 +228,8 @@ def __init__(self, db_conn, hs): self._state_resolution_handler = hs.get_state_resolution_handler() # Collect metrics on the number of forward extremities that exist. - self._current_forward_extremities_amount = {} + # Counter of number of extremities to count + self._current_forward_extremities_amount = c_counter() BucketCollector( "synapse_forward_extremities", diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 1655fcdafce5..19f9ccf5e00d 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -64,16 +64,16 @@ def test_exposed_to_prometheus(self): expected = set([ b'synapse_forward_extremities_bucket{le="1.0"} 0.0', b'synapse_forward_extremities_bucket{le="2.0"} 2.0', - b'synapse_forward_extremities_bucket{le="3.0"} 0.0', - b'synapse_forward_extremities_bucket{le="5.0"} 0.0', - b'synapse_forward_extremities_bucket{le="7.0"} 1.0', - b'synapse_forward_extremities_bucket{le="10.0"} 0.0', - b'synapse_forward_extremities_bucket{le="15.0"} 0.0', - b'synapse_forward_extremities_bucket{le="20.0"} 0.0', - b'synapse_forward_extremities_bucket{le="50.0"} 0.0', - b'synapse_forward_extremities_bucket{le="100.0"} 0.0', - b'synapse_forward_extremities_bucket{le="200.0"} 0.0', - b'synapse_forward_extremities_bucket{le="500.0"} 0.0', + b'synapse_forward_extremities_bucket{le="3.0"} 2.0', + b'synapse_forward_extremities_bucket{le="5.0"} 2.0', + b'synapse_forward_extremities_bucket{le="7.0"} 3.0', + b'synapse_forward_extremities_bucket{le="10.0"} 3.0', + b'synapse_forward_extremities_bucket{le="15.0"} 3.0', + b'synapse_forward_extremities_bucket{le="20.0"} 3.0', + b'synapse_forward_extremities_bucket{le="50.0"} 3.0', + b'synapse_forward_extremities_bucket{le="100.0"} 3.0', + b'synapse_forward_extremities_bucket{le="200.0"} 3.0', + b'synapse_forward_extremities_bucket{le="500.0"} 3.0', b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', b'synapse_forward_extremities_count 3.0', b'synapse_forward_extremities_sum 10.0', From cc7cc853b1b9da283f2568243df49a97116bc61b Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 14 Jun 2019 14:07:47 +0100 Subject: [PATCH 213/231] Changelog --- changelog.d/5460.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5460.misc diff --git a/changelog.d/5460.misc b/changelog.d/5460.misc new file mode 100644 index 000000000000..badc8bb79ac6 --- /dev/null +++ b/changelog.d/5460.misc @@ -0,0 +1 @@ +Demo script 
now uses python3. From 9fd4f83f1a31abeae8d110f87fbd257608caa2e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 14:19:37 +0100 Subject: [PATCH 214/231] Newsfile --- changelog.d/5461.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5461.feature diff --git a/changelog.d/5461.feature b/changelog.d/5461.feature new file mode 100644 index 000000000000..9497f521c832 --- /dev/null +++ b/changelog.d/5461.feature @@ -0,0 +1 @@ +Statistics on forward extremities per room are now exposed via Prometheus. From 6d56a694f4cbfaf9c57a56837d4170e6c6783f3c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 7 Jun 2019 15:30:54 +0100 Subject: [PATCH 215/231] Don't send renewal emails to deactivated users --- changelog.d/5394.bugfix | 1 + synapse/handlers/account_validity.py | 3 + synapse/handlers/deactivate_account.py | 6 ++ synapse/storage/_base.py | 4 +- synapse/storage/registration.py | 14 +++++ tests/rest/client/v2_alpha/test_register.py | 67 +++++++++++++-------- 6 files changed, 68 insertions(+), 27 deletions(-) create mode 100644 changelog.d/5394.bugfix diff --git a/changelog.d/5394.bugfix b/changelog.d/5394.bugfix new file mode 100644 index 000000000000..2ad9fbe82ce7 --- /dev/null +++ b/changelog.d/5394.bugfix @@ -0,0 +1 @@ +Fix a bug where deactivated users could receive renewal emails if the account validity feature is on. diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 261446517d76..5e0b92eb1cc2 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -110,6 +110,9 @@ def _send_renewal_email(self, user_id, expiration_ts): # Stop right here if the user doesn't have at least one email address. # In this case, they will have to ask their server admin to renew their # account manually. + # We don't need to do a specific check to make sure the account isn't + # deactivated, as a deactivated account isn't supposed to have any + # email address attached to it. if not addresses: return diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index b29089d82c57..7378b56c1dd0 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -43,6 +43,8 @@ def __init__(self, hs): # it left off (if it has work left to do). hs.get_reactor().callWhenRunning(self._start_user_parting) + self._account_validity_enabled = hs.config.account_validity.enabled + @defer.inlineCallbacks def deactivate_account(self, user_id, erase_data, id_server=None): """Deactivate a user's account @@ -115,6 +117,10 @@ def deactivate_account(self, user_id, erase_data, id_server=None): # parts users from rooms (if it isn't already running) self._start_user_parting() + # Remove all information on the user from the account_validity table. + if self._account_validity_enabled: + yield self.store.delete_account_validity_for_user(user_id) + # Mark the user as deactivated. yield self.store.set_user_deactivated_status(user_id, True) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ae891aa332a4..941c07fce540 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -299,12 +299,12 @@ def _set_expiration_date_when_missing(self): def select_users_with_no_expiration_date_txn(txn): """Retrieves the list of registered users with no expiration date from the - database. + database, filtering out deactivated users. 
""" sql = ( "SELECT users.name FROM users" " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" - " WHERE account_validity.user_id is NULL;" + " WHERE account_validity.user_id is NULL AND users.deactivated = 0;" ) txn.execute(sql, []) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 4c5751b57f50..9f910eac9c2e 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -251,6 +251,20 @@ def set_renewal_mail_status(self, user_id, email_sent): desc="set_renewal_mail_status", ) + @defer.inlineCallbacks + def delete_account_validity_for_user(self, user_id): + """Deletes the entry for the given user in the account validity table, removing + their expiration date and renewal token. + + Args: + user_id (str): ID of the user to remove from the account validity table. + """ + yield self._simple_delete_one( + table="account_validity", + keyvalues={"user_id": user_id}, + desc="delete_account_validity_for_user", + ) + @defer.inlineCallbacks def is_server_admin(self, user): res = yield self._simple_select_one_onecol( diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 8536e6777ac5..b35b21544678 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -26,7 +26,7 @@ from synapse.api.errors import Codes from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login -from synapse.rest.client.v2_alpha import account_validity, register, sync +from synapse.rest.client.v2_alpha import account, account_validity, register, sync from tests import unittest @@ -308,6 +308,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): login.register_servlets, sync.register_servlets, account_validity.register_servlets, + account.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -358,20 +359,7 @@ def sendmail(*args, **kwargs): def test_renewal_email(self): self.email_attempts = [] - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - # We need to manually add an email address otherwise the handler will do - # nothing. - now = self.hs.clock.time_msec() - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address="kermit@example.com", - validated_at=now, - added_at=now, - ) - ) + (user_id, tok) = self.create_user() # Move 6 days forward. This should trigger a renewal email to be sent. 
self.reactor.advance(datetime.timedelta(days=6).total_seconds()) @@ -396,6 +384,44 @@ def test_renewal_email(self): def test_manual_email_send(self): self.email_attempts = [] + (user_id, tok) = self.create_user() + request, channel = self.make_request( + b"POST", + "/_matrix/client/unstable/account_validity/send_mail", + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.assertEqual(len(self.email_attempts), 1) + + def test_deactivated_user(self): + self.email_attempts = [] + + (user_id, tok) = self.create_user() + + request_data = json.dumps({ + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "monkey", + }, + "erase": False, + }) + request, channel = self.make_request( + "POST", + "account/deactivate", + request_data, + access_token=tok, + ) + self.render(request) + self.assertEqual(request.code, 200) + + self.reactor.advance(datetime.timedelta(days=8).total_seconds()) + + self.assertEqual(len(self.email_attempts), 0) + + def create_user(self): user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") # We need to manually add an email address otherwise the handler will do @@ -410,16 +436,7 @@ def test_manual_email_send(self): added_at=now, ) ) - - request, channel = self.make_request( - b"POST", - "/_matrix/client/unstable/account_validity/send_mail", - access_token=tok, - ) - self.render(request) - self.assertEquals(channel.result["code"], b"200", channel.result) - - self.assertEqual(len(self.email_attempts), 1) + return (user_id, tok) def test_manual_email_send_expired_account(self): user_id = self.register_user("kermit", "monkey") From e0b77b004db33563ec0a08fe835406dbc1591b6b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Jun 2019 16:00:45 +0100 Subject: [PATCH 216/231] Fix background job for deactivated flag --- synapse/storage/registration.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9f910eac9c2e..d36917e4d6b3 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -642,7 +642,9 @@ def _backgroud_update_set_deactivated_flag_txn(txn): FROM users LEFT JOIN access_tokens ON (access_tokens.user_id = users.name) LEFT JOIN user_threepids ON (user_threepids.user_id = users.name) - WHERE password_hash IS NULL OR password_hash = '' + WHERE (users.password_hash IS NULL OR users.password_hash = '') + AND (users.appservice_id IS NULL OR users.appservice_id = '') + AND users.is_guest = 0 AND users.name > ? GROUP BY users.name ORDER BY users.name ASC @@ -666,7 +668,7 @@ def _backgroud_update_set_deactivated_flag_txn(txn): logger.info("Marked %d rows as deactivated", rows_processed_nb) self._background_update_progress_txn( - txn, "users_set_deactivated_flag", {"user_id": rows[-1]["user_id"]} + txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]} ) if batch_size > len(rows): From 304a1376c2fbe6758b7b0c1987d16fcea5205528 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 15:47:19 +0100 Subject: [PATCH 217/231] Fix 3PID invite room state over federation. Fixes that when a user exchanges a 3PID invite for a proper invite over federation it does not include the `invite_room_state` key. This was due to synapse incorrectly sending out two invite requests. 
---
 synapse/handlers/federation.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ac5ca791431a..65ac127930fe 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2613,12 +2613,6 @@ def on_exchange_third_party_invite_request(self, origin, room_id, event_dict):
         # though the sender isn't a local user.
         event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
 
-        # XXX we send the invite here, but send_membership_event also sends it,
-        # so we end up making two requests. I think this is redundant.
-        returned_invite = yield self.send_invite(origin, event)
-        # TODO: Make sure the signatures actually are correct.
-        event.signatures.update(returned_invite.signatures)
-
         member_handler = self.hs.get_room_member_handler()
         yield member_handler.send_membership_event(None, event, context)

From 3c9bb86fde7d0157a59fcd7e2588e1671d945e50 Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Fri, 14 Jun 2019 15:54:30 +0100
Subject: [PATCH 218/231] Newsfile

---
 changelog.d/5464.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5464.bugfix

diff --git a/changelog.d/5464.bugfix b/changelog.d/5464.bugfix
new file mode 100644
index 000000000000..8278d1bce9f0
--- /dev/null
+++ b/changelog.d/5464.bugfix
@@ -0,0 +1 @@
+Fix missing invite state after exchanging 3PID invites over federation.

From 4024520ff807eb6dec618332b203510b45fdb8da Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 14 Jun 2019 16:38:44 +0100
Subject: [PATCH 219/231] Changelog

---
 changelog.d/5465.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5465.bugfix

diff --git a/changelog.d/5465.bugfix b/changelog.d/5465.bugfix
new file mode 100644
index 000000000000..d1655c8ea704
--- /dev/null
+++ b/changelog.d/5465.bugfix
@@ -0,0 +1 @@
+Fix a crash happening when running a specific background update.

From 5cec6d1845d7daa4f36748e08b2d36ba0f564f58 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Fri, 14 Jun 2019 17:18:21 +0100
Subject: [PATCH 220/231] Fix changelog

---
 changelog.d/5465.bugfix | 1 -
 changelog.d/5465.misc   | 2 ++
 2 files changed, 2 insertions(+), 1 deletion(-)
 delete mode 100644 changelog.d/5465.bugfix
 create mode 100644 changelog.d/5465.misc

diff --git a/changelog.d/5465.bugfix b/changelog.d/5465.bugfix
deleted file mode 100644
index d1655c8ea704..000000000000
--- a/changelog.d/5465.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a crash happening when running a specific background update.
diff --git a/changelog.d/5465.misc b/changelog.d/5465.misc
new file mode 100644
index 000000000000..af5f0f8f4576
--- /dev/null
+++ b/changelog.d/5465.misc
@@ -0,0 +1,2 @@
+Track deactivated accounts in the database.
+

From f874b16b2e7208d3a202283c085340196d065560 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Wed, 12 Jun 2019 10:31:37 +0100
Subject: [PATCH 221/231] Add plugin APIs for implementations of custom event rules.

--- changelog.d/5440.feature | 1 + docs/sample_config.yaml | 13 ++++ synapse/config/homeserver.py | 2 + synapse/config/third_party_event_rules.py | 42 ++++++++++++ synapse/events/third_party_rules.py | 62 ++++++++++++++++++ synapse/handlers/federation.py | 68 ++++++++++++++++++- synapse/handlers/message.py | 14 +++- synapse/server.py | 7 ++ tests/rest/client/third_party_rules.py | 79 +++++++++++++++++++++++ 9 files changed, 284 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5440.feature create mode 100644 synapse/config/third_party_event_rules.py create mode 100644 synapse/events/third_party_rules.py create mode 100644 tests/rest/client/third_party_rules.py diff --git a/changelog.d/5440.feature b/changelog.d/5440.feature new file mode 100644 index 000000000000..63d9b58734be --- /dev/null +++ b/changelog.d/5440.feature @@ -0,0 +1 @@ +Allow server admins to define implementations of extra rules for allowing or denying incoming events. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 4d7e6f3eb5ac..bd80d97a9323 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1351,3 +1351,16 @@ password_config: # alias: "*" # room_id: "*" # action: allow + + +# Server admins can define a Python module that implements extra rules for +# allowing or denying incoming events. In order to work, this module needs to +# override the methods defined in synapse/events/third_party_rules.py. +# +# This feature is designed to be used in closed federations only, where each +# participating server enforces the same rules. +# +#third_party_event_rules: +# module: "my_custom_project.SuperRulesSet" +# config: +# example_option: 'things' diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 5c4fc8ff21e4..acadef4fd316 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -38,6 +38,7 @@ from .server_notices_config import ServerNoticesConfig from .spam_checker import SpamCheckerConfig from .stats import StatsConfig +from .third_party_event_rules import ThirdPartyRulesConfig from .tls import TlsConfig from .user_directory import UserDirectoryConfig from .voip import VoipConfig @@ -73,5 +74,6 @@ class HomeServerConfig( StatsConfig, ServerNoticesConfig, RoomDirectoryConfig, + ThirdPartyRulesConfig, ): pass diff --git a/synapse/config/third_party_event_rules.py b/synapse/config/third_party_event_rules.py new file mode 100644 index 000000000000..a89dd5f98aac --- /dev/null +++ b/synapse/config/third_party_event_rules.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.util.module_loader import load_module + +from ._base import Config + + +class ThirdPartyRulesConfig(Config): + def read_config(self, config): + self.third_party_event_rules = None + + provider = config.get("third_party_event_rules", None) + if provider is not None: + self.third_party_event_rules = load_module(provider) + + def default_config(self, **kwargs): + return """\ + # Server admins can define a Python module that implements extra rules for + # allowing or denying incoming events. In order to work, this module needs to + # override the methods defined in synapse/events/third_party_rules.py. + # + # This feature is designed to be used in closed federations only, where each + # participating server enforces the same rules. + # + #third_party_event_rules: + # module: "my_custom_project.SuperRulesSet" + # config: + # example_option: 'things' + """ diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py new file mode 100644 index 000000000000..9f98d5152326 --- /dev/null +++ b/synapse/events/third_party_rules.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + + +class ThirdPartyEventRules(object): + """Allows server admins to provide a Python module implementing an extra set of rules + to apply when processing events. + + This is designed to help admins of closed federations with enforcing custom + behaviours. + """ + + def __init__(self, hs): + self.third_party_rules = None + + self.store = hs.get_datastore() + + module = None + config = None + if hs.config.third_party_event_rules: + module, config = hs.config.third_party_event_rules + + if module is not None: + self.third_party_rules = module(config=config) + + @defer.inlineCallbacks + def check_event_allowed(self, event, context): + """Check if a provided event should be allowed in the given context. + + Args: + event (synapse.events.EventBase): The event to be checked. + context (synapse.events.snapshot.EventContext): The context of the event. + + Returns: + defer.Deferred(bool), True if the event should be allowed, False if not. + """ + if self.third_party_rules is None: + defer.returnValue(True) + + prev_state_ids = yield context.get_prev_state_ids(self.store) + + # Retrieve the state events from the database. + state_events = {} + for key, event_id in prev_state_ids.items(): + state_events[key] = yield self.store.get_event(event_id, allow_none=True) + + ret = yield self.third_party_rules.check_event_allowed(event, state_events) + defer.returnValue(ret) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ac5ca791431a..983ac9f915a9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,6 +34,7 @@ from synapse.api.errors import ( AuthError, CodeMessageException, + Codes, FederationDeniedError, FederationError, RequestSendFailed, @@ -127,6 +129,8 @@ def __init__(self, hs): self.room_queues = {} self._room_pdu_linearizer = Linearizer("fed_room_pdu") + self.third_party_event_rules = hs.get_third_party_event_rules() + @defer.inlineCallbacks def on_receive_pdu( self, origin, pdu, sent_to_us_directly=False, @@ -1258,6 +1262,15 @@ def on_make_join_request(self, room_id, user_id): logger.warn("Failed to create join %r because %s", event, e) raise e + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info("Creation of join %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` yield self.auth.check_from_context( @@ -1300,6 +1313,15 @@ def on_send_join_request(self, origin, pdu): origin, event ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info("Sending of join %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + logger.debug( "on_send_join_request: After _handle_new_event: %s, sigs: %s", event.event_id, @@ -1458,6 +1480,15 @@ def on_make_leave_request(self, room_id, user_id): builder=builder, ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.warning("Creation of leave %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + try: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_leave_request` @@ -1484,10 +1515,19 @@ def on_send_leave_request(self, origin, pdu): event.internal_metadata.outlier = False - yield self._handle_new_event( + context = yield self._handle_new_event( origin, event ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info("Sending of leave %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + logger.debug( "on_send_leave_request: After _handle_new_event: %s, sigs: %s", event.event_id, @@ -2550,6 +2590,18 @@ def exchange_third_party_invite( builder=builder ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info( + "Creation of threepid invite %s forbidden by third-party rules", + event, + ) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + event, context = yield self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) @@ -2598,6 +2650,18 @@ def on_exchange_third_party_invite_request(self, origin, room_id, event_dict): builder=builder, ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.warning( + "Exchange of threepid invite %s forbidden by third-party rules", + event, + ) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + event, context = yield self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0b02469cebb5..11650dc80cfe 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2014 - 2016 OpenMarket Ltd -# Copyright 2017 - 2018 New Vector Ltd +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -248,6 +249,7 @@ def __init__(self, hs): self.action_generator = hs.get_action_generator() self.spam_checker = hs.get_spam_checker() + self.third_party_event_rules = hs.get_third_party_event_rules() self._block_events_without_consent_error = ( self.config.block_events_without_consent_error @@ -658,6 +660,14 @@ def handle_new_client_event( else: room_version = yield self.store.get_room_version(event.room_id) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + try: yield self.auth.check_from_context(room_version, event, context) except AuthError as err: diff --git a/synapse/server.py b/synapse/server.py index 9229a68a8dba..a54e023cc98c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -35,6 +37,7 @@ from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory from synapse.events.spamcheck import SpamChecker +from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.events.utils import EventClientSerializer from synapse.federation.federation_client import FederationClient from synapse.federation.federation_server import ( @@ -178,6 +181,7 @@ def build_DEPENDENCY(self) 'groups_attestation_renewer', 'secrets', 'spam_checker', + 'third_party_event_rules', 'room_member_handler', 'federation_registry', 'server_notices_manager', @@ -483,6 +487,9 @@ def build_stats_handler(self): def build_spam_checker(self): return SpamChecker(self) + def build_third_party_event_rules(self): + return ThirdPartyEventRules(self) + def build_room_member_handler(self): if self.config.worker_app: return RoomMemberWorkerHandler(self) diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py new file mode 100644 index 000000000000..7167fc56b61e --- /dev/null +++ b/tests/rest/client/third_party_rules.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest import admin +from synapse.rest.client.v1 import login, room + +from tests import unittest + + +class ThirdPartyRulesTestModule(object): + def __init__(self, config): + pass + + def check_event_allowed(self, event, context): + if event.type == "foo.bar.forbidden": + return False + else: + return True + + @staticmethod + def parse_config(config): + return config + + +class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["third_party_event_rules"] = { + "module": "tests.rest.client.third_party_rules.ThirdPartyRulesTestModule", + "config": {}, + } + + self.hs = self.setup_test_homeserver(config=config) + return self.hs + + def test_third_party_rules(self): + """Tests that a forbidden event is forbidden from being sent, but an allowed one + can be sent. 
+ """ + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + room_id = self.helper.create_room_as(user_id, tok=tok) + + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id, + {}, + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id, + {}, + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) From 97d7e4c7b75999b991f53f8a7ee6b25d15442e92 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 17 Jun 2019 21:08:15 +1000 Subject: [PATCH 222/231] Move SyTest to Buildkite (#5459) Including workers! --- .buildkite/format_tap.py | 33 ++++ .../merge_base_branch.sh | 17 +- .buildkite/pipeline.yml | 60 ++++++++ .buildkite/synapse_sytest.sh | 145 ++++++++++++++++++ .circleci/config.yml | 67 -------- changelog.d/5459.misc | 1 + 6 files changed, 247 insertions(+), 76 deletions(-) create mode 100644 .buildkite/format_tap.py rename {.circleci => .buildkite}/merge_base_branch.sh (52%) create mode 100644 .buildkite/synapse_sytest.sh create mode 100644 changelog.d/5459.misc diff --git a/.buildkite/format_tap.py b/.buildkite/format_tap.py new file mode 100644 index 000000000000..94582f557145 --- /dev/null +++ b/.buildkite/format_tap.py @@ -0,0 +1,33 @@ +import sys +from tap.parser import Parser +from tap.line import Result, Unknown, Diagnostic + +out = ["### TAP Output for " + sys.argv[2]] + +p = Parser() + +in_error = False + +for line in p.parse_file(sys.argv[1]): + if isinstance(line, Result): + if in_error: + out.append("") + out.append("") + out.append("") + out.append("----") + out.append("") + in_error = False + + if not line.ok and not line.todo: + in_error = True + + out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description)) + out.append("") + out.append("
<details><summary>Show log</summary>")
+
+    elif isinstance(line, Diagnostic) and in_error:
+        out.append(line.text)
+
+if out:
+    for line in out[:-3]:
+        print(line)
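
For context, a rough sketch of the TAP stream this formatter consumes and the Markdown it emits; the test names and log lines below are hypothetical:

    # Hypothetical TAP input (results.tap), as produced by run-tests.pl with -O tap:
    #
    #   1..2
    #   ok 1 - Can log in with password
    #   not ok 2 - Sync includes heroes
    #   # server-0.log: Traceback (most recent call last):
    #
    # Approximate Markdown emitted for the Buildkite annotation:
    #
    #   ### TAP Output for <job label>
    #   FAILURE Test #2: ``Sync includes heroes``
    #
    #   <details><summary>Show log</summary>
    #   server-0.log: Traceback (most recent call last):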
diff --git a/.circleci/merge_base_branch.sh b/.buildkite/merge_base_branch.sh
similarity index 52%
rename from .circleci/merge_base_branch.sh
rename to .buildkite/merge_base_branch.sh
index 4c19fa70d745..26176d6465c3 100755
--- a/.circleci/merge_base_branch.sh
+++ b/.buildkite/merge_base_branch.sh
@@ -1,22 +1,21 @@
 #!/usr/bin/env bash
 
-set -e
+set -ex
 
-# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
-# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
-echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
-source $BASH_ENV
+if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
+    echo "Not merging forward, as this is a release branch"
+    exit 0
+fi
 
-if [[ -z "${CIRCLE_PR_NUMBER}" ]]
-then
-    echo "Can't figure out what the PR number is! Assuming merge target is develop."
+if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
+    echo "Not a pull request, or hasn't had a PR opened yet..."
 
     # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
     # can probably assume it's based on it and will be merged into it.
     GITBASE="develop"
 else
     # Get the reference, using the GitHub API
-    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+    GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
 fi
 
 # Show what we are before
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 8eddf8b93199..6c6229a2058d 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -2,6 +2,7 @@ env:
   CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
 
 steps:
+
   - command:
       - "python -m pip install tox"
       - "tox -e pep8"
@@ -46,6 +47,7 @@ steps:
 
   - wait
 
+
   - command:
       - "python -m pip install tox"
       - "tox -e py35-old,codecov"
@@ -181,3 +183,61 @@ steps:
           limit: 2
         - exit_status: 2
           limit: 2
+
+
+  - label: "SyTest - :python: 3.5 / SQLite / Monolith"
+    agents:
+      queue: "medium"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+      WORKERS: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    soft_fail: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
diff --git a/.buildkite/synapse_sytest.sh b/.buildkite/synapse_sytest.sh
new file mode 100644
index 000000000000..3011b88bb799
--- /dev/null
+++ b/.buildkite/synapse_sytest.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+#
+# Fetch sytest, and then run the tests for synapse. The entrypoint for the
+# sytest-synapse docker images.
+
+set -ex
+
+if [ -n "$BUILDKITE" ]
+then
+    SYNAPSE_DIR=`pwd`
+else
+    SYNAPSE_DIR="/src"
+fi
+
+# Attempt to find a sytest to use.
+# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
+if [ -d "/sytest" ]; then
+    # If the user has mounted in a SyTest checkout, use that.
+    echo "Using local sytests..."
+
+    # create ourselves a working directory and dos2unix some scripts therein
+    mkdir -p /work/jenkins
+    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
+        dos2unix -n "/sytest/$i" "/work/$i"
+    done
+    ln -sf /sytest/tests /work
+    ln -sf /sytest/keys /work
+    SYTEST_LIB="/sytest/lib"
+else
+    if [ -n "BUILDKITE_BRANCH" ]
+    then
+        branch_name=$BUILDKITE_BRANCH
+    else
+        # Otherwise, try and find out what branch the Synapse checkout is using. Fall back to develop if it's not a branch.
+        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
+    fi
+
+    # Try and fetch the branch
+    echo "Trying to get same-named sytest branch..."
+    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
+        # Probably a 404, fall back to develop
+        echo "Using develop instead..."
+        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
+    }
+
+    mkdir -p /work
+    tar -C /work --strip-components=1 -xf sytest.tar.gz
+    SYTEST_LIB="/work/lib"
+fi
+
+cd /work
+
+# PostgreSQL setup
+if [ -n "$POSTGRES" ]
+then
+    export PGUSER=postgres
+    export POSTGRES_DB_1=pg1
+    export POSTGRES_DB_2=pg2
+
+    # Start the database
+    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+
+    # Use the Jenkins script to write out the configuration for a PostgreSQL-backed Synapse
+    jenkins/prep_sytest_for_postgres.sh
+
+    # Make the test databases for the two Synapse servers that will be spun up
+    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
+    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
+
+fi
+
+if [ -n "$OFFLINE" ]; then
+    # if we're in offline mode, just put synapse into the virtualenv, and
+    # hope that the deps are up-to-date.
+    #
+    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
+    # so we just run setup.py explicitly.)
+    #
+    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
+else
+    # We've already created the virtualenv, but let's double check we have all
+    # deps.
+    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
+    /venv/bin/pip install -q --upgrade --no-cache-dir \
+        lxml psycopg2 coverage codecov tap.py
+
+    # Make sure all Perl deps are installed -- this is done in the docker build
+    # so will only install packages added since the last Docker build
+    ./install-deps.pl
+fi
+
+
+# Run the tests
+>&2 echo "+++ Running tests"
+
+RUN_TESTS=(
+    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
+)
+
+TEST_STATUS=0
+
+if [ -n "$WORKERS" ]; then
+    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
+else
+    RUN_TESTS+=(-I Synapse)
+fi
+
+"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
+
+if [ $TEST_STATUS -ne 0 ]; then
+    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
+else
+    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
+fi
+
+>&2 echo "--- Copying assets"
+
+# Copy out the logs
+mkdir -p /logs
+cp results.tap /logs/results.tap
+rsync --ignore-missing-args  --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
+
+# Upload coverage to codecov and upload files, if running on Buildkite
+if [ -n "$BUILDKITE" ]
+then
+    /venv/bin/coverage combine || true
+    /venv/bin/coverage xml || true
+    /venv/bin/codecov -X gcov -f coverage.xml
+
+    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
+    tar xvf buildkite.tar.gz
+    chmod +x ./buildkite-agent
+
+    # Upload the files
+    ./buildkite-agent artifact upload "/logs/**/*.log*"
+    ./buildkite-agent artifact upload "/logs/results.tap"
+
+    if [ $TEST_STATUS -ne 0 ]; then
+        # Annotate, if failure
+        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
+    fi
+fi
+
+
+exit $TEST_STATUS
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3c2b32c015cb..e4fd5ffa6b48 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -17,77 +17,10 @@ jobs:
       - run: docker push matrixdotorg/synapse:latest
       - run: docker push matrixdotorg/synapse:latest-py3
 
-  sytestpy3:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3postgres:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3merged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3postgresmerged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-
 workflows:
   version: 2
   build:
     jobs:
-      - sytestpy3:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy3postgres:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy3merged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
-      - sytestpy3postgresmerged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
       - dockerhubuploadrelease:
           filters:
             tags:
diff --git a/changelog.d/5459.misc b/changelog.d/5459.misc
new file mode 100644
index 000000000000..904e45f66b04
--- /dev/null
+++ b/changelog.d/5459.misc
@@ -0,0 +1 @@
+SyTest has been moved to Buildkite.

From eba7caf09fe9bb5f5a0d4b17c5dde1413343cadc Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Tue, 18 Jun 2019 00:59:00 +1000
Subject: [PATCH 223/231] Remove Postgres 9.4 support (#5448)

---
 .buildkite/docker-compose.py35.pg94.yaml | 21 ----------------
 .buildkite/pipeline.yml                  | 17 -------------
 CONTRIBUTING.rst                         | 19 +++++++--------
 UPGRADE.rst                              | 31 ++++++++++++++++++++++--
 changelog.d/5448.removal                 |  1 +
 docker/Dockerfile-pgtests                |  4 +--
 docker/run_pg_tests.sh                   |  2 +-
 docs/postgres.rst                        |  4 +--
 synapse/storage/engines/postgres.py      |  8 ++++--
 synapse/storage/search.py                | 22 -----------------
 10 files changed, 50 insertions(+), 79 deletions(-)
 delete mode 100644 .buildkite/docker-compose.py35.pg94.yaml
 create mode 100644 changelog.d/5448.removal

diff --git a/.buildkite/docker-compose.py35.pg94.yaml b/.buildkite/docker-compose.py35.pg94.yaml
deleted file mode 100644
index 978aedd1159e..000000000000
--- a/.buildkite/docker-compose.py35.pg94.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.4
-    environment:
-      POSTGRES_PASSWORD: postgres
-
-  testenv:
-    image: python:3.5
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /app
-    volumes:
-      - ..:/app
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 6c6229a2058d..20c7aab5a740 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -116,23 +116,6 @@ steps:
         - exit_status: 2
           limit: 2
 
-  - label: ":python: 3.5 / :postgres: 9.4"
-    env:
-      TRIAL_FLAGS: "-j 4"
-    command:
-      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
-    plugins:
-      - docker-compose#v2.1.0:
-          run: testenv
-          config:
-            - .buildkite/docker-compose.py35.pg94.yaml
-    retry:
-      automatic:
-        - exit_status: -1
-          limit: 2
-        - exit_status: 2
-          limit: 2
-
   - label: ":python: 3.5 / :postgres: 9.5"
     env:
       TRIAL_FLAGS: "-j 4"
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 9a283ced6eb6..2c44422a0e84 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,21 +30,20 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `CircleCI `_ and `Travis CI
-`_ for continuous integration. All
-pull requests to synapse get automatically tested by Travis and CircleCI.
-If your change breaks the build, this will be shown in GitHub, so please
-keep an eye on the pull request for feedback.
+We use `CircleCI `_ and `Buildkite
+`_ for continuous integration.
+Buildkite builds need to be authorised by a maintainer. If your change breaks
+the build, this will be shown in GitHub, so please keep an eye on the pull
+request for feedback.
 
 To run unit tests in a local development environment, you can use:
 
-- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
-  SQLite-backed Synapse on Python 2.7.
-- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
+  for SQLite-backed Synapse on Python 3.5.
 - ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
-- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
+- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
   (requires a running local PostgreSQL with access to create databases).
-- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
   (requires Docker). Entirely self-contained, recommended if you don't want to
   set up PostgreSQL yourself.
 
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 6032a505c945..1fb109a21837 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -49,6 +49,33 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v1.1
+=================
+
+Synapse 1.1 removes support for older Python and PostgreSQL versions, as
+outlined in `our deprecation notice `_.
+
+Minimum Python Version
+----------------------
+
+Synapse v1.1 has a minimum Python requirement of Python 3.5. Python 3.6 or
+Python 3.7 are recommended as they have improved internal string handling,
+significantly reducing memory usage.
+
+If you use current versions of the Matrix.org-distributed Debian packages or
+Docker images, action is not required.
+
+If you install Synapse in a Python virtual environment, please see "Upgrading to
+v0.34.0" for notes on setting up a new virtualenv under Python 3.
+
+Minimum PostgreSQL Version
+--------------------------
+
+If using PostgreSQL under Synapse, you will need to use PostgreSQL 9.5 or above.
+Please see the
+`PostgreSQL documentation `_
+for more details on upgrading your database.
+
 Upgrading to v1.0
 =================
 
@@ -71,11 +98,11 @@ server in a closed federation. This can be done in one of two ways:-
 * Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
 
 See the `sample configuration file `_
-for more details on these settings. 
+for more details on these settings.
 
 Email
 -----
-When a user requests a password reset, Synapse will send an email to the 
+When a user requests a password reset, Synapse will send an email to the
 user to confirm the request.
 
 Previous versions of Synapse delegated the job of sending this email to an
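
For the minimum-version requirement above, a quick way to check what your server is actually running is psycopg2's `server_version` attribute; a minimal sketch, with hypothetical connection details:

    import psycopg2

    # Hypothetical DSN; substitute your own database, user and host.
    conn = psycopg2.connect("dbname=synapse user=synapse_user host=localhost")

    # An integer such as 90407 for 9.4.7. After this change, Synapse refuses
    # to start when this value is below 90500 (i.e. 9.5.0).
    print(conn.server_version)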
diff --git a/changelog.d/5448.removal b/changelog.d/5448.removal
new file mode 100644
index 000000000000..33b9859daedc
--- /dev/null
+++ b/changelog.d/5448.removal
@@ -0,0 +1 @@
+PostgreSQL 9.4 is no longer supported. Synapse requires Postgres 9.5 or above for Postgres support.
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
index 7da8eeb9eb0e..3bfee845c658 100644
--- a/docker/Dockerfile-pgtests
+++ b/docker/Dockerfile-pgtests
@@ -3,10 +3,10 @@
 FROM matrixdotorg/sytest:latest
 
 # The Sytest image doesn't come with python, so install that
-RUN apt-get -qq install -y python python-dev python-pip
+RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
 
 # We need tox to run the tests in run_pg_tests.sh
-RUN pip install tox
+RUN python3 -m pip install tox
 
 ADD run_pg_tests.sh /pg_tests.sh
 ENTRYPOINT /pg_tests.sh
diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh
index e77424c41a37..d18d1e4c8e87 100755
--- a/docker/run_pg_tests.sh
+++ b/docker/run_pg_tests.sh
@@ -17,4 +17,4 @@ su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start'
 # Run the tests
 cd /src
 export TRIAL_FLAGS="-j 4"
-tox --workdir=/tmp -e py27-postgres
+tox --workdir=/tmp -e py35-postgres
diff --git a/docs/postgres.rst b/docs/postgres.rst
index e81e10403f57..33f58e3acea3 100644
--- a/docs/postgres.rst
+++ b/docs/postgres.rst
@@ -1,7 +1,7 @@
 Using Postgres
 --------------
 
-Postgres version 9.4 or later is known to work.
+Postgres version 9.5 or later is known to work.
 
 Install postgres client libraries
 =================================
@@ -16,7 +16,7 @@ a postgres database.
 * For other pre-built packages, please consult the documentation from the
   relevant package.
 
-* If you installed synapse `in a virtualenv 
+* If you installed synapse `in a virtualenv
   <../INSTALL.md#installing-from-source>`_, you can install the library with::
 
       ~/synapse/env/bin/pip install matrix-synapse[postgres]
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 1b97ee74e3a7..289b6bc281ad 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -45,6 +45,10 @@ def on_new_connection(self, db_conn):
         # together. For example, version 8.1.5 will be returned as 80105
         self._version = db_conn.server_version
 
+        # Are we on a supported PostgreSQL version?
+        if self._version < 90500:
+            raise RuntimeError("Synapse requires PostgreSQL 9.5 or above.")
+
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -64,9 +68,9 @@ def on_new_connection(self, db_conn):
     @property
     def can_native_upsert(self):
         """
-        Can we use native UPSERTs? This requires PostgreSQL 9.5+.
+        Can we use native UPSERTs?
         """
-        return self._version >= 90500
+        return True
 
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
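
As the comment in the hunk above notes, `server_version` packs two digits per version component for pre-10 PostgreSQL releases, so the 9.5 check reduces to integer arithmetic; a small sketch of that encoding (the helper name is our own, not Synapse's):

    def encode_pg_version(major, minor, patch):
        # Two digits per component: 8.1.5 -> 80105, 9.5.0 -> 90500.
        return major * 10000 + minor * 100 + patch

    assert encode_pg_version(8, 1, 5) == 80105
    assert encode_pg_version(9, 4, 7) < 90500   # now rejected on connection
    assert encode_pg_version(9, 5, 0) >= 90500  # accepted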
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index ff49eaae0278..10a27c207a2f 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -341,29 +341,7 @@ def store_search_entries_txn(self, txn, entries):
                 for entry in entries
             )
 
-            # inserts to a GIN index are normally batched up into a pending
-            # list, and then all committed together once the list gets to a
-            # certain size. The trouble with that is that postgres (pre-9.5)
-            # uses work_mem to determine the length of the list, and work_mem
-            # is typically very large.
-            #
-            # We therefore reduce work_mem while we do the insert.
-            #
-            # (postgres 9.5 uses the separate gin_pending_list_limit setting,
-            # so doesn't suffer the same problem, but changing work_mem will
-            # be harmless)
-            #
-            # Note that we don't need to worry about restoring it on
-            # exception, because exceptions will cause the transaction to be
-            # rolled back, including the effects of the SET command.
-            #
-            # Also: we use SET rather than SET LOCAL because there's lots of
-            # other stuff going on in this transaction, which want to have the
-            # normal work_mem setting.
-
-            txn.execute("SET work_mem='256kB'")
             txn.executemany(sql, args)
-            txn.execute("RESET work_mem")
 
         elif isinstance(self.database_engine, Sqlite3Engine):
             sql = (

From 839f9b923182208d1f0f57a0ff02fb0edd5d4a47 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens 
Date: Mon, 17 Jun 2019 16:24:28 +0100
Subject: [PATCH 224/231] One-shot demo server startup

Configure the demo servers to use untrusted TLS
certs so that they can communicate with each other.

This configuration makes them very unsafe, so I've added warnings about
it in the README.
---
 demo/README   |  8 +++++--
 demo/start.sh | 66 +++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/demo/README b/demo/README
index 0b584ceb15c5..0bec820ad657 100644
--- a/demo/README
+++ b/demo/README
@@ -1,9 +1,13 @@
+DO NOT USE THESE DEMO SERVERS IN PRODUCTION
+
 Requires you to have done:
     python setup.py develop
 
 
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required. 
-It will also start a web server on port 8000 pointed at the webclient.
-It will also start a web server on port 8000 pointed at the webclient.
+The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
+
+To enable the servers to communicate untrusted ssl certs are used. In order to do this the servers do not check the certs
+and are configured in a highly insecure way. Do not use these configuration files in production.
 
 stop.sh will stop the synapse servers and the webclient.
 
diff --git a/demo/start.sh b/demo/start.sh
index 5c3a8fe61f66..1c4f12d0bb1b 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -27,8 +27,70 @@ for port in 8080 8081 8082; do
         --config-path "$DIR/etc/$port.config" \
         --report-stats no
 
-    printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
-    echo 'enable_registration: true' >> $DIR/etc/$port.config
+    if ! grep -F "Customisation made by demo/start.sh" -q  $DIR/etc/$port.config; then
+        printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
+        
+        echo 'enable_registration: true' >> $DIR/etc/$port.config
+
+        # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
+        # accidentally bork me with your fancy settings.
+		listeners=$(cat <<-PORTLISTENERS
+		# Configure server to listen on both $https_port and $port
+		# This overrides some of the default settings above
+		listeners:
+		  - port: $https_port
+		    type: http
+		    tls: true
+		    resources:
+		      - names: [client, federation]
+		
+		  - port: $port
+		    tls: false
+		    bind_addresses: ['::1', '127.0.0.1']
+		    type: http
+		    x_forwarded: true
+		    resources:
+		      - names: [client, federation]
+		        compress: false
+		PORTLISTENERS
+		)
+        echo "${listeners}" >> $DIR/etc/$port.config
+
+        # Disable tls for the servers
+        printf '\n\n# Disable tls on the servers.' >> $DIR/etc/$port.config
+        echo '# DO NOT USE IN PRODUCTION' >> $DIR/etc/$port.config
+        echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' >> $DIR/etc/$port.config
+        echo 'federation_verify_certificates: false' >> $DIR/etc/$port.config
+
+        # Set tls paths
+        echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" >> $DIR/etc/$port.config
+        echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" >> $DIR/etc/$port.config
+
+        # Generate tls keys
+        openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"
+        
+        # Ignore keys from the trusted keys server
+        echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
+        echo 'trusted_key_servers:' >> $DIR/etc/$port.config
+        echo '  - server_name: "matrix.org"' >> $DIR/etc/$port.config
+        echo '    accept_keys_insecurely: true' >> $DIR/etc/$port.config
+
+        # Reduce the blacklist
+        blacklist=$(cat <<-BLACK
+		# Set the blacklist so that it doesn't include 127.0.0.1
+		federation_ip_range_blacklist:
+		  - '10.0.0.0/8'
+		  - '172.16.0.0/12'
+		  - '192.168.0.0/16'
+		  - '100.64.0.0/10'
+		  - '169.254.0.0/16'
+		  - '::1/128'
+		  - 'fe80::/64'
+		  - 'fc00::/7'
+		BLACK
+		)
+        echo "${blacklist}" >> $DIR/etc/$port.config
+    fi
 
     # Check script parameters
     if [ $# -eq 1 ]; then

From 2d6308a043153b149ead23b1a6567a03ae20496b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 17 Jun 2019 15:52:15 +0100
Subject: [PATCH 225/231] Newsfile

---
 changelog.d/5474.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5474.feature

diff --git a/changelog.d/5474.feature b/changelog.d/5474.feature
new file mode 100644
index 000000000000..63d9b58734be
--- /dev/null
+++ b/changelog.d/5474.feature
@@ -0,0 +1 @@
+Allow server admins to define implementations of extra rules for allowing or denying incoming events.

From 187d2837a9fc9d5b9e585f3a7f0f54f2ceac7d1b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 17 Jun 2019 15:48:57 +0100
Subject: [PATCH 226/231] Add third party rules hook into create room

---
 synapse/events/third_party_rules.py | 27 ++++++++++++++++++++++++---
 synapse/handlers/room.py            | 25 ++++++++++++++++++++++++-
 2 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 9f98d5152326..ee7b97ad39d7 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -17,8 +17,8 @@
 
 
 class ThirdPartyEventRules(object):
-    """Allows server admins to provide a Python module implementing an extra set of rules
-    to apply when processing events.
+    """Allows server admins to provide a Python module implementing an extra
+    set of rules to apply when processing events.
 
     This is designed to help admins of closed federations with enforcing custom
     behaviours.
@@ -46,7 +46,7 @@ def check_event_allowed(self, event, context):
             context (synapse.events.snapshot.EventContext): The context of the event.
 
         Returns:
-            defer.Deferred(bool), True if the event should be allowed, False if not.
+            defer.Deferred[bool]: True if the event should be allowed, False if not.
         """
         if self.third_party_rules is None:
             defer.returnValue(True)
@@ -60,3 +60,24 @@ def check_event_allowed(self, event, context):
 
         ret = yield self.third_party_rules.check_event_allowed(event, state_events)
         defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def on_create_room(self, requester, config, is_requester_admin):
+        """Intercept requests to create room to allow, deny or update the
+        request config.
+
+        Args:
+            requester (Requester)
+            config (dict): The creation config from the client.
+            is_requester_admin (bool): If the requester is an admin
+
+        Returns:
+            defer.Deferred
+        """
+
+        if self.third_party_rules is None:
+            return
+
+        yield self.third_party_rules.on_create_room(
+            requester, config, is_requester_admin
+        )
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 4a17911a87fa..74793bab335b 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -75,6 +75,10 @@ def __init__(self, hs):
         # linearizer to stop two upgrades happening at once
         self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
 
+        self._server_notices_mxid = hs.config.server_notices_mxid
+
+        self.third_party_event_rules = hs.get_third_party_event_rules()
+
     @defer.inlineCallbacks
     def upgrade_room(self, requester, old_room_id, new_version):
         """Replace a room with a new room with a different version
@@ -470,7 +474,26 @@ def create_room(self, requester, config, ratelimit=True,
 
         yield self.auth.check_auth_blocking(user_id)
 
-        if not self.spam_checker.user_may_create_room(user_id):
+        if (self._server_notices_mxid is not None and
+                requester.user.to_string() == self._server_notices_mxid):
+            # allow the server notices mxid to create rooms
+            is_requester_admin = True
+        else:
+            is_requester_admin = yield self.auth.is_server_admin(
+                requester.user,
+            )
+
+        # Check whether the third party rules allows/changes the room create
+        # request.
+        yield self.third_party_event_rules.on_create_room(
+            requester,
+            config,
+            is_requester_admin=is_requester_admin,
+        )
+
+        if not is_requester_admin and not self.spam_checker.user_may_create_room(
+            user_id,
+        ):
             raise SynapseError(403, "You are not permitted to create rooms")
 
         if ratelimit:
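
To illustrate the hooks wired up in this patch, here is a minimal sketch of a module loadable via the `third_party_event_rules` option; the class name and policies are hypothetical, and only the method signatures come from these patches:

    from synapse.api.errors import Codes, SynapseError


    class ExampleRulesModule(object):
        def __init__(self, config, **kwargs):
            # A later patch passes extra keyword arguments (such as
            # http_client), so accept **kwargs to stay compatible.
            self.config = config

        def check_event_allowed(self, event, state_events):
            # Returning a plain bool is fine; the caller yields the result.
            return event.type != "example.forbidden"

        def on_create_room(self, requester, config, is_requester_admin):
            # Hypothetical policy: only server admins may create rooms.
            if not is_requester_admin:
                raise SynapseError(
                    403, "Only server admins may create rooms", Codes.FORBIDDEN,
                )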

From 25d16fea780b071199ceb97bcf6a9eb21e4509b9 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens 
Date: Mon, 17 Jun 2019 16:50:31 +0100
Subject: [PATCH 227/231] Changelog

---
 changelog.d/5478.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5478.misc

diff --git a/changelog.d/5478.misc b/changelog.d/5478.misc
new file mode 100644
index 000000000000..829bb1e521d8
--- /dev/null
+++ b/changelog.d/5478.misc
@@ -0,0 +1 @@
+The demo servers talk to each other again.

From 112cf5a73a12c1618414f0e2ef4153bf6d4a89f9 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 17 Jun 2019 16:27:47 +0100
Subject: [PATCH 228/231] Add third party rules hook for 3PID invites

---
 synapse/events/third_party_rules.py | 32 ++++++++++++++++++++++++++++-
 synapse/handlers/room_member.py     | 10 +++++++++
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index ee7b97ad39d7..768cfa8e9c6b 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -35,7 +35,10 @@ def __init__(self, hs):
             module, config = hs.config.third_party_event_rules
 
         if module is not None:
-            self.third_party_rules = module(config=config)
+            self.third_party_rules = module(
+                config=config,
+                http_client=hs.get_simple_http_client(),
+            )
 
     @defer.inlineCallbacks
     def check_event_allowed(self, event, context):
@@ -81,3 +84,30 @@ def on_create_room(self, requester, config, is_requester_admin):
         yield self.third_party_rules.on_create_room(
             requester, config, is_requester_admin
         )
+
+    def check_threepid_can_be_invited(self, medium, address, room_id):
+        """Check if a provided 3PID can be invited in the given room.
+
+        Args:
+            medium (str): The 3PID's medium.
+            address (str): The 3PID's address.
+            room_id (str): The room we want to invite the threepid to.
+
+        Returns:
+            defer.Deferred[bool]: True if the 3PID can be invited, False if not.
+        """
+
+        if self.third_party_rules is None:
+            defer.returnValue(True)
+
+        state_ids = yield self.store.get_filtered_current_state_ids(room_id)
+        room_state_events = yield self.store.get_events(state_ids.values())
+
+        state_events = {}
+        for key, event_id in state_ids.items():
+            state_events[key] = room_state_events[event_id]
+
+        ret = yield self.third_party_rules.check_threepid_can_be_invited(
+            medium, address, state_events,
+        )
+        defer.returnValue(ret)
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 93ac986c86ee..458902bb7e15 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -72,6 +72,7 @@ def __init__(self, hs):
 
         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
+        self.third_party_event_rules = hs.get_third_party_event_rules()
         self._server_notices_mxid = self.config.server_notices_mxid
         self._enable_lookup = hs.config.enable_3pid_lookup
         self.allow_per_room_profiles = self.config.allow_per_room_profiles
@@ -723,6 +724,15 @@ def do_3pid_invite(
         # can't just rely on the standard ratelimiting of events.
         yield self.base_handler.ratelimit(requester)
 
+        can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
+            medium, address, room_id,
+        )
+        if not can_invite:
+            raise SynapseError(
+                403, "This third-party identifier can not be invited in this room",
+                Codes.FORBIDDEN,
+            )
+
         invitee = yield self._lookup_3pid(
             id_server, medium, address
         )

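A module-side counterpart to this hook might look like the following sketch.
The allowed-domain policy and class name are hypothetical; the
(medium, address, state_events) signature and the http_client constructor
argument match the patch above.

# Sketch of a module implementing the new 3PID invite hook: deny email
# invites outside an allowed domain (domain list is made up).
from twisted.internet import defer

ALLOWED_EMAIL_DOMAINS = {"example.com"}


class ExampleThirdPartyRules(object):
    def __init__(self, config, http_client=None):
        # Since this patch, Synapse also hands the module an HTTP client,
        # e.g. for checking the 3PID against an external service.
        self.http_client = http_client

    def check_threepid_can_be_invited(self, medium, address, state_events):
        if medium != "email":
            return defer.succeed(True)
        domain = address.rsplit("@", 1)[-1].lower()
        return defer.succeed(domain in ALLOWED_EMAIL_DOMAINS)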
From 9ce4220d6ca96644b00e4c014d7ee35505ca8b84 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 17 Jun 2019 16:33:16 +0100
Subject: [PATCH 229/231] Changelog

---
 changelog.d/5477.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5477.feature

diff --git a/changelog.d/5477.feature b/changelog.d/5477.feature
new file mode 100644
index 000000000000..63d9b58734be
--- /dev/null
+++ b/changelog.d/5477.feature
@@ -0,0 +1 @@
+Allow server admins to define implementations of extra rules for allowing or denying incoming events.

From 33ea87be3926c2cf5bd57170a808a20217433ad6 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 17 Jun 2019 17:39:38 +0100
Subject: [PATCH 230/231] Make check_threepid_can_be_invited async

---
 synapse/events/third_party_rules.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 768cfa8e9c6b..50ceeb1e8ea7 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -85,6 +85,7 @@ def on_create_room(self, requester, config, is_requester_admin):
             requester, config, is_requester_admin
         )
 
+    @defer.inlineCallbacks
     def check_threepid_can_be_invited(self, medium, address, room_id):
         """Check if a provided 3PID can be invited in the given room.
 

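This one-line fix matters because the method body uses `yield`: without
@defer.inlineCallbacks, calling it returns a bare generator rather than a
Deferred, so the caller's `yield` would never actually run the body. A
minimal illustration of the pattern (the `store` argument is hypothetical):

from twisted.internet import defer


@defer.inlineCallbacks
def check_room_has_state(store, room_id):
    # With the decorator, each `yield` waits on the Deferred and the call
    # itself returns a Deferred; without it, calling this function would
    # just build a generator object that is never iterated.
    state_ids = yield store.get_filtered_current_state_ids(room_id)
    defer.returnValue(len(state_ids) > 0)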
From 82d9d524bdb8ce2f54855589e44ce3a20ba1af37 Mon Sep 17 00:00:00 2001
From: cclauss 
Date: Mon, 17 Jun 2019 19:21:30 +0200
Subject: [PATCH 231/231] Fix seven contrib files with Python syntax errors
 (#5446)

* Fix seven contrib files with Python syntax errors

Signed-off-by: cclauss 
---
 changelog.d/5446.misc                      |   1 +
 contrib/cmdclient/console.py               | 109 +++++++++++----------
 contrib/cmdclient/http.py                  |  13 +--
 contrib/graph/graph.py                     |   5 +-
 contrib/graph/graph3.py                    |  21 ++--
 contrib/jitsimeetbridge/jitsimeetbridge.py |  67 ++++++-------
 contrib/scripts/kick_users.py              |  34 ++++---
 7 files changed, 131 insertions(+), 119 deletions(-)
 create mode 100644 changelog.d/5446.misc

diff --git a/changelog.d/5446.misc b/changelog.d/5446.misc
new file mode 100644
index 000000000000..e5209be0a67d
--- /dev/null
+++ b/changelog.d/5446.misc
@@ -0,0 +1 @@
+Update Python syntax in contrib/ to Python 3. 
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index 4918fa1a9ad5..462f1461131a 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 """ Starts a synapse client console. """
+from __future__ import print_function
 
 from twisted.internet import reactor, defer, threads
 from http import TwistedHttpClient
@@ -109,7 +110,7 @@ def do_config(self, line):
         by using $<key>. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
         """
         if len(line) == 0:
-            print json.dumps(self.config, indent=4)
+            print(json.dumps(self.config, indent=4))
             return
 
         try:
@@ -123,8 +124,8 @@ def do_config(self, line):
             ]
             for key, valid_vals in config_rules:
                 if key == args["key"] and args["val"] not in valid_vals:
-                    print "%s value must be one of %s" % (args["key"],
-                                                          valid_vals)
+                    print("%s value must be one of %s" % (args["key"],
+                                                          valid_vals))
                     return
 
             # toggle the http client verbosity
@@ -133,11 +134,11 @@ def do_config(self, line):
 
             # assign the new config
             self.config[args["key"]] = args["val"]
-            print json.dumps(self.config, indent=4)
+            print(json.dumps(self.config, indent=4))
 
             save_config(self.config)
         except Exception as e:
-            print e
+            print(e)
 
     def do_register(self, line):
         """Registers for a new account: "register  "
@@ -153,7 +154,7 @@ def do_register(self, line):
             pwd = getpass.getpass("Type a password for this user: ")
             pwd2 = getpass.getpass("Retype the password: ")
             if pwd != pwd2 or len(pwd) == 0:
-                print "Password mismatch."
+                print("Password mismatch.")
                 pwd = None
             else:
                 password = pwd
@@ -174,12 +175,12 @@ def _do_register(self, data, update_config):
         # check the registration flows
         url = self._url() + "/register"
         json_res = yield self.http_client.do_request("GET", url)
-        print json.dumps(json_res, indent=4)
+        print(json.dumps(json_res, indent=4))
 
         passwordFlow = None
         for flow in json_res["flows"]:
             if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
-                print "Unable to register: Home server requires captcha."
+                print("Unable to register: Home server requires captcha.")
                 return
             if flow["type"] == "m.login.password" and "stages" not in flow:
                 passwordFlow = flow
@@ -189,7 +190,7 @@ def _do_register(self, data, update_config):
             return
 
         json_res = yield self.http_client.do_request("POST", url, data=data)
-        print json.dumps(json_res, indent=4)
+        print(json.dumps(json_res, indent=4))
         if update_config and "user_id" in json_res:
             self.config["user"] = json_res["user_id"]
             self.config["token"] = json_res["access_token"]
@@ -215,7 +216,7 @@ def do_login(self, line):
                 reactor.callFromThread(self._do_login, user, p)
                 #print " got %s " % p
         except Exception as e:
-            print e
+            print(e)
 
     @defer.inlineCallbacks
     def _do_login(self, user, password):
@@ -227,13 +228,13 @@ def _do_login(self, user, password):
         }
         url = self._url() + path
         json_res = yield self.http_client.do_request("POST", url, data=data)
-        print json_res
+        print(json_res)
 
         if "access_token" in json_res:
             self.config["user"] = user
             self.config["token"] = json_res["access_token"]
             save_config(self.config)
-            print "Login successful."
+            print("Login successful.")
 
     @defer.inlineCallbacks
     def _check_can_login(self):
@@ -242,10 +243,10 @@ def _check_can_login(self):
         # submitting!
         url = self._url() + path
         json_res = yield self.http_client.do_request("GET", url)
-        print json_res
+        print(json_res)
 
         if "flows" not in json_res:
-            print "Failed to find any login flows."
+            print("Failed to find any login flows.")
             defer.returnValue(False)
 
         flow = json_res["flows"][0] # assume first is the one we want.
@@ -275,9 +276,9 @@ def _do_emailrequest(self, args):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
         if 'sid' in json_res:
-            print "Token sent. Your session ID is %s" % (json_res['sid'])
+            print("Token sent. Your session ID is %s" % (json_res['sid']))
 
     def do_emailvalidate(self, line):
         """Validate and associate a third party ID
@@ -297,7 +298,7 @@ def _do_emailvalidate(self, args):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
 
     def do_3pidbind(self, line):
         """Validate and associate a third party ID
@@ -317,7 +318,7 @@ def _do_3pidbind(self, args):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
 
     def do_join(self, line):
         """Joins a room: "join " """
@@ -325,7 +326,7 @@ def do_join(self, line):
             args = self._parse(line, ["roomid"], force_keys=True)
             self._do_membership_change(args["roomid"], "join", self._usr())
         except Exception as e:
-            print e
+            print(e)
 
     def do_joinalias(self, line):
         try:
@@ -333,7 +334,7 @@ def do_joinalias(self, line):
             path = "/join/%s" % urllib.quote(args["roomname"])
             reactor.callFromThread(self._run_and_pprint, "POST", path, {})
         except Exception as e:
-            print e
+            print(e)
 
     def do_topic(self, line):
         """"topic [set|get]  []"
@@ -343,17 +344,17 @@ def do_topic(self, line):
         try:
             args = self._parse(line, ["action", "roomid", "topic"])
             if "action" not in args or "roomid" not in args:
-                print "Must specify set|get and a room ID."
+                print("Must specify set|get and a room ID.")
                 return
             if args["action"].lower() not in ["set", "get"]:
-                print "Must specify set|get, not %s" % args["action"]
+                print("Must specify set|get, not %s" % args["action"])
                 return
 
             path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
 
             if args["action"].lower() == "set":
                 if "topic" not in args:
-                    print "Must specify a new topic."
+                    print("Must specify a new topic.")
                     return
                 body = {
                     "topic": args["topic"]
@@ -362,7 +363,7 @@ def do_topic(self, line):
             elif args["action"].lower() == "get":
                 reactor.callFromThread(self._run_and_pprint, "GET", path)
         except Exception as e:
-            print e
+            print(e)
 
     def do_invite(self, line):
         """Invite a user to a room: "invite  " """
@@ -373,7 +374,7 @@ def do_invite(self, line):
 
             reactor.callFromThread(self._do_invite, args["roomid"], user_id)
         except Exception as e:
-            print e
+            print(e)
 
     @defer.inlineCallbacks
     def _do_invite(self, roomid, userstring):
@@ -393,29 +394,29 @@ def _do_invite(self, roomid, userstring):
                 if 'public_key' in pubKeyObj:
                     pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
                 else:
-                    print "No public key found in pubkey response!"
+                    print("No public key found in pubkey response!")
 
                 sigValid = False
 
                 if pubKey:
                     for signame in json_res['signatures']:
                         if signame not in TRUSTED_ID_SERVERS:
-                            print "Ignoring signature from untrusted server %s" % (signame)
+                            print("Ignoring signature from untrusted server %s" % (signame))
                         else:
                             try:
                                 verify_signed_json(json_res, signame, pubKey)
                                 sigValid = True
-                                print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
+                                print("Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame))
                                 break
                             except SignatureVerifyException as e:
-                                print "Invalid signature from %s" % (signame)
-                                print e
+                                print("Invalid signature from %s" % (signame))
+                                print(e)
 
                 if sigValid:
-                    print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
+                    print("Resolved 3pid %s to %s" % (userstring, json_res['mxid']))
                     mxid = json_res['mxid']
                 else:
-                    print "Got association for %s but couldn't verify signature" % (userstring)
+                    print("Got association for %s but couldn't verify signature" % (userstring))
 
             if not mxid:
                 mxid = "@" + userstring + ":" + self._domain()
@@ -428,7 +429,7 @@ def do_leave(self, line):
             args = self._parse(line, ["roomid"], force_keys=True)
             self._do_membership_change(args["roomid"], "leave", self._usr())
         except Exception as e:
-            print e
+            print(e)
 
     def do_send(self, line):
         """Sends a message. "send  " """
@@ -453,10 +454,10 @@ def do_list(self, line):
         """
         args = self._parse(line, ["type", "roomid", "qp"])
         if not "type" in args or not "roomid" in args:
-            print "Must specify type and room ID."
+            print("Must specify type and room ID.")
             return
         if args["type"] not in ["members", "messages"]:
-            print "Unrecognised type: %s" % args["type"]
+            print("Unrecognised type: %s" % args["type"])
             return
         room_id = args["roomid"]
         path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
@@ -468,7 +469,7 @@ def do_list(self, line):
                     key_value = key_value_str.split("=")
                     qp[key_value[0]] = key_value[1]
                 except:
-                    print "Bad query param: %s" % key_value
+                    print("Bad query param: %s" % key_value)
                     return
 
         reactor.callFromThread(self._run_and_pprint, "GET", path,
@@ -508,14 +509,14 @@ def do_raw(self, line):
         args = self._parse(line, ["method", "path", "data"])
         # sanity check
         if "method" not in args or "path" not in args:
-            print "Must specify path and method."
+            print("Must specify path and method.")
             return
 
         args["method"] = args["method"].upper()
         valid_methods = ["PUT", "GET", "POST", "DELETE",
                          "XPUT", "XGET", "XPOST", "XDELETE"]
         if args["method"] not in valid_methods:
-            print "Unsupported method: %s" % args["method"]
+            print("Unsupported method: %s" % args["method"])
             return
 
         if "data" not in args:
@@ -524,7 +525,7 @@ def do_raw(self, line):
             try:
                 args["data"] = json.loads(args["data"])
             except Exception as e:
-                print "Data is not valid JSON. %s" % e
+                print("Data is not valid JSON. %s" % e)
                 return
 
         qp = {"access_token": self._tok()}
@@ -553,7 +554,7 @@ def do_stream(self, line):
             try:
                 timeout = int(args["timeout"])
             except ValueError:
-                print "Timeout must be in milliseconds."
+                print("Timeout must be in milliseconds.")
                 return
         reactor.callFromThread(self._do_event_stream, timeout)
 
@@ -566,7 +567,7 @@ def _do_event_stream(self, timeout):
                     "timeout": str(timeout),
                     "from": self.event_stream_token
                 })
-        print json.dumps(res, indent=4)
+        print(json.dumps(res, indent=4))
 
         if "chunk" in res:
             for event in res["chunk"]:
@@ -669,9 +670,9 @@ def _run_and_pprint(self, method, path, data=None,
                                                     data=data,
                                                     qparams=query_params)
         if alt_text:
-            print alt_text
+            print(alt_text)
         else:
-            print json.dumps(json_res, indent=4)
+            print(json.dumps(json_res, indent=4))
 
 
 def save_config(config):
@@ -680,16 +681,16 @@ def save_config(config):
 
 
 def main(server_url, identity_server_url, username, token, config_path):
-    print "Synapse command line client"
-    print "==========================="
-    print "Server: %s" % server_url
-    print "Type 'help' to get started."
-    print "Close this console with CTRL+C then CTRL+D."
+    print("Synapse command line client")
+    print("===========================")
+    print("Server: %s" % server_url)
+    print("Type 'help' to get started.")
+    print("Close this console with CTRL+C then CTRL+D.")
     if not username or not token:
-        print "-  'register ' - Register an account"
-        print "-  'stream' - Connect to the event stream"
-        print "-  'create ' - Create a room"
-        print "-  'send  ' - Send a message"
+        print("-  'register ' - Register an account")
+        print("-  'stream' - Connect to the event stream")
+        print("-  'create ' - Create a room")
+        print("-  'send  ' - Send a message")
     http_client = TwistedHttpClient()
 
     # the command line client
@@ -705,7 +706,7 @@ def main(server_url, identity_server_url, username, token, config_path):
                 http_client.verbose = "on" == syn_cmd.config["verbose"]
             except:
                 pass
-            print "Loaded config from %s" % config_path
+            print("Loaded config from %s" % config_path)
     except:
         pass
 
@@ -736,7 +737,7 @@ def main(server_url, identity_server_url, username, token, config_path):
     args = parser.parse_args()
 
     if not args.server:
-        print "You must supply a server URL to communicate with."
+        print("You must supply a server URL to communicate with.")
         parser.print_help()
         sys.exit(1)
 
diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
index c833f3f3186d..1bd600e148be 100644
--- a/contrib/cmdclient/http.py
+++ b/contrib/cmdclient/http.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
 from twisted.web.client import Agent, readBody
 from twisted.web.http_headers import Headers
 from twisted.internet import defer, reactor
@@ -141,15 +142,15 @@ def _create_request(self, method, url, producer=None, headers_dict={}):
         headers_dict["User-Agent"] = ["Synapse Cmd Client"]
 
         retries_left = 5
-        print "%s to %s with headers %s" % (method, url, headers_dict)
+        print("%s to %s with headers %s" % (method, url, headers_dict))
         if self.verbose and producer:
             if "password" in producer.data:
                 temp = producer.data["password"]
                 producer.data["password"] = "[REDACTED]"
-                print json.dumps(producer.data, indent=4)
+                print(json.dumps(producer.data, indent=4))
                 producer.data["password"] = temp
             else:
-                print json.dumps(producer.data, indent=4)
+                print(json.dumps(producer.data, indent=4))
 
         while True:
             try:
@@ -161,7 +162,7 @@ def _create_request(self, method, url, producer=None, headers_dict={}):
                 )
                 break
             except Exception as e:
-                print "uh oh: %s" % e
+                print("uh oh: %s" % e)
                 if retries_left:
                     yield self.sleep(2 ** (5 - retries_left))
                     retries_left -= 1
@@ -169,8 +170,8 @@ def _create_request(self, method, url, producer=None, headers_dict={}):
                     raise e
 
         if self.verbose:
-            print "Status %s %s" % (response.code, response.phrase)
-            print pformat(list(response.headers.getAllRawHeaders()))
+            print("Status %s %s" % (response.code, response.phrase))
+            print(pformat(list(response.headers.getAllRawHeaders())))
         defer.returnValue(response)
 
     def sleep(self, seconds):
diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
index afd1d446b4b1..e174ff5026dc 100644
--- a/contrib/graph/graph.py
+++ b/contrib/graph/graph.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,7 +49,7 @@ def make_graph(pdus, room, filename_prefix):
             c = colors.pop()
             color_map[o] = c
         except:
-            print "Run out of colours!"
+            print("Run out of colours!")
             color_map[o] = "black"
 
     graph = pydot.Dot(graph_name="Test")
@@ -93,7 +94,7 @@ def make_graph(pdus, room, filename_prefix):
             end_name = make_name(i, o)
 
             if end_name not in node_map:
-                print "%s not in nodes" % end_name
+                print("%s not in nodes" % end_name)
                 continue
 
             edge = pydot.Edge(node_map[start_name], node_map[end_name])
diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py
index 7d3b4d7eb6cd..fe1dc81e9063 100644
--- a/contrib/graph/graph3.py
+++ b/contrib/graph/graph3.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,19 +27,19 @@
 
 
 def make_graph(file_name, room_id, file_prefix, limit):
-    print "Reading lines"
+    print("Reading lines")
     with open(file_name) as f:
         lines = f.readlines()
 
-    print "Read lines"
+    print("Read lines")
 
     events = [FrozenEvent(json.loads(line)) for line in lines]
 
-    print "Loaded events."
+    print("Loaded events.")
 
     events.sort(key=lambda e: e.depth)
 
-    print "Sorted events"
+    print("Sorted events")
 
     if limit:
         events = events[-int(limit):]
@@ -55,7 +56,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
         content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
         content = content.replace("\n", "<br/>\n")
-        print content
+        print(content)
         content = []
         for key, value in unfreeze(event.get_dict()["content"]).items():
             if value is None:
@@ -74,7 +75,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
 
         content = "<br/>\n".join(content)
 
-        print content
+        print(content)
 
         label = (
             "<"
@@ -102,7 +103,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
         node_map[event.event_id] = node
         graph.add_node(node)
 
-    print "Created Nodes"
+    print("Created Nodes")
 
     for event in events:
         for prev_id, _ in event.prev_events:
@@ -120,15 +121,15 @@ def make_graph(file_name, room_id, file_prefix, limit):
             edge = pydot.Edge(node_map[event.event_id], end_node)
             graph.add_edge(edge)
 
-    print "Created edges"
+    print("Created edges")
 
     graph.write('%s.dot' % file_prefix, format='raw', prog='dot')
 
-    print "Created Dot"
+    print("Created Dot")
 
     graph.write_svg("%s.svg" % file_prefix, prog='dot')
 
-    print "Created svg"
+    print("Created svg")
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py
index 15f8e1c48bfc..e82d1be5d28a 100644
--- a/contrib/jitsimeetbridge/jitsimeetbridge.py
+++ b/contrib/jitsimeetbridge/jitsimeetbridge.py
@@ -8,8 +8,9 @@
 the bridge.
 
 Requires:
-npm install jquery jsdom 
+npm install jquery jsdom
 """
+from __future__ import print_function
 
 import gevent
 import grequests
@@ -51,7 +52,7 @@ def getEvent(self):
             req = grequests.get(url)
             resps = grequests.map([req])
             obj = json.loads(resps[0].content)
-            print "incoming from matrix",obj
+            print("incoming from matrix",obj)
             if 'end' not in obj:
                 continue
             self.token = obj['end']
@@ -60,22 +61,22 @@ def getEvent(self):
 
     def joinRoom(self, roomId):
         url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token
-        print url
+        print(url)
         headers={ 'Content-Type': 'application/json' }
         req = grequests.post(url, headers=headers, data='{}')
         resps = grequests.map([req])
         obj = json.loads(resps[0].content)
-        print "response: ",obj
+        print("response: ",obj)
 
     def sendEvent(self, roomId, evType, event):
         url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token
-        print url
-        print json.dumps(event)
+        print(url)
+        print(json.dumps(event))
         headers={ 'Content-Type': 'application/json' }
         req = grequests.post(url, headers=headers, data=json.dumps(event))
         resps = grequests.map([req])
         obj = json.loads(resps[0].content)
-        print "response: ",obj
+        print("response: ",obj)
 
 
 
@@ -85,31 +86,31 @@ def sendEvent(self, roomId, evType, event):
 def matrixLoop():
     while True:
         ev = matrixCli.getEvent()
-        print ev
+        print(ev)
         if ev['type'] == 'm.room.member':
-            print 'membership event'
+            print('membership event')
             if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME:
                 roomId = ev['room_id']
-                print "joining room %s" % (roomId)
+                print("joining room %s" % (roomId))
                 matrixCli.joinRoom(roomId)
         elif ev['type'] == 'm.room.message':
             if ev['room_id'] in xmppClients:
-                print "already have a bridge for that user, ignoring"
+                print("already have a bridge for that user, ignoring")
                 continue
-            print "got message, connecting"
+            print("got message, connecting")
             xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
             gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
         elif ev['type'] == 'm.call.invite':
-            print "Incoming call"
+            print("Incoming call")
             #sdp = ev['content']['offer']['sdp']
             #print "sdp: %s" % (sdp)
             #xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id'])
             #gevent.spawn(xmppClients[ev['room_id']].xmppLoop)
         elif ev['type'] == 'm.call.answer':
-            print "Call answered"
+            print("Call answered")
             sdp = ev['content']['answer']['sdp']
             if ev['room_id'] not in xmppClients:
-                print "We didn't have a call for that room"
+                print("We didn't have a call for that room")
                 continue
             # should probably check call ID too
             xmppCli = xmppClients[ev['room_id']]
@@ -146,7 +147,7 @@ def xmppPoke(self, xml):
         return obj
 
     def sendAnswer(self, answer):
-        print "sdp from matrix client",answer
+        print("sdp from matrix client",answer)
         p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
         jingle, out_err = p.communicate(answer)
         jingle = jingle % {
@@ -156,28 +157,28 @@ def sendAnswer(self, answer):
             'responder': self.jid,
             'sid': self.callsid
         }
-        print "answer jingle from sdp",jingle
+        print("answer jingle from sdp",jingle)
         res = self.sendIq(jingle)
-        print "reply from answer: ",res
+        print("reply from answer: ",res)
 
         self.ssrcs = {}
         jingleSoup = BeautifulSoup(jingle)
         for cont in jingleSoup.iq.jingle.findAll('content'):
             if cont.description:
                 self.ssrcs[cont['name']] = cont.description['ssrc']
-        print "my ssrcs:",self.ssrcs
+        print("my ssrcs:",self.ssrcs)
 
         gevent.joinall([
             gevent.spawn(self.advertiseSsrcs)
         ])
 
     def advertiseSsrcs(self):
-        time.sleep(7) 
-        print "SSRC spammer started"
+        time.sleep(7)
+        print("SSRC spammer started")
         while self.running:
             ssrcMsg = "%(nick)s" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] }
             res = self.sendIq(ssrcMsg)
-            print "reply from ssrc announce: ",res
+            print("reply from ssrc announce: ",res)
             time.sleep(10)
 
 
@@ -186,19 +187,19 @@ def xmppLoop(self):
         self.matrixCallId = time.time()
         res = self.xmppPoke("" % (self.nextRid(), HOST))
-        print res
+        print(res)
         self.sid = res.body['sid']
-        print "sid %s" % (self.sid)
+        print("sid %s" % (self.sid))
 
         res = self.sendIq("")
 
         res = self.xmppPoke("" % (self.nextRid(), self.sid, HOST))
 
         res = self.sendIq("")
-        print res
+        print(res)
         self.jid = res.body.iq.bind.jid.string
-        print "jid: %s" % (self.jid)
+        print("jid: %s" % (self.jid))
         self.shortJid = self.jid.split('-')[0]
 
         res = self.sendIq("")
@@ -217,13 +218,13 @@ def xmppLoop(self):
                 if p.c and p.c.nick:
                     u['nick'] = p.c.nick.string
                 self.muc['users'].append(u)
-        print "muc: ",self.muc
+        print("muc: ",self.muc)
 
         # wait for stuff
         while True:
-            print "waiting..." 
+            print("waiting...")
             res = self.sendIq("")
-            print "got from stream: ",res
+            print("got from stream: ",res)
             if res.body.iq:
                 jingles = res.body.iq.findAll('jingle')
                 if len(jingles):
@@ -232,15 +233,15 @@ def xmppLoop(self):
             elif 'type' in res.body and res.body['type'] == 'terminate':
                 self.running = False
                 del xmppClients[self.matrixRoom]
-                return 
+                return
 
     def handleInvite(self, jingle):
         self.initiator = jingle['initiator']
         self.callsid = jingle['sid']
         p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-        print "raw jingle invite",str(jingle)
+        print("raw jingle invite",str(jingle))
         sdp, out_err = p.communicate(str(jingle))
-        print "transformed remote offer sdp",sdp
+        print("transformed remote offer sdp",sdp)
         inviteEvent = {
             'offer': {
                 'type': 'offer',
@@ -252,7 +253,7 @@ def handleInvite(self, jingle):
         }
         matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent)
 
-matrixCli = TrivialMatrixClient(ACCESS_TOKEN)
+matrixCli = TrivialMatrixClient(ACCESS_TOKEN)  # Undefined name
 
 gevent.joinall([
     gevent.spawn(matrixLoop)
diff --git a/contrib/scripts/kick_users.py b/contrib/scripts/kick_users.py
index 5dfaec3ad0f8..b4a14385d05f 100755
--- a/contrib/scripts/kick_users.py
+++ b/contrib/scripts/kick_users.py
@@ -1,10 +1,16 @@
 #!/usr/bin/env python
+from __future__ import print_function
 from argparse import ArgumentParser
 import json
 import requests
 import sys
 import urllib
 
+try:
+    raw_input
+except NameError:  # Python 3
+    raw_input = input
+
 def _mkurl(template, kws):
     for key in kws:
         template = template.replace(key, kws[key])
@@ -13,7 +19,7 @@ def _mkurl(template, kws):
 def main(hs, room_id, access_token, user_id_prefix, why):
     if not why:
         why = "Automated kick."
-    print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)
+    print("Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix))
     room_state_url = _mkurl(
         "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN",
         {
@@ -22,13 +28,13 @@ def main(hs, room_id, access_token, user_id_prefix, why):
             "$TOKEN": access_token
         }
     )
-    print "Getting room state => %s" % room_state_url
+    print("Getting room state => %s" % room_state_url)
     res = requests.get(room_state_url)
-    print "HTTP %s" % res.status_code
+    print("HTTP %s" % res.status_code)
     state_events = res.json()
     if "error" in state_events:
-        print "FATAL"
-        print state_events
+        print("FATAL")
+        print(state_events)
         return
 
     kick_list = []
@@ -44,15 +50,15 @@ def main(hs, room_id, access_token, user_id_prefix, why):
             kick_list.append(event["state_key"])
 
     if len(kick_list) == 0:
-        print "No user IDs match the prefix '%s'" % user_id_prefix
+        print("No user IDs match the prefix '%s'" % user_id_prefix)
        return
 
-    print "The following user IDs will be kicked from %s" % room_name
+    print("The following user IDs will be kicked from %s" % room_name)
     for uid in kick_list:
-        print uid
+        print(uid)
     doit = raw_input("Continue? [Y]es\n")
     if len(doit) > 0 and doit.lower() == 'y':
-        print "Kicking members..." 
+        print("Kicking members...")
         # encode them all
         kick_list = [urllib.quote(uid) for uid in kick_list]
         for uid in kick_list:
@@ -69,14 +75,14 @@ def main(hs, room_id, access_token, user_id_prefix, why):
                 "membership": "leave",
                 "reason": why
             }
-            print "Kicking %s" % uid
+            print("Kicking %s" % uid)
             res = requests.put(kick_url, data=json.dumps(kick_body))
             if res.status_code != 200:
-                print "ERROR: HTTP %s" % res.status_code
+                print("ERROR: HTTP %s" % res.status_code)
                 if res.json().get("error"):
-                    print "ERROR: JSON %s" % res.json()
-
-
+                    print("ERROR: JSON %s" % res.json())
+
+
 if __name__ == "__main__":
     parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")