From 8ee69f299cb3360de5c88f0c6b07525d35247fbd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Jul 2019 17:55:11 +0100 Subject: [PATCH 01/80] Add basic function to get all data for a user out of synapse --- synapse/handlers/admin.py | 247 ++++++++++++++++++++++++++++++++++ synapse/storage/roommember.py | 20 +++ tests/handlers/test_admin.py | 210 +++++++++++++++++++++++++++++ tests/unittest.py | 2 +- 4 files changed, 478 insertions(+), 1 deletion(-) create mode 100644 tests/handlers/test_admin.py diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 941ebfa1072b..e424fc46bddb 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,9 +14,17 @@ # limitations under the License. import logging +import os +import tempfile + +from canonicaljson import json from twisted.internet import defer +from synapse.api.constants import Membership +from synapse.types import RoomStreamToken +from synapse.visibility import filter_events_for_client + from ._base import BaseHandler logger = logging.getLogger(__name__) @@ -89,3 +97,242 @@ def search_users(self, term): ret = yield self.store.search_users(term) defer.returnValue(ret) + + @defer.inlineCallbacks + def exfiltrate_user_data(self, user_id, writer): + """Write all data we have of the user to the specified directory. + + Args: + user_id (str) + writer (ExfiltrationWriter) + + Returns: + defer.Deferred + """ + # Get all rooms the user is in or has been in + rooms = yield self.store.get_rooms_for_user_where_membership_is( + user_id, + membership_list=( + Membership.JOIN, + Membership.LEAVE, + Membership.BAN, + Membership.INVITE, + ), + ) + + # We only try and fetch events for rooms the user has been in. If + # they've been e.g. invited to a room without joining then we handle + # those seperately. + rooms_user_has_been_in = yield self.store.get_rooms_user_has_been_in(user_id) + + for index, room in enumerate(rooms): + room_id = room.room_id + + logger.info( + "[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms) + ) + + forgotten = yield self.store.did_forget(user_id, room_id) + if forgotten: + logger.info("[%s] User forgot room %d, ignoring", room_id) + continue + + if room_id not in rooms_user_has_been_in: + # If we haven't been in the rooms then the filtering code below + # won't return anything, so we need to handle these cases + # explicitly. + + if room.membership == Membership.INVITE: + event_id = room.event_id + invite = yield self.store.get_event(event_id, allow_none=True) + if invite: + invited_state = invite.unsigned["invite_room_state"] + writer.write_invite(room_id, invite, invited_state) + + continue + + # We only want to bother fetching events up to the last time they + # were joined. We estimate that point by looking at the + # stream_ordering of the last membership if it wasn't a join. + if room.membership == Membership.JOIN: + stream_ordering = yield self.store.get_room_max_stream_ordering() + else: + stream_ordering = room.stream_ordering + + from_key = str(RoomStreamToken(0, 0)) + to_key = str(RoomStreamToken(None, stream_ordering)) + + written_events = set() # Events that we've processed in this room + + # We need to track gaps in the events stream so that we can then + # write out the state at those events. We do this by keeping track + # of events whose prev events we haven't seen. + + # Map from event ID to prev events that haven't been processed, + # dict[str, set[str]]. + event_to_unseen_prevs = {} + + # The reverse mapping to above, i.e. 
map from unseen event to parent + # events. dict[str, set[str]] + unseen_event_to_parents = {} + + # We fetch events in the room the user could see by fetching *all* + # events that we have and then filtering, this isn't the most + # efficient method perhaps but it does guarentee we get everything. + while True: + events, _ = yield self.store.paginate_room_events( + room_id, from_key, to_key, limit=100, direction="f" + ) + if not events: + break + + from_key = events[-1].internal_metadata.after + + events = yield filter_events_for_client(self.store, user_id, events) + + writer.write_events(room_id, events) + + # Update the extremity tracking dicts + for event in events: + # Check if we have any prev events that haven't been + # processed yet, and add those to the appropriate dicts. + unseen_events = set(event.prev_event_ids()) - written_events + if unseen_events: + event_to_unseen_prevs[event.event_id] = unseen_events + for unseen in unseen_events: + unseen_event_to_parents.setdefault(unseen, set()).add( + event.event_id + ) + + # Now check if this event is an unseen prev event, if so + # then we remove this event from the appropriate dicts. + for event_id in unseen_event_to_parents.pop(event.event_id, []): + event_to_unseen_prevs.get(event_id, set()).discard( + event.event_id + ) + + written_events.add(event.event_id) + + logger.info( + "Written %d events in room %s", len(written_events), room_id + ) + + # Extremities are the events who have at least one unseen prev event. + extremities = ( + event_id + for event_id, unseen_prevs in event_to_unseen_prevs.items() + if unseen_prevs + ) + for event_id in extremities: + if not event_to_unseen_prevs[event_id]: + continue + state = yield self.store.get_state_for_event(event_id) + writer.write_state(room_id, event_id, state) + + defer.returnValue(writer.finished()) + + +class ExfiltrationWriter(object): + """Interfaced used to specify how to write exfilrated data. + """ + + def write_events(self, room_id, events): + """Write a batch of events for a room. + + Args: + room_id (str) + events (list[FrozenEvent]) + """ + pass + + def write_state(self, room_id, event_id, state): + """Write the state at the given event in the room. + + This only gets called for backward extremities rather than for each + event. + + Args: + room_id (str) + event_id (str) + state (list[FrozenEvent]) + """ + pass + + def write_invite(self, room_id, event, state): + """Write an invite for the room, with associated invite state. + + Args: + room_id (str) + invite (FrozenEvent) + state (list[dict]): A subset of the state at the invite, with a + subset of the event keys (type, state_key, content and sender) + """ + + def finished(self): + """Called when exfiltration is complete, and the return valus is passed + to the requester. + """ + pass + + +class FileExfiltrationWriter(ExfiltrationWriter): + """An ExfiltrationWriter that writes the users data to a directory. + + Returns the directory location on completion. + + Args: + user_id (str): The user whose data is being exfiltrated. + directory (str|None): The directory to write the data to, if None then + will write to a temporary directory. 
+ """ + + def __init__(self, user_id, directory=None): + self.user_id = user_id + + if directory: + self.base_directory = directory + else: + self.base_directory = tempfile.mkdtemp( + prefix="synapse-exfiltrate__%s__" % (user_id,) + ) + + os.makedirs(self.base_directory, exist_ok=True) + if list(os.listdir(self.base_directory)): + raise Exception("Directory must be empty") + + def write_events(self, room_id, events): + room_directory = os.path.join(self.base_directory, "rooms", room_id) + os.makedirs(room_directory, exist_ok=True) + events_file = os.path.join(room_directory, "events") + + with open(events_file, "a") as f: + for event in events: + print(json.dumps(event.get_pdu_json()), file=f) + + def write_state(self, room_id, event_id, state): + room_directory = os.path.join(self.base_directory, "rooms", room_id) + state_directory = os.path.join(room_directory, "state") + os.makedirs(state_directory, exist_ok=True) + + event_file = os.path.join(state_directory, event_id) + + with open(event_file, "a") as f: + for event in state.values(): + print(json.dumps(event.get_pdu_json()), file=f) + + def write_invite(self, room_id, event, state): + self.write_events(room_id, [event]) + + # We write the invite state somewhere else as they aren't full events + # and are only a subset of the state at the event. + room_directory = os.path.join(self.base_directory, "rooms", room_id) + os.makedirs(room_directory, exist_ok=True) + + invite_state = os.path.join(room_directory, "invite_state") + + with open(invite_state, "a") as f: + for event in state.values(): + print(json.dumps(event), file=f) + + def finished(self): + return self.base_directory diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 8004aeb90974..32cfd010a58a 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -575,6 +575,26 @@ def f(txn): count = yield self.runInteraction("did_forget_membership", f) defer.returnValue(count == 0) + @defer.inlineCallbacks + def get_rooms_user_has_been_in(self, user_id): + """Get all rooms that the user has ever been in. + + Args: + user_id (str) + + Returns: + Deferred[set[str]]: Set of room IDs. + """ + + room_ids = yield self._simple_select_onecol( + table="room_memberships", + keyvalues={"membership": Membership.JOIN, "user_id": user_id}, + retcol="room_id", + desc="get_rooms_user_has_been_in", + ) + + return set(room_ids) + class RoomMemberStore(RoomMemberWorkerStore): def __init__(self, db_conn, hs): diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py new file mode 100644 index 000000000000..5e7d2d3361f3 --- /dev/null +++ b/tests/handlers/test_admin.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from collections import Counter + +from mock import Mock + +import synapse.api.errors +import synapse.handlers.admin +import synapse.rest.admin +import synapse.storage +from synapse.api.constants import EventTypes +from synapse.rest.client.v1 import login, room + +from tests import unittest + + +class ExfiltrateData(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.admin_handler = hs.get_handlers().admin_handler + + self.user1 = self.register_user("user1", "password") + self.token1 = self.login("user1", "password") + + self.user2 = self.register_user("user2", "password") + self.token2 = self.login("user2", "password") + + def test_single_public_joined_room(self): + """Test that we write *all* events for a public room + """ + room_id = self.helper.create_room_as( + self.user1, tok=self.token1, is_public=True + ) + self.helper.send(room_id, body="Hello!", tok=self.token1) + self.helper.join(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Hello again!", tok=self.token1) + + writer = Mock() + + self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + + writer.write_events.assert_called() + + # Since we can see all events there shouldn't be any extremities, so no + # state should be written + writer.write_state.assert_not_called() + + # Collect all events that were written + written_events = [] + for (called_room_id, events), _ in writer.write_events.call_args_list: + self.assertEqual(called_room_id, room_id) + written_events.extend(events) + + # Check that the right number of events were written + counter = Counter( + (event.type, getattr(event, "state_key", None)) for event in written_events + ) + self.assertEqual(counter[(EventTypes.Message, None)], 2) + self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) + self.assertEqual(counter[(EventTypes.Member, self.user2)], 1) + + def test_single_private_joined_room(self): + """Tests that we correctly write state when we can't see all events in + a room. + """ + room_id = self.helper.create_room_as(self.user1, tok=self.token1) + self.helper.send_state( + room_id, + EventTypes.RoomHistoryVisibility, + body={"history_visibility": "joined"}, + tok=self.token1, + ) + self.helper.send(room_id, body="Hello!", tok=self.token1) + self.helper.join(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Hello again!", tok=self.token1) + + writer = Mock() + + self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + + writer.write_events.assert_called() + + # Since we can't see all events there should be one extremity. + writer.write_state.assert_called_once() + + # Collect all events that were written + written_events = [] + for (called_room_id, events), _ in writer.write_events.call_args_list: + self.assertEqual(called_room_id, room_id) + written_events.extend(events) + + # Check that the right number of events were written + counter = Counter( + (event.type, getattr(event, "state_key", None)) for event in written_events + ) + self.assertEqual(counter[(EventTypes.Message, None)], 1) + self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) + self.assertEqual(counter[(EventTypes.Member, self.user2)], 1) + + def test_single_left_room(self): + """Tests that we don't see events in the room after we leave. 
+ """ + room_id = self.helper.create_room_as(self.user1, tok=self.token1) + self.helper.send(room_id, body="Hello!", tok=self.token1) + self.helper.join(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Hello again!", tok=self.token1) + self.helper.leave(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Helloooooo!", tok=self.token1) + + writer = Mock() + + self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + + writer.write_events.assert_called() + + # Since we can see all events there shouldn't be any extremities, so no + # state should be written + writer.write_state.assert_not_called() + + written_events = [] + for (called_room_id, events), _ in writer.write_events.call_args_list: + self.assertEqual(called_room_id, room_id) + written_events.extend(events) + + # Check that the right number of events were written + counter = Counter( + (event.type, getattr(event, "state_key", None)) for event in written_events + ) + self.assertEqual(counter[(EventTypes.Message, None)], 2) + self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) + self.assertEqual(counter[(EventTypes.Member, self.user2)], 2) + + def test_single_left_rejoined_private_room(self): + """Tests that see the correct events in private rooms when we + repeatedly join and leave. + """ + room_id = self.helper.create_room_as(self.user1, tok=self.token1) + self.helper.send_state( + room_id, + EventTypes.RoomHistoryVisibility, + body={"history_visibility": "joined"}, + tok=self.token1, + ) + self.helper.send(room_id, body="Hello!", tok=self.token1) + self.helper.join(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Hello again!", tok=self.token1) + self.helper.leave(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Helloooooo!", tok=self.token1) + self.helper.join(room_id, self.user2, tok=self.token2) + self.helper.send(room_id, body="Helloooooo!!", tok=self.token1) + + writer = Mock() + + self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + + writer.write_events.assert_called_once() + + # Since we joined/left/joined again we expect there to be two gaps. + self.assertEqual(writer.write_state.call_count, 2) + + written_events = [] + for (called_room_id, events), _ in writer.write_events.call_args_list: + self.assertEqual(called_room_id, room_id) + written_events.extend(events) + + # Check that the right number of events were written + counter = Counter( + (event.type, getattr(event, "state_key", None)) for event in written_events + ) + self.assertEqual(counter[(EventTypes.Message, None)], 2) + self.assertEqual(counter[(EventTypes.Member, self.user1)], 1) + self.assertEqual(counter[(EventTypes.Member, self.user2)], 3) + + def test_invite(self): + """Tests that pending invites get handled correctly. 
+ """ + room_id = self.helper.create_room_as(self.user1, tok=self.token1) + self.helper.send(room_id, body="Hello!", tok=self.token1) + self.helper.invite(room_id, self.user1, self.user2, tok=self.token1) + + writer = Mock() + + self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + + writer.write_events.assert_not_called() + writer.write_state.assert_not_called() + writer.write_invite.assert_called_once() + + args = writer.write_invite.call_args[0] + self.assertEqual(args[0], room_id) + self.assertEqual(args[1].content["membership"], "invite") + self.assertTrue(args[2]) # Assert there is at least one bit of state diff --git a/tests/unittest.py b/tests/unittest.py index d26804b5b507..684d5cb1cf4c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -443,7 +443,7 @@ def register_user(self, username, password, admin=False): "POST", "/_matrix/client/r0/admin/register", body.encode("utf8") ) self.render(request) - self.assertEqual(channel.code, 200) + self.assertEqual(channel.code, 200, channel.json_body) user_id = channel.json_body["user_id"] return user_id From 65dd5543f60bcd6c5137fb03ac297c97b9b16426 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Jul 2019 12:10:23 +0100 Subject: [PATCH 02/80] Newsfile --- changelog.d/5589.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5589.feature diff --git a/changelog.d/5589.feature b/changelog.d/5589.feature new file mode 100644 index 000000000000..a87e669dd4ea --- /dev/null +++ b/changelog.d/5589.feature @@ -0,0 +1 @@ +Add ability to pull all locally stored events out of synapse that a particular user can see. From 9f3c0a8556a82960c795b8dc41a4666e0adebfef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Jul 2019 17:55:26 +0100 Subject: [PATCH 03/80] Add basic admin cmd app --- synapse/app/_base.py | 17 +++- synapse/app/admin_cmd.py | 198 +++++++++++++++++++++++++++++++++++++++ synapse/config/_base.py | 48 +++++++++- 3 files changed, 257 insertions(+), 6 deletions(-) create mode 100644 synapse/app/admin_cmd.py diff --git a/synapse/app/_base.py b/synapse/app/_base.py index d50a9840d4f6..200978a58f51 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -48,7 +48,7 @@ def register_sighup(func): _sighup_callbacks.append(func) -def start_worker_reactor(appname, config): +def start_worker_reactor(appname, config, run_command=reactor.run): """ Run the reactor in the main process Daemonizes if necessary, and then configures some resources, before starting @@ -57,6 +57,7 @@ def start_worker_reactor(appname, config): Args: appname (str): application name which will be sent to syslog config (synapse.config.Config): config object + run_command (Callable[]): callable that actually runs the reactor """ logger = logging.getLogger(config.worker_app) @@ -69,11 +70,19 @@ def start_worker_reactor(appname, config): daemonize=config.worker_daemonize, print_pidfile=config.print_pidfile, logger=logger, + run_command=run_command, ) def start_reactor( - appname, soft_file_limit, gc_thresholds, pid_file, daemonize, print_pidfile, logger + appname, + soft_file_limit, + gc_thresholds, + pid_file, + daemonize, + print_pidfile, + logger, + run_command=reactor.run, ): """ Run the reactor in the main process @@ -88,6 +97,7 @@ def start_reactor( daemonize (bool): true to run the reactor in a background process print_pidfile (bool): whether to print the pid file, if daemonize is True logger (logging.Logger): logger instance to pass to Daemonize + run_command (Callable[]): callable that actually runs 
the reactor """ install_dns_limiter(reactor) @@ -103,7 +113,8 @@ def run(): change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) - reactor.run() + + run_command() if daemonize: if print_pidfile: diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py new file mode 100644 index 000000000000..bd73c47ae20a --- /dev/null +++ b/synapse/app/admin_cmd.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2016 OpenMarket Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import sys + +from twisted.internet import defer, task + +import synapse +from synapse.app import _base +from synapse.config._base import ConfigError +from synapse.config.homeserver import HomeServerConfig +from synapse.config.logger import setup_logging +from synapse.handlers.admin import FileExfiltrationWriter +from synapse.replication.slave.storage._base import BaseSlavedStore +from synapse.replication.slave.storage.account_data import SlavedAccountDataStore +from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore +from synapse.replication.slave.storage.client_ips import SlavedClientIpStore +from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore +from synapse.replication.slave.storage.devices import SlavedDeviceStore +from synapse.replication.slave.storage.events import SlavedEventStore +from synapse.replication.slave.storage.filtering import SlavedFilteringStore +from synapse.replication.slave.storage.groups import SlavedGroupServerStore +from synapse.replication.slave.storage.presence import SlavedPresenceStore +from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore +from synapse.replication.slave.storage.receipts import SlavedReceiptsStore +from synapse.replication.slave.storage.registration import SlavedRegistrationStore +from synapse.replication.slave.storage.room import RoomStore +from synapse.replication.tcp.client import ReplicationClientHandler +from synapse.server import HomeServer +from synapse.storage.engines import create_engine +from synapse.util.logcontext import LoggingContext +from synapse.util.versionstring import get_version_string + +logger = logging.getLogger("synapse.app.admin_cmd") + + +class AdminCmdSlavedStore( + SlavedReceiptsStore, + SlavedAccountDataStore, + SlavedApplicationServiceStore, + SlavedRegistrationStore, + SlavedFilteringStore, + SlavedPresenceStore, + SlavedGroupServerStore, + SlavedDeviceInboxStore, + SlavedDeviceStore, + SlavedPushRuleStore, + SlavedEventStore, + SlavedClientIpStore, + RoomStore, + BaseSlavedStore, +): + pass + + +class AdminCmdServer(HomeServer): + DATASTORE_CLASS = AdminCmdSlavedStore + + def _listen_http(self, listener_config): + pass + + def start_listening(self, listeners): + pass + + def build_tcp_replication(self): + return AdminCmdReplicationHandler(self) + + +class AdminCmdReplicationHandler(ReplicationClientHandler): + @defer.inlineCallbacks + def on_rdata(self, stream_name, token, rows): + pass + 
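[Note: the stubs above and below this point, together with the `run_command` hook threaded through `start_reactor` earlier in this patch, all serve one design: `admin_cmd` is a one-shot worker, so it neither listens for HTTP nor follows replication streams, and it must exit when its command finishes rather than run forever. `twisted.internet.task.react` is what makes the clean exit possible. A toy, self-contained illustration of `react` versus the never-returning `reactor.run` — a standalone sketch, not code from this patch:

from twisted.internet import defer, task


@defer.inlineCallbacks
def main(reactor):
    # task.react starts the reactor, waits for the Deferred returned by
    # main(), then stops the reactor and sets the process exit status --
    # exactly the lifecycle a one-shot admin command needs, and exactly
    # what reactor.run() (which never returns) cannot provide.
    yield task.deferLater(reactor, 0.1, lambda: None)  # stand-in for real work
    defer.returnValue(None)


if __name__ == "__main__":
    task.react(main)
]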
+ def get_streams_to_replicate(self): + return {} + + +@defer.inlineCallbacks +def export_data_command(hs, user_id, directory): + """Export data for a user. + + Args: + user_id (str) + directory (str|None): Directory to write output to. Will create a temp + directory if not specified. + """ + + res = yield hs.get_handlers().admin_handler.exfiltrate_user_data( + user_id, FileExfiltrationWriter(user_id, directory=directory) + ) + print(res) + + +def start(config_options): + parser = HomeServerConfig.create_argument_parser("Synapse Admin Command") + + subparser = parser.add_subparsers( + title="Admin Commands", + description="Choose and admin command to perform.", + required=True, + dest="command", + metavar="", + help="The admin command to perform.", + ) + export_data_parser = subparser.add_parser( + "export-data", help="Export all data for a user" + ) + export_data_parser.add_argument("user_id", help="User to extra data from") + export_data_parser.add_argument( + "--output-directory", + action="store", + metavar="DIRECTORY", + required=False, + help="The directory to store the exported data in. Must be emtpy. Defaults" + " to creating a temp directory.", + ) + + try: + config, args = HomeServerConfig.load_config_with_parser(parser, config_options) + except ConfigError as e: + sys.stderr.write("\n" + str(e) + "\n") + sys.exit(1) + + if config.worker_app is not None: + assert config.worker_app == "synapse.app.admin_cmd" + + # Update the config with some basic overrides so that don't have to specify + # a full worker config. + config.worker_app = "synapse.app.admin_cmd" + + if ( + not config.worker_daemonize + and not config.worker_log_file + and not config.worker_log_config + ): + # Since we're meant to be run as a "command" let's not redirect stdio + # unless we've actually set log config. + config.no_redirect_stdio = True + + # Explicitly disable background processes + config.update_user_directory = False + config.start_pushers = False + config.send_federation = False + + setup_logging(config, use_worker_options=True) + + synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + + database_engine = create_engine(config.database_config) + + ss = AdminCmdServer( + config.server_name, + db_config=config.database_config, + config=config, + version_string="Synapse/" + get_version_string(synapse), + database_engine=database_engine, + ) + + ss.setup() + + if args.command == "export-data": + command = lambda: export_data_command(ss, args.user_id, args.output_directory) + else: + # This shouldn't happen. + raise ConfigError("Unknown admin command %s" % (args.command,)) + + # We use task.react as the basic run command as it correctly handles tearing + # down the reactor when the deferreds resolve and setting the return value. + # We also make sure that `_base.start` gets run before we actually run the + # command. + + @defer.inlineCallbacks + def run(_reactor): + with LoggingContext("command"): + yield _base.start(ss, []) + yield command() + + _base.start_worker_reactor( + "synapse-admin-cmd", config, run_command=lambda: task.react(run) + ) + + +if __name__ == "__main__": + with LoggingContext("main"): + start(sys.argv[1:]) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 965478d8d59e..14d3f7c1fed7 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -201,6 +201,26 @@ def load_config(cls, description, argv): Returns: Config object. 
""" + config_parser = cls.create_argument_parser(description) + obj, _ = cls.load_config_with_parser(config_parser, argv) + + return obj + + @classmethod + def create_argument_parser(cls, description): + """Create an ArgumentParser instance with all the config flags. + + Doesn't support config-file-generation: used by the worker apps. + + Used for workers where we want to add extra flags/subcommands. + + Args: + description (str): App description + + Returns: + ArgumentParser + """ + config_parser = argparse.ArgumentParser(description=description) config_parser.add_argument( "-c", @@ -219,9 +239,31 @@ def load_config(cls, description, argv): " Defaults to the directory containing the last config file", ) - obj = cls() + # We can only invoke `add_arguments` on an actual object, but + # `add_arguments` should be side effect free so this is probably fine. + cls().invoke_all("add_arguments", config_parser) - obj.invoke_all("add_arguments", config_parser) + return config_parser + + @classmethod + def load_config_with_parser(cls, config_parser, argv): + """Parse the commandline and config files with the given parser + + Doesn't support config-file-generation: used by the worker apps. + + Used for workers where we want to add extra flags/subcommands. + + Args: + conifg_parser (ArgumentParser) + argv (list[str]) + + Returns: + tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed + config object and the parsed argparse.Namespace object from + `config_parser.parse_args(..)` + """ + + obj = cls() config_args = config_parser.parse_args(argv) @@ -244,7 +286,7 @@ def load_config(cls, description, argv): obj.invoke_all("read_arguments", config_args) - return obj + return obj, config_args @classmethod def load_or_generate_config(cls, description, argv): From 10fe904d88ae19d4214a249d9b24a99eb7edb618 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Jul 2019 17:21:27 +0100 Subject: [PATCH 04/80] Newsfile --- changelog.d/5597.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5597.feature diff --git a/changelog.d/5597.feature b/changelog.d/5597.feature new file mode 100644 index 000000000000..6f92748885b4 --- /dev/null +++ b/changelog.d/5597.feature @@ -0,0 +1 @@ +Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. From d0b849c86d93ace21bdf7f73e1411f33a9e1b2fe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 3 Jul 2019 15:03:38 +0100 Subject: [PATCH 05/80] Apply comment fixups from code review Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/handlers/admin.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index e424fc46bddb..6c905e97a7fc 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -100,7 +100,7 @@ def search_users(self, term): @defer.inlineCallbacks def exfiltrate_user_data(self, user_id, writer): - """Write all data we have of the user to the specified directory. + """Write all data we have on the user to the given writer. Args: user_id (str) @@ -178,7 +178,7 @@ def exfiltrate_user_data(self, user_id, writer): # We fetch events in the room the user could see by fetching *all* # events that we have and then filtering, this isn't the most - # efficient method perhaps but it does guarentee we get everything. + # efficient method perhaps but it does guarantee we get everything. 
while True: events, _ = yield self.store.paginate_room_events( room_id, from_key, to_key, limit=100, direction="f" @@ -233,7 +233,7 @@ def exfiltrate_user_data(self, user_id, writer): class ExfiltrationWriter(object): - """Interfaced used to specify how to write exfilrated data. + """Interface used to specify how to write exfiltrated data. """ def write_events(self, room_id, events): @@ -263,7 +263,7 @@ def write_invite(self, room_id, event, state): Args: room_id (str) - invite (FrozenEvent) + event (FrozenEvent) state (list[dict]): A subset of the state at the invite, with a subset of the event keys (type, state_key, content and sender) """ @@ -276,13 +276,13 @@ def finished(self): class FileExfiltrationWriter(ExfiltrationWriter): - """An ExfiltrationWriter that writes the users data to a directory. + """An ExfiltrationWriter that writes the user's data to a directory. Returns the directory location on completion. Args: user_id (str): The user whose data is being exfiltrated. - directory (str|None): The directory to write the data to, if None then + directory (str|None): The directory to write the data to. If None then will write to a temporary directory. """ From c061d4f237273f3400dc8e62aa7421f02caec3dd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 4 Jul 2019 11:07:09 +0100 Subject: [PATCH 06/80] Fixup from review comments. --- synapse/handlers/admin.py | 39 ++++++++++++++++++++---------------- tests/handlers/test_admin.py | 10 ++++----- 2 files changed, 27 insertions(+), 22 deletions(-) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 6c905e97a7fc..69d2c8c36fd6 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -99,7 +99,7 @@ def search_users(self, term): defer.returnValue(ret) @defer.inlineCallbacks - def exfiltrate_user_data(self, user_id, writer): + def export_user_data(self, user_id, writer): """Write all data we have on the user to the given writer. Args: @@ -107,7 +107,8 @@ def exfiltrate_user_data(self, user_id, writer): writer (ExfiltrationWriter) Returns: - defer.Deferred + defer.Deferred: Resolves when all data for a user has been written. + The returned value is that returned by `writer.finished()`. """ # Get all rooms the user is in or has been in rooms = yield self.store.get_rooms_for_user_where_membership_is( @@ -134,7 +135,7 @@ def exfiltrate_user_data(self, user_id, writer): forgotten = yield self.store.did_forget(user_id, room_id) if forgotten: - logger.info("[%s] User forgot room %d, ignoring", room_id) + logger.info("[%s] User forgot room %d, ignoring", user_id, room_id) continue if room_id not in rooms_user_has_been_in: @@ -172,9 +173,10 @@ def exfiltrate_user_data(self, user_id, writer): # dict[str, set[str]]. event_to_unseen_prevs = {} - # The reverse mapping to above, i.e. map from unseen event to parent - # events. dict[str, set[str]] - unseen_event_to_parents = {} + # The reverse mapping to above, i.e. map from unseen event to events + # that have the unseen event in their prev_events, i.e. the unseen + # events "children". 
dict[str, set[str]] + unseen_to_child_events = {} # We fetch events in the room the user could see by fetching *all* # events that we have and then filtering, this isn't the most @@ -200,14 +202,14 @@ def exfiltrate_user_data(self, user_id, writer): if unseen_events: event_to_unseen_prevs[event.event_id] = unseen_events for unseen in unseen_events: - unseen_event_to_parents.setdefault(unseen, set()).add( + unseen_to_child_events.setdefault(unseen, set()).add( event.event_id ) # Now check if this event is an unseen prev event, if so # then we remove this event from the appropriate dicts. - for event_id in unseen_event_to_parents.pop(event.event_id, []): - event_to_unseen_prevs.get(event_id, set()).discard( + for child_id in unseen_to_child_events.pop(event.event_id, []): + event_to_unseen_prevs.get(child_id, set()).discard( event.event_id ) @@ -233,7 +235,7 @@ def exfiltrate_user_data(self, user_id, writer): class ExfiltrationWriter(object): - """Interface used to specify how to write exfiltrated data. + """Interface used to specify how to write exported data. """ def write_events(self, room_id, events): @@ -254,7 +256,7 @@ def write_state(self, room_id, event_id, state): Args: room_id (str) event_id (str) - state (list[FrozenEvent]) + state (dict[tuple[str, str], FrozenEvent]) """ pass @@ -264,13 +266,16 @@ def write_invite(self, room_id, event, state): Args: room_id (str) event (FrozenEvent) - state (list[dict]): A subset of the state at the invite, with a - subset of the event keys (type, state_key, content and sender) + state (dict[tuple[str, str], dict]): A subset of the state at the + invite, with a subset of the event keys (type, state_key + content and sender) """ def finished(self): - """Called when exfiltration is complete, and the return valus is passed - to the requester. + """Called when all data has succesfully been exported and written. + + This functions return value is passed to the caller of + `export_user_data`. """ pass @@ -281,7 +286,7 @@ class FileExfiltrationWriter(ExfiltrationWriter): Returns the directory location on completion. Args: - user_id (str): The user whose data is being exfiltrated. + user_id (str): The user whose data is being exported. directory (str|None): The directory to write the data to. If None then will write to a temporary directory. 
""" @@ -293,7 +298,7 @@ def __init__(self, user_id, directory=None): self.base_directory = directory else: self.base_directory = tempfile.mkdtemp( - prefix="synapse-exfiltrate__%s__" % (user_id,) + prefix="synapse-exported__%s__" % (user_id,) ) os.makedirs(self.base_directory, exist_ok=True) diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index 5e7d2d3361f3..fc37c4328c3a 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -55,7 +55,7 @@ def test_single_public_joined_room(self): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called() @@ -94,7 +94,7 @@ def test_single_private_joined_room(self): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called() @@ -127,7 +127,7 @@ def test_single_left_room(self): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called() @@ -169,7 +169,7 @@ def test_single_left_rejoined_private_room(self): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_called_once() @@ -198,7 +198,7 @@ def test_invite(self): writer = Mock() - self.get_success(self.admin_handler.exfiltrate_user_data(self.user2, writer)) + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) writer.write_events.assert_not_called() writer.write_state.assert_not_called() From 9481707a52a89022bc5d99c181ceb9e7ec9ec9e9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 5 Jul 2019 11:10:19 +0100 Subject: [PATCH 07/80] Fixes to the federation rate limiter (#5621) - Put the default window_size back to 1000ms (broken by #5181) - Make the `rc_federation` config actually do something - fix an off-by-one error in the 'concurrent' limit - Avoid creating an unused `_PerHostRatelimiter` object for every single incoming request --- changelog.d/5621.bugfix | 1 + synapse/config/ratelimiting.py | 4 +- synapse/util/ratelimitutils.py | 16 ++--- tests/config/test_ratelimiting.py | 40 +++++++++++++ tests/util/test_ratelimitutils.py | 97 +++++++++++++++++++++++++++++++ tests/utils.py | 6 -- 6 files changed, 148 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5621.bugfix create mode 100644 tests/config/test_ratelimiting.py create mode 100644 tests/util/test_ratelimitutils.py diff --git a/changelog.d/5621.bugfix b/changelog.d/5621.bugfix new file mode 100644 index 000000000000..f1a2851f45ef --- /dev/null +++ b/changelog.d/5621.bugfix @@ -0,0 +1 @@ +Various minor fixes to the federation request rate limiter. 
diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 8c587f3fd2f8..33f31cf21303 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -23,7 +23,7 @@ def __init__(self, config, defaults={"per_second": 0.17, "burst_count": 3.0}): class FederationRateLimitConfig(object): _items_and_default = { - "window_size": 10000, + "window_size": 1000, "sleep_limit": 10, "sleep_delay": 500, "reject_limit": 50, @@ -54,7 +54,7 @@ def read_config(self, config, **kwargs): # Load the new-style federation config, if it exists. Otherwise, fall # back to the old method. - if "federation_rc" in config: + if "rc_federation" in config: self.rc_federation = FederationRateLimitConfig(**config["rc_federation"]) else: self.rc_federation = FederationRateLimitConfig( diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 27bceac00ef6..5ca4521ce36c 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -36,9 +36,11 @@ def __init__(self, clock, config): clock (Clock) config (FederationRateLimitConfig) """ - self.clock = clock - self._config = config - self.ratelimiters = {} + + def new_limiter(): + return _PerHostRatelimiter(clock=clock, config=config) + + self.ratelimiters = collections.defaultdict(new_limiter) def ratelimit(self, host): """Used to ratelimit an incoming request from given host @@ -53,11 +55,9 @@ def ratelimit(self, host): host (str): Origin of incoming request. Returns: - _PerHostRatelimiter + context manager which returns a deferred. """ - return self.ratelimiters.setdefault( - host, _PerHostRatelimiter(clock=self.clock, config=self._config) - ).ratelimit() + return self.ratelimiters[host].ratelimit() class _PerHostRatelimiter(object): @@ -122,7 +122,7 @@ def _on_enter(self, request_id): self.request_times.append(time_now) def queue_request(): - if len(self.current_processing) > self.concurrent_requests: + if len(self.current_processing) >= self.concurrent_requests: queue_defer = defer.Deferred() self.ready_request_queue[request_id] = queue_defer logger.info( diff --git a/tests/config/test_ratelimiting.py b/tests/config/test_ratelimiting.py new file mode 100644 index 000000000000..13ab282384e3 --- /dev/null +++ b/tests/config/test_ratelimiting.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from synapse.config.homeserver import HomeServerConfig + +from tests.unittest import TestCase +from tests.utils import default_config + + +class RatelimitConfigTestCase(TestCase): + def test_parse_rc_federation(self): + config_dict = default_config("test") + config_dict["rc_federation"] = { + "window_size": 20000, + "sleep_limit": 693, + "sleep_delay": 252, + "reject_limit": 198, + "concurrent": 7, + } + + config = HomeServerConfig() + config.parse_config_dict(config_dict, "", "") + config_obj = config.rc_federation + + self.assertEqual(config_obj.window_size, 20000) + self.assertEqual(config_obj.sleep_limit, 693) + self.assertEqual(config_obj.sleep_delay, 252) + self.assertEqual(config_obj.reject_limit, 198) + self.assertEqual(config_obj.concurrent, 7) diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py new file mode 100644 index 000000000000..4d1aee91d537 --- /dev/null +++ b/tests/util/test_ratelimitutils.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.config.homeserver import HomeServerConfig +from synapse.util.ratelimitutils import FederationRateLimiter + +from tests.server import get_clock +from tests.unittest import TestCase +from tests.utils import default_config + + +class FederationRateLimiterTestCase(TestCase): + def test_ratelimit(self): + """A simple test with the default values""" + reactor, clock = get_clock() + rc_config = build_rc_config() + ratelimiter = FederationRateLimiter(clock, rc_config) + + with ratelimiter.ratelimit("testhost") as d1: + # shouldn't block + self.successResultOf(d1) + + def test_concurrent_limit(self): + """Test what happens when we hit the concurrent limit""" + reactor, clock = get_clock() + rc_config = build_rc_config({"rc_federation": {"concurrent": 2}}) + ratelimiter = FederationRateLimiter(clock, rc_config) + + with ratelimiter.ratelimit("testhost") as d1: + # shouldn't block + self.successResultOf(d1) + + cm2 = ratelimiter.ratelimit("testhost") + d2 = cm2.__enter__() + # also shouldn't block + self.successResultOf(d2) + + cm3 = ratelimiter.ratelimit("testhost") + d3 = cm3.__enter__() + # this one should block, though ... + self.assertNoResult(d3) + + # ... until we complete an earlier request + cm2.__exit__(None, None, None) + self.successResultOf(d3) + + def test_sleep_limit(self): + """Test what happens when we hit the sleep limit""" + reactor, clock = get_clock() + rc_config = build_rc_config( + {"rc_federation": {"sleep_limit": 2, "sleep_delay": 500}} + ) + ratelimiter = FederationRateLimiter(clock, rc_config) + + with ratelimiter.ratelimit("testhost") as d1: + # shouldn't block + self.successResultOf(d1) + + with ratelimiter.ratelimit("testhost") as d2: + # nor this + self.successResultOf(d2) + + with ratelimiter.ratelimit("testhost") as d3: + # this one should block, though ... 
+ self.assertNoResult(d3) + sleep_time = _await_resolution(reactor, d3) + self.assertAlmostEqual(sleep_time, 500, places=3) + + +def _await_resolution(reactor, d): + """advance the clock until the deferred completes. + + Returns the number of milliseconds it took to complete. + """ + start_time = reactor.seconds() + while not d.called: + reactor.advance(0.01) + return (reactor.seconds() - start_time) * 1000 + + +def build_rc_config(settings={}): + config_dict = default_config("test") + config_dict.update(settings) + config = HomeServerConfig() + config.parse_config_dict(config_dict, "", "") + return config.rc_federation diff --git a/tests/utils.py b/tests/utils.py index d8e55b0801de..8a94ce0b475d 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -152,12 +152,6 @@ def default_config(name, parse=False): "mau_stats_only": False, "mau_limits_reserved_threepids": [], "admin_contact": None, - "rc_federation": { - "reject_limit": 10, - "sleep_limit": 10, - "sleep_delay": 10, - "concurrent": 10, - }, "rc_message": {"per_second": 10000, "burst_count": 10000}, "rc_registration": {"per_second": 10000, "burst_count": 10000}, "rc_login": { From a6a776f3d80e441d416c384b7ad9344ee1967d31 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 5 Jul 2019 12:59:42 +0100 Subject: [PATCH 08/80] remove dead transaction persist code (#5622) this hasn't done anything for years --- changelog.d/5622.misc | 1 + synapse/federation/persistence.py | 32 ------------------- .../federation/sender/transaction_manager.py | 9 ------ synapse/storage/transactions.py | 28 ---------------- 4 files changed, 1 insertion(+), 69 deletions(-) create mode 100644 changelog.d/5622.misc diff --git a/changelog.d/5622.misc b/changelog.d/5622.misc new file mode 100644 index 000000000000..9f0a87311c27 --- /dev/null +++ b/changelog.d/5622.misc @@ -0,0 +1 @@ +Remove dead code for persiting outgoing federation transactions. diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index d086e0424304..44edcabed472 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -21,8 +21,6 @@ import logging -from twisted.internet import defer - from synapse.logging.utils import log_function logger = logging.getLogger(__name__) @@ -63,33 +61,3 @@ def set_response(self, origin, transaction, code, response): return self.store.set_received_txn_response( transaction.transaction_id, origin, code, response ) - - @defer.inlineCallbacks - @log_function - def prepare_to_send(self, transaction): - """ Persists the `Transaction` we are about to send and works out the - correct value for the `prev_ids` key. - - Returns: - Deferred - """ - transaction.prev_ids = yield self.store.prep_send_transaction( - transaction.transaction_id, - transaction.destination, - transaction.origin_server_ts, - ) - - @log_function - def delivered(self, transaction, response_code, response_dict): - """ Marks the given `Transaction` as having been successfully - delivered to the remote homeserver, and what the response was. 
- - Returns: - Deferred - """ - return self.store.delivered_txn( - transaction.transaction_id, - transaction.destination, - response_code, - response_dict, - ) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index c987bb9a0d3c..0460a8c4acc6 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -63,8 +63,6 @@ def send_new_transaction(self, destination, pending_pdus, pending_edus): len(edus), ) - logger.debug("TX [%s] Persisting transaction...", destination) - transaction = Transaction.create_new( origin_server_ts=int(self.clock.time_msec()), transaction_id=txn_id, @@ -76,9 +74,6 @@ def send_new_transaction(self, destination, pending_pdus, pending_edus): self._next_txn_id += 1 - yield self._transaction_actions.prepare_to_send(transaction) - - logger.debug("TX [%s] Persisted transaction", destination) logger.info( "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", destination, @@ -118,10 +113,6 @@ def json_data_cb(): logger.info("TX [%s] {%s} got %d response", destination, txn_id, code) - yield self._transaction_actions.delivered(transaction, code, response) - - logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id) - if code == 200: for e_id, r in response.get("pdus", {}).items(): if "error" in r: diff --git a/synapse/storage/transactions.py b/synapse/storage/transactions.py index b1188f6bcb28..fd1861917805 100644 --- a/synapse/storage/transactions.py +++ b/synapse/storage/transactions.py @@ -133,34 +133,6 @@ def set_received_txn_response(self, transaction_id, origin, code, response_dict) desc="set_received_txn_response", ) - def prep_send_transaction(self, transaction_id, destination, origin_server_ts): - """Persists an outgoing transaction and calculates the values for the - previous transaction id list. - - This should be called before sending the transaction so that it has the - correct value for the `prev_ids` key. - - Args: - transaction_id (str) - destination (str) - origin_server_ts (int) - - Returns: - list: A list of previous transaction ids. - """ - return defer.succeed([]) - - def delivered_txn(self, transaction_id, destination, code, response_dict): - """Persists the response for an outgoing transaction. - - Args: - transaction_id (str) - destination (str) - code (int) - response_json (str) - """ - pass - @defer.inlineCallbacks def get_destination_retry_timings(self, destination): """Gets the current retry timings (if any) for a given destination. From 9ccea16d45416397b37fa407709ff455bca415e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 5 Jul 2019 14:07:56 +0100 Subject: [PATCH 09/80] Assume key existence. Update docstrings --- synapse/handlers/admin.py | 4 +--- synapse/storage/stream.py | 16 ++++++++++------ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 69d2c8c36fd6..f06914a378f3 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -209,9 +209,7 @@ def export_user_data(self, user_id, writer): # Now check if this event is an unseen prev event, if so # then we remove this event from the appropriate dicts. 
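[Note: the `.get(child_id, set())` removed just below was defensive but dead. The two dicts are only ever updated in lock-step: `child_id` can appear in `unseen_to_child_events[X]` only because, at the moment `child_id` was processed, `X` was recorded in `event_to_unseen_prevs[child_id]` — and entries of `event_to_unseen_prevs` are never deleted, only have members discarded — so the key must exist and plain indexing is safe. The whole gap-tracking scheme, reduced to a standalone model (illustrative only, not code from the patch):

def find_gap_events(events_in_stream_order):
    """Model of the bookkeeping in export_user_data.

    events_in_stream_order: iterable of (event_id, prev_event_ids) pairs.
    Returns the event IDs that still reference an unprocessed prev event,
    i.e. the backward extremities where a state snapshot must be written.
    """
    processed = set()
    unseen_prevs = {}  # event_id -> its prev events we have not processed
    children = {}      # unprocessed prev event -> events pointing at it

    for event_id, prevs in events_in_stream_order:
        missing = set(prevs) - processed
        if missing:
            unseen_prevs[event_id] = missing
            for prev_id in missing:
                children.setdefault(prev_id, set()).add(event_id)

        # If this event was itself someone's missing prev, resolve it.
        # Direct indexing is safe here for the reason given above.
        for child_id in children.pop(event_id, ()):
            unseen_prevs[child_id].discard(event_id)

        processed.add(event_id)

    return {e for e, missing in unseen_prevs.items() if missing}
]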
for child_id in unseen_to_child_events.pop(event.event_id, []): - event_to_unseen_prevs.get(child_id, set()).discard( - event.event_id - ) + event_to_unseen_prevs[child_id].discard(event.event_id) written_events.add(event.event_id) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index d9482a384339..7b5b3b8c8d31 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -833,7 +833,9 @@ def _paginate_room_events_txn( Returns: Deferred[tuple[list[_EventDictReturn], str]]: Returns the results as a list of _EventDictReturn and a token that points to the end - of the result set. + of the result set. If no events are returned then the end of the + stream has been reached (i.e. there are no events between + `from_token` and `to_token`). """ assert int(limit) >= 0 @@ -905,15 +907,17 @@ def paginate_room_events( only those before direction(char): Either 'b' or 'f' to indicate whether we are paginating forwards or backwards from `from_key`. - limit (int): The maximum number of events to return. Zero or less - means no limit. + limit (int): The maximum number of events to return. event_filter (Filter|None): If provided filters the events to those that match the filter. Returns: - tuple[list[dict], str]: Returns the results as a list of dicts and - a token that points to the end of the result set. The dicts have - the keys "event_id", "topological_ordering" and "stream_orderign". + tuple[list[FrozenEvents], str]: Returns the results as a list of + dicts and a token that points to the end of the result set. The + dicts have the keys "event_id", "topological_ordering" and + "stream_ordering". If no events are returned then the end of the + stream has been reached (i.e. there are no events between + `from_key` and `to_key`). """ from_key = RoomStreamToken.parse(from_key) From eadb13d2e9caaa391f4efe2609a7d54d1723d311 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 5 Jul 2019 14:15:00 +0100 Subject: [PATCH 10/80] Remove FileExfiltrationWriter --- synapse/handlers/admin.py | 63 --------------------------------------- 1 file changed, 63 deletions(-) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index f06914a378f3..5ff02c12bfa0 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -276,66 +276,3 @@ def finished(self): `export_user_data`. """ pass - - -class FileExfiltrationWriter(ExfiltrationWriter): - """An ExfiltrationWriter that writes the user's data to a directory. - - Returns the directory location on completion. - - Args: - user_id (str): The user whose data is being exported. - directory (str|None): The directory to write the data to. If None then - will write to a temporary directory. 
- """ - - def __init__(self, user_id, directory=None): - self.user_id = user_id - - if directory: - self.base_directory = directory - else: - self.base_directory = tempfile.mkdtemp( - prefix="synapse-exported__%s__" % (user_id,) - ) - - os.makedirs(self.base_directory, exist_ok=True) - if list(os.listdir(self.base_directory)): - raise Exception("Directory must be empty") - - def write_events(self, room_id, events): - room_directory = os.path.join(self.base_directory, "rooms", room_id) - os.makedirs(room_directory, exist_ok=True) - events_file = os.path.join(room_directory, "events") - - with open(events_file, "a") as f: - for event in events: - print(json.dumps(event.get_pdu_json()), file=f) - - def write_state(self, room_id, event_id, state): - room_directory = os.path.join(self.base_directory, "rooms", room_id) - state_directory = os.path.join(room_directory, "state") - os.makedirs(state_directory, exist_ok=True) - - event_file = os.path.join(state_directory, event_id) - - with open(event_file, "a") as f: - for event in state.values(): - print(json.dumps(event.get_pdu_json()), file=f) - - def write_invite(self, room_id, event, state): - self.write_events(room_id, [event]) - - # We write the invite state somewhere else as they aren't full events - # and are only a subset of the state at the event. - room_directory = os.path.join(self.base_directory, "rooms", room_id) - os.makedirs(room_directory, exist_ok=True) - - invite_state = os.path.join(room_directory, "invite_state") - - with open(invite_state, "a") as f: - for event in state.values(): - print(json.dumps(event), file=f) - - def finished(self): - return self.base_directory From b4f5416dd9bd7635a4b859e3d13eaee992096ef7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 5 Jul 2019 14:41:29 +0100 Subject: [PATCH 11/80] pep8 --- synapse/handlers/admin.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 5ff02c12bfa0..e8a651e231b8 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,10 +14,6 @@ # limitations under the License. import logging -import os -import tempfile - -from canonicaljson import json from twisted.internet import defer From 80cc82a4459e4bbc926c6a57ae5e29278c2f9439 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 5 Jul 2019 16:47:58 +0100 Subject: [PATCH 12/80] Remove support for invite_3pid_guest. (#5625) This has never been documented, and I'm not sure it's ever been used outside sytest. It's quite a lot of poorly-maintained code, so I'd like to get rid of it. For now I haven't removed the database table; I suggest we leave that for a future clearout. --- changelog.d/5625.removal | 1 + synapse/config/registration.py | 5 +- synapse/handlers/register.py | 30 ------------ synapse/handlers/room_member.py | 39 ---------------- synapse/handlers/room_member_worker.py | 12 ----- synapse/replication/http/membership.py | 65 -------------------------- synapse/storage/registration.py | 47 ------------------- 7 files changed, 3 insertions(+), 196 deletions(-) create mode 100644 changelog.d/5625.removal diff --git a/changelog.d/5625.removal b/changelog.d/5625.removal new file mode 100644 index 000000000000..d33a778d695d --- /dev/null +++ b/changelog.d/5625.removal @@ -0,0 +1 @@ +Remove support for the `invite_3pid_guest` configuration setting. 
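[Note: the config change just below is the interesting part of this removal. Rather than silently ignoring a setting that no longer does anything, `read_config` now raises `ConfigError` at startup if `invite_3pid_guest` is still set, so operators find out immediately. The same fail-fast pattern generalises; a hypothetical helper — not part of this patch, names invented for illustration — might look like:

def reject_removed_setting(config, key, replacement=None):
    """Raise ConfigError if a retired homeserver.yaml key is still in use."""
    if config.get(key, False):
        msg = "%s is no longer supported" % (key,)
        if replacement:
            msg += "; use %s instead" % (replacement,)
        raise ConfigError(msg)

# e.g. reject_removed_setting(config, "invite_3pid_guest")
]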
diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4a59e6ec90f6..b895c4e9f4a9 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -71,9 +71,8 @@ def read_config(self, config, **kwargs): self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) - self.invite_3pid_guest = self.allow_guest_access and config.get( - "invite_3pid_guest", False - ) + if config.get("invite_3pid_guest", False): + raise ConfigError("invite_3pid_guest is no longer supported") self.auto_join_rooms = config.get("auto_join_rooms", []) for room_alias in self.auto_join_rooms: diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e487b90c0821..fd55eb16544a 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -556,36 +556,6 @@ def get_or_create_user(self, requester, localpart, displayname, password_hash=No defer.returnValue((user_id, token)) - @defer.inlineCallbacks - def get_or_register_3pid_guest(self, medium, address, inviter_user_id): - """Get a guest access token for a 3PID, creating a guest account if - one doesn't already exist. - - Args: - medium (str) - address (str) - inviter_user_id (str): The user ID who is trying to invite the - 3PID - - Returns: - Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the - 3PID guest account. - """ - access_token = yield self.store.get_3pid_guest_access_token(medium, address) - if access_token: - user_info = yield self.auth.get_user_by_access_token(access_token) - - defer.returnValue((user_info["user"].to_string(), access_token)) - - user_id, access_token = yield self.register( - generate_token=True, make_guest=True - ) - access_token = yield self.store.save_or_get_3pid_guest_access_token( - medium, address, access_token, inviter_user_id - ) - - defer.returnValue((user_id, access_token)) - @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): room_id = None diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 66b05b4732ba..679daaa0749d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -118,24 +118,6 @@ def _remote_reject_invite(self, remote_room_hosts, room_id, target): """ raise NotImplementedError() - @abc.abstractmethod - def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id): - """Get a guest access token for a 3PID, creating a guest account if - one doesn't already exist. - - Args: - requester (Requester) - medium (str) - address (str) - inviter_user_id (str): The user ID who is trying to invite the - 3PID - - Returns: - Deferred[(str, str)]: A 2-tuple of `(user_id, access_token)` of the - 3PID guest account. 
- """ - raise NotImplementedError() - @abc.abstractmethod def _user_joined_room(self, target, room_id): """Notifies distributor on master process that the user has joined the @@ -890,21 +872,6 @@ def _ask_id_server_for_third_party_invite( "sender_avatar_url": inviter_avatar_url, } - if self.config.invite_3pid_guest: - guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest( - requester=requester, - medium=medium, - address=address, - inviter_user_id=inviter_user_id, - ) - - invite_config.update( - { - "guest_access_token": guest_access_token, - "guest_user_id": guest_user_id, - } - ) - data = yield self.simple_http_client.post_urlencoded_get_json( is_url, invite_config ) @@ -1010,12 +977,6 @@ def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): yield self.store.locally_reject_invite(target.to_string(), room_id) defer.returnValue({}) - def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id): - """Implements RoomMemberHandler.get_or_register_3pid_guest - """ - rg = self.registration_handler - return rg.get_or_register_3pid_guest(medium, address, inviter_user_id) - def _user_joined_room(self, target, room_id): """Implements RoomMemberHandler._user_joined_room """ diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index da501f38c04e..fc873a3ba651 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -20,7 +20,6 @@ from synapse.api.errors import SynapseError from synapse.handlers.room_member import RoomMemberHandler from synapse.replication.http.membership import ( - ReplicationRegister3PIDGuestRestServlet as Repl3PID, ReplicationRemoteJoinRestServlet as ReplRemoteJoin, ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite, ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft, @@ -33,7 +32,6 @@ class RoomMemberWorkerHandler(RoomMemberHandler): def __init__(self, hs): super(RoomMemberWorkerHandler, self).__init__(hs) - self._get_register_3pid_client = Repl3PID.make_client(hs) self._remote_join_client = ReplRemoteJoin.make_client(hs) self._remote_reject_client = ReplRejectInvite.make_client(hs) self._notify_change_client = ReplJoinedLeft.make_client(hs) @@ -80,13 +78,3 @@ def _user_left_room(self, target, room_id): return self._notify_change_client( user_id=target.to_string(), room_id=room_id, change="left" ) - - def get_or_register_3pid_guest(self, requester, medium, address, inviter_user_id): - """Implements RoomMemberHandler.get_or_register_3pid_guest - """ - return self._get_register_3pid_client( - requester=requester, - medium=medium, - address=address, - inviter_user_id=inviter_user_id, - ) diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 0a76a3762f1c..2d9cbbaefc24 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -156,70 +156,6 @@ def _handle_request(self, request, room_id, user_id): defer.returnValue((200, ret)) -class ReplicationRegister3PIDGuestRestServlet(ReplicationEndpoint): - """Gets/creates a guest account for given 3PID. - - Request format: - - POST /_synapse/replication/get_or_register_3pid_guest/ - - { - "requester": ..., - "medium": ..., - "address": ..., - "inviter_user_id": ... 
- } - """ - - NAME = "get_or_register_3pid_guest" - PATH_ARGS = () - - def __init__(self, hs): - super(ReplicationRegister3PIDGuestRestServlet, self).__init__(hs) - - self.registeration_handler = hs.get_registration_handler() - self.store = hs.get_datastore() - self.clock = hs.get_clock() - - @staticmethod - def _serialize_payload(requester, medium, address, inviter_user_id): - """ - Args: - requester(Requester) - medium (str) - address (str) - inviter_user_id (str): The user ID who is trying to invite the - 3PID - """ - return { - "requester": requester.serialize(), - "medium": medium, - "address": address, - "inviter_user_id": inviter_user_id, - } - - @defer.inlineCallbacks - def _handle_request(self, request): - content = parse_json_object_from_request(request) - - medium = content["medium"] - address = content["address"] - inviter_user_id = content["inviter_user_id"] - - requester = Requester.deserialize(self.store, content["requester"]) - - if requester.user: - request.authenticated_entity = requester.user.to_string() - - logger.info("get_or_register_3pid_guest: %r", content) - - ret = yield self.registeration_handler.get_or_register_3pid_guest( - medium, address, inviter_user_id - ) - - defer.returnValue((200, ret)) - - class ReplicationUserJoinedLeftRoomRestServlet(ReplicationEndpoint): """Notifies that a user has joined or left the room @@ -272,5 +208,4 @@ def _handle_request(self, request, room_id, user_id, change): def register_servlets(hs, http_server): ReplicationRemoteJoinRestServlet(hs).register(http_server) ReplicationRemoteRejectInviteRestServlet(hs).register(http_server) - ReplicationRegister3PIDGuestRestServlet(hs).register(http_server) ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 13a3d5208bbf..aea5b3276bbc 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -432,19 +432,6 @@ def _find_next_generated_user_id(txn): ) ) - @defer.inlineCallbacks - def get_3pid_guest_access_token(self, medium, address): - ret = yield self._simple_select_one( - "threepid_guest_access_tokens", - {"medium": medium, "address": address}, - ["guest_access_token"], - True, - "get_3pid_guest_access_token", - ) - if ret: - defer.returnValue(ret["guest_access_token"]) - defer.returnValue(None) - @defer.inlineCallbacks def get_user_id_by_threepid(self, medium, address, require_verified=False): """Returns user id from threepid @@ -979,40 +966,6 @@ def is_guest(self, user_id): defer.returnValue(res if res else False) - @defer.inlineCallbacks - def save_or_get_3pid_guest_access_token( - self, medium, address, access_token, inviter_user_id - ): - """ - Gets the 3pid's guest access token if exists, else saves access_token. - - Args: - medium (str): Medium of the 3pid. Must be "email". - address (str): 3pid address. - access_token (str): The access token to persist if none is - already persisted. - inviter_user_id (str): User ID of the inviter. - - Returns: - deferred str: Whichever access token is persisted at the end - of this function call. 
- """ - - def insert(txn): - txn.execute( - "INSERT INTO threepid_guest_access_tokens " - "(medium, address, guest_access_token, first_inviter) " - "VALUES (?, ?, ?, ?)", - (medium, address, access_token, inviter_user_id), - ) - - try: - yield self.runInteraction("save_3pid_guest_access_token", insert) - defer.returnValue(access_token) - except self.database_engine.module.IntegrityError: - ret = yield self.get_3pid_guest_access_token(medium, address) - defer.returnValue(ret) - def add_user_pending_deactivation(self, user_id): """ Adds a user to the table of users who need to be parted from all the rooms they're From ad8b909ce98cce5e955b10a3cbe04f3e868ecb70 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 5 Jul 2019 17:20:02 +0100 Subject: [PATCH 13/80] Add origin_server_ts and sender fields to m.replace (#5613) Riot team would like some extra fields as part of m.replace, so here you go. Fixes: #5598 --- changelog.d/5613.feature | 1 + synapse/events/utils.py | 6 ++++- tests/rest/client/v2_alpha/test_relations.py | 24 +++++++++++++++----- 3 files changed, 24 insertions(+), 7 deletions(-) create mode 100644 changelog.d/5613.feature diff --git a/changelog.d/5613.feature b/changelog.d/5613.feature new file mode 100644 index 000000000000..4b7bb2745c19 --- /dev/null +++ b/changelog.d/5613.feature @@ -0,0 +1 @@ +Add `sender` and `origin_server_ts` fields to `m.replace`. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index f24f0c16f001..987de5cab7a3 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -392,7 +392,11 @@ def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs): serialized_event["content"].pop("m.relates_to", None) r = serialized_event["unsigned"].setdefault("m.relations", {}) - r[RelationTypes.REPLACE] = {"event_id": edit.event_id} + r[RelationTypes.REPLACE] = { + "event_id": edit.event_id, + "origin_server_ts": edit.origin_server_ts, + "sender": edit.sender, + } defer.returnValue(serialized_event) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 3deeed3a70f6..6bb7d926385e 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -466,9 +466,15 @@ def test_edit(self): self.assertEquals(channel.json_body["content"], new_body) - self.assertEquals( - channel.json_body["unsigned"].get("m.relations"), - {RelationTypes.REPLACE: {"event_id": edit_event_id}}, + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) def test_multi_edit(self): @@ -518,9 +524,15 @@ def test_multi_edit(self): self.assertEquals(channel.json_body["content"], new_body) - self.assertEquals( - channel.json_body["unsigned"].get("m.relations"), - {RelationTypes.REPLACE: {"event_id": edit_event_id}}, + relations_dict = channel.json_body["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in ["event_id", "sender", "origin_server_ts"]: + self.assertIn(key, m_replace_dict) + + self.assert_dict( + {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) def _send_relation( From 
9b1b79f3f59a93cde0d6f973e40515f65ff1b3b8 Mon Sep 17 00:00:00 2001 From: "J. Ryan Stinnett" Date: Fri, 5 Jul 2019 17:37:52 +0100 Subject: [PATCH 14/80] Add default push rule to ignore reactions (#5623) This adds a default push rule following the proposal in [MSC2153](https://github.com/matrix-org/matrix-doc/pull/2153). See also https://github.com/vector-im/riot-web/issues/10208 See also https://github.com/matrix-org/matrix-js-sdk/pull/976 --- changelog.d/5623.feature | 1 + synapse/push/baserules.py | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/5623.feature diff --git a/changelog.d/5623.feature b/changelog.d/5623.feature new file mode 100644 index 000000000000..b73080e88d29 --- /dev/null +++ b/changelog.d/5623.feature @@ -0,0 +1 @@ +Add default push rule to ignore reactions. diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 96d087de226a..134bf805eb8d 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -1,5 +1,6 @@ # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -248,6 +249,18 @@ def make_base_prepend_rules(kind, modified_base_rules): ], "actions": ["notify", {"set_tweak": "highlight", "value": True}], }, + { + "rule_id": "global/override/.m.rule.reaction", + "conditions": [ + { + "kind": "event_match", + "key": "type", + "pattern": "m.reaction", + "_id": "_reaction", + } + ], + "actions": ["dont_notify"], + }, ] From 589d43d9cd80d6a780f1bf8b99ae396efe23aae8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 8 Jul 2019 12:53:33 +0100 Subject: [PATCH 15/80] Add a few more common environment directory names to black exclusion (#5630) * Add a few more common environment directory names to black exclusion * Add changelog --- changelog.d/5630.misc | 1 + pyproject.toml | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/5630.misc diff --git a/changelog.d/5630.misc b/changelog.d/5630.misc new file mode 100644 index 000000000000..f112d873eb80 --- /dev/null +++ b/changelog.d/5630.misc @@ -0,0 +1 @@ +Add some more common python virtual-environment paths to the black exclusion list. 
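
The ".m.rule.reaction" override added in patch 14/80 above is easy to reason about in isolation. The sketch below is a deliberately simplified evaluator: the real one in synapse.push supports glob patterns and several other condition kinds, whereas this version handles only the single exact event_match-on-type condition that rule uses.

    REACTION_RULE = {
        "rule_id": "global/override/.m.rule.reaction",
        "conditions": [
            {"kind": "event_match", "key": "type", "pattern": "m.reaction"}
        ],
        "actions": ["dont_notify"],
    }

    def rule_matches(rule, event):
        # True when every condition's pattern equals the event field it names.
        return all(
            cond["kind"] == "event_match"
            and event.get(cond["key"]) == cond["pattern"]
            for cond in rule["conditions"]
        )

    assert rule_matches(REACTION_RULE, {"type": "m.reaction"})
    assert not rule_matches(REACTION_RULE, {"type": "m.room.message"})

Because it is an override rule whose action is "dont_notify", a matching m.reaction event is swallowed before any later rule can turn it into a notification.
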
diff --git a/pyproject.toml b/pyproject.toml index ec23258da8db..ef329aab41a3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,8 @@ exclude = ''' | \.git # root of the project | \.tox | \.venv + | \.env + | env | _build | _trial_temp.* | build From 1a807dfe6855b15d8574eb92541dd84b946f16bd Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 14:19:39 +0100 Subject: [PATCH 16/80] Use application/json when querying the IS's /store-invite endpoint --- synapse/handlers/room_member.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 4d6e8838025c..b050967b8f8c 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -29,7 +29,7 @@ import synapse.server import synapse.types from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes, SynapseError +from synapse.api.errors import AuthError, Codes, HttpResponseException, SynapseError from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room @@ -904,9 +904,22 @@ def _ask_id_server_for_third_party_invite( } ) - data = yield self.simple_http_client.post_urlencoded_get_json( - is_url, invite_config - ) + try: + data = yield self.simple_http_client.post_json_get_json( + is_url, invite_config + ) + except HttpResponseException as e: + # Some identity servers may only support application/x-www-form-urlencoded + # types. This is especially true with old instances of Sydent, see + # https://github.com/matrix-org/sydent/pull/170 + logger.info( + "Failed to POST %s with JSON, falling back to urlencoded form: %s", + is_url, e, + ) + data = yield self.simple_http_client.post_urlencoded_get_json( + is_url, invite_config + ) + # TODO: Check for success token = data["token"] public_keys = data.get("public_keys", []) From f05c7d62bc2bb504f0048adf6e68decdbe31f80d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 14:29:27 +0100 Subject: [PATCH 17/80] Lint --- synapse/handlers/room_member.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index b050967b8f8c..c3420b4b2242 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -914,7 +914,8 @@ def _ask_id_server_for_third_party_invite( # https://github.com/matrix-org/sydent/pull/170 logger.info( "Failed to POST %s with JSON, falling back to urlencoded form: %s", - is_url, e, + is_url, + e, ) data = yield self.simple_http_client.post_urlencoded_get_json( is_url, invite_config From 1af2fcd492c81075a9d75a5578d8d599af3cea0c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Jul 2019 14:52:26 +0100 Subject: [PATCH 18/80] Move get_or_create_user to test code (#5628) This is only used in tests, so... --- changelog.d/5628.misc | 1 + synapse/handlers/register.py | 51 ------------------------- tests/handlers/test_register.py | 68 ++++++++++++++++++++++++++++----- 3 files changed, 60 insertions(+), 60 deletions(-) create mode 100644 changelog.d/5628.misc diff --git a/changelog.d/5628.misc b/changelog.d/5628.misc new file mode 100644 index 000000000000..fec8446793a0 --- /dev/null +++ b/changelog.d/5628.misc @@ -0,0 +1 @@ +Move RegistrationHandler.get_or_create_user to test code. 
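
Before moving on: the content-type fallback from patch 16/80 above generalises to any endpoint with clients of mixed vintage. Here it is reduced to a synchronous sketch using the third-party `requests` library so it can run outside Synapse's Twisted machinery; the URL and payload are illustrative only, and error handling is simplified.

    import requests

    def post_invite(is_url, invite_config):
        # Prefer application/json, which newer identity servers expect.
        resp = requests.post(is_url, json=invite_config)
        if resp.ok:
            return resp.json()
        # Old Sydent instances only understand
        # application/x-www-form-urlencoded, so retry in that format.
        resp = requests.post(is_url, data=invite_config)
        resp.raise_for_status()
        return resp.json()
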
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index fd55eb16544a..853020180be5 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -505,57 +505,6 @@ def _submit_captcha(self, ip_addr, private_key, challenge, response): ) defer.returnValue(data) - @defer.inlineCallbacks - def get_or_create_user(self, requester, localpart, displayname, password_hash=None): - """Creates a new user if the user does not exist, - else revokes all previous access tokens and generates a new one. - - Args: - localpart : The local part of the user ID to register. If None, - one will be randomly generated. - Returns: - A tuple of (user_id, access_token). - Raises: - RegistrationError if there was a problem registering. - - NB this is only used in tests. TODO: move it to the test package! - """ - if localpart is None: - raise SynapseError(400, "Request must include user id") - yield self.auth.check_auth_blocking() - need_register = True - - try: - yield self.check_username(localpart) - except SynapseError as e: - if e.errcode == Codes.USER_IN_USE: - need_register = False - else: - raise - - user = UserID(localpart, self.hs.hostname) - user_id = user.to_string() - token = self.macaroon_gen.generate_access_token(user_id) - - if need_register: - yield self.register_with_store( - user_id=user_id, - token=token, - password_hash=password_hash, - create_profile_with_displayname=user.localpart, - ) - else: - yield self._auth_handler.delete_access_tokens_for_user(user_id) - yield self.store.add_access_token_to_user(user_id=user_id, token=token) - - if displayname is not None: - logger.info("setting user display name: %s -> %s", user_id, displayname) - yield self.profile_handler.set_displayname( - user, requester, displayname, by_admin=True - ) - - defer.returnValue((user_id, token)) - @defer.inlineCallbacks def _join_user_to_room(self, requester, room_identifier): room_id = None diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 4edce7af435f..1c7ded73973f 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.constants import UserTypes -from synapse.api.errors import ResourceLimitError, SynapseError +from synapse.api.errors import Codes, ResourceLimitError, SynapseError from synapse.handlers.register import RegistrationHandler from synapse.types import RoomAlias, UserID, create_requester @@ -67,7 +67,7 @@ def test_user_is_created_and_logged_in_if_doesnt_exist(self): user_id = frank.to_string() requester = create_requester(user_id) result_user_id, result_token = self.get_success( - self.handler.get_or_create_user(requester, frank.localpart, "Frankie") + self.get_or_create_user(requester, frank.localpart, "Frankie") ) self.assertEquals(result_user_id, user_id) self.assertTrue(result_token is not None) @@ -87,7 +87,7 @@ def test_if_user_exists(self): user_id = frank.to_string() requester = create_requester(user_id) result_user_id, result_token = self.get_success( - self.handler.get_or_create_user(requester, local_part, None) + self.get_or_create_user(requester, local_part, None) ) self.assertEquals(result_user_id, user_id) self.assertTrue(result_token is not None) @@ -95,9 +95,7 @@ def test_if_user_exists(self): def test_mau_limits_when_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - self.get_success( - self.handler.get_or_create_user(self.requester, "a", "display_name") - ) + 
self.get_success(self.get_or_create_user(self.requester, "a", "display_name")) def test_get_or_create_user_mau_not_blocked(self): self.hs.config.limit_usage_by_mau = True @@ -105,7 +103,7 @@ def test_get_or_create_user_mau_not_blocked(self): return_value=defer.succeed(self.hs.config.max_mau_value - 1) ) # Ensure does not throw exception - self.get_success(self.handler.get_or_create_user(self.requester, "c", "User")) + self.get_success(self.get_or_create_user(self.requester, "c", "User")) def test_get_or_create_user_mau_blocked(self): self.hs.config.limit_usage_by_mau = True @@ -113,7 +111,7 @@ def test_get_or_create_user_mau_blocked(self): return_value=defer.succeed(self.lots_of_users) ) self.get_failure( - self.handler.get_or_create_user(self.requester, "b", "display_name"), + self.get_or_create_user(self.requester, "b", "display_name"), ResourceLimitError, ) @@ -121,7 +119,7 @@ def test_get_or_create_user_mau_blocked(self): return_value=defer.succeed(self.hs.config.max_mau_value) ) self.get_failure( - self.handler.get_or_create_user(self.requester, "b", "display_name"), + self.get_or_create_user(self.requester, "b", "display_name"), ResourceLimitError, ) @@ -232,3 +230,55 @@ def test_register_not_support_user(self): def test_invalid_user_id_length(self): invalid_user_id = "x" * 256 self.get_failure(self.handler.register(localpart=invalid_user_id), SynapseError) + + @defer.inlineCallbacks + def get_or_create_user(self, requester, localpart, displayname, password_hash=None): + """Creates a new user if the user does not exist, + else revokes all previous access tokens and generates a new one. + + XXX: this used to be in the main codebase, but was only used by this file, + so got moved here. TODO: get rid of it, probably + + Args: + localpart : The local part of the user ID to register. If None, + one will be randomly generated. + Returns: + A tuple of (user_id, access_token). + Raises: + RegistrationError if there was a problem registering. + """ + if localpart is None: + raise SynapseError(400, "Request must include user id") + yield self.hs.get_auth().check_auth_blocking() + need_register = True + + try: + yield self.handler.check_username(localpart) + except SynapseError as e: + if e.errcode == Codes.USER_IN_USE: + need_register = False + else: + raise + + user = UserID(localpart, self.hs.hostname) + user_id = user.to_string() + token = self.macaroon_generator.generate_access_token(user_id) + + if need_register: + yield self.handler.register_with_store( + user_id=user_id, + token=token, + password_hash=password_hash, + create_profile_with_displayname=user.localpart, + ) + else: + yield self.hs.get_auth_handler().delete_access_tokens_for_user(user_id) + yield self.store.add_access_token_to_user(user_id=user_id, token=token) + + if displayname is not None: + # logger.info("setting user display name: %s -> %s", user_id, displayname) + yield self.hs.get_profile_handler().set_displayname( + user, requester, displayname, by_admin=True + ) + + defer.returnValue((user_id, token)) From f9e99f95340c2489ab38c6782ac16440455777f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Jul 2019 14:54:22 +0100 Subject: [PATCH 19/80] Factor out some redundant code in the login impl (#5639) * Factor out some redundant code in the login impl Also fixes a redundant access_token which was generated during jwt login. 
* changelog --- changelog.d/5639.misc | 1 + synapse/rest/client/v1/login.py | 49 +++++++-------------------------- 2 files changed, 11 insertions(+), 39 deletions(-) create mode 100644 changelog.d/5639.misc diff --git a/changelog.d/5639.misc b/changelog.d/5639.misc new file mode 100644 index 000000000000..413b13128c6f --- /dev/null +++ b/changelog.d/5639.misc @@ -0,0 +1 @@ +Factor out some redundant code in the login implementation. diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index f96117823557..b13043cc6408 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -283,19 +283,7 @@ def do_token_login(self, login_submission): yield auth_handler.validate_short_term_login_token_and_get_user_id(token) ) - device_id = login_submission.get("device_id") - initial_display_name = login_submission.get("initial_device_display_name") - device_id, access_token = yield self.registration_handler.register_device( - user_id, device_id, initial_display_name - ) - - result = { - "user_id": user_id, # may have changed - "access_token": access_token, - "home_server": self.hs.hostname, - "device_id": device_id, - } - + result = yield self._register_device_with_callback(user_id, login_submission) defer.returnValue(result) @defer.inlineCallbacks @@ -323,35 +311,18 @@ def do_jwt_login(self, login_submission): raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED) user_id = UserID(user, self.hs.hostname).to_string() - device_id = login_submission.get("device_id") - initial_display_name = login_submission.get("initial_device_display_name") - - auth_handler = self.auth_handler - registered_user_id = yield auth_handler.check_user_exists(user_id) - if registered_user_id: - device_id, access_token = yield self.registration_handler.register_device( - registered_user_id, device_id, initial_display_name - ) - result = { - "user_id": registered_user_id, - "access_token": access_token, - "home_server": self.hs.hostname, - } - else: - user_id, access_token = ( - yield self.registration_handler.register(localpart=user) - ) - device_id, access_token = yield self.registration_handler.register_device( - user_id, device_id, initial_display_name + registered_user_id = yield self.auth_handler.check_user_exists(user_id) + if not registered_user_id: + registered_user_id, _ = ( + yield self.registration_handler.register( + localpart=user, generate_token=False + ) ) - result = { - "user_id": user_id, # may have changed - "access_token": access_token, - "home_server": self.hs.hostname, - } - + result = yield self._register_device_with_callback( + registered_user_id, login_submission + ) defer.returnValue(result) From 4b1f7febc753f9438c314ef3c03d9e3a86715b93 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Jul 2019 14:55:34 +0100 Subject: [PATCH 20/80] Update ModuleApi to avoid register(generate_token=True) (#5640) * Update ModuleApi to avoid register(generate_token=True) This is the only place this is still used, so I'm trying to kill it off. * changelog --- changelog.d/5640.misc | 1 + synapse/module_api/__init__.py | 64 +++++++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5640.misc diff --git a/changelog.d/5640.misc b/changelog.d/5640.misc new file mode 100644 index 000000000000..7d69a1b3b640 --- /dev/null +++ b/changelog.d/5640.misc @@ -0,0 +1 @@ +Update ModuleApi to avoid register(generate_token=True). 
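
Both login paths in patch 19/80 above now end in a call to _register_device_with_callback, whose body is not included in this series' excerpt. Based on the inline code it replaces, it plausibly looks like the following; this is a reconstruction on the login servlet class (with `from twisted.internet import defer` in scope), not the actual implementation.

    @defer.inlineCallbacks
    def _register_device_with_callback(self, user_id, login_submission):
        # Register a device for the (possibly just-created) user and
        # assemble the standard login response.
        device_id = login_submission.get("device_id")
        initial_display_name = login_submission.get("initial_device_display_name")
        device_id, access_token = yield self.registration_handler.register_device(
            user_id, device_id, initial_display_name
        )
        defer.returnValue(
            {
                "user_id": user_id,  # may have changed
                "access_token": access_token,
                "home_server": self.hs.hostname,
                "device_id": device_id,
            }
        )
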
diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index bf43ca09be7c..a0be2c5ca30e 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -12,10 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging + from twisted.internet import defer from synapse.types import UserID +logger = logging.getLogger(__name__) + class ModuleApi(object): """A proxy object that gets passed to password auth providers so they @@ -76,8 +80,13 @@ def check_user_exists(self, user_id): @defer.inlineCallbacks def register(self, localpart, displayname=None, emails=[]): - """Registers a new user with given localpart and optional - displayname, emails. + """Registers a new user with given localpart and optional displayname, emails. + + Also returns an access token for the new user. + + Deprecated: avoid this, as it generates a new device with no way to + return that device to the user. Prefer separate calls to register_user and + register_device. Args: localpart (str): The localpart of the new user. @@ -85,16 +94,55 @@ def register(self, localpart, displayname=None, emails=[]): emails (List[str]): Emails to bind to the new user. Returns: - Deferred: a 2-tuple of (user_id, access_token) + Deferred[tuple[str, str]]: a 2-tuple of (user_id, access_token) """ - # Register the user - reg = self.hs.get_registration_handler() - user_id, access_token = yield reg.register( - localpart=localpart, default_display_name=displayname, bind_emails=emails + logger.warning( + "Using deprecated ModuleApi.register which creates a dummy user device." ) - + user_id = yield self.register_user(localpart, displayname, emails) + _, access_token = yield self.register_device(user_id) defer.returnValue((user_id, access_token)) + @defer.inlineCallbacks + def register_user(self, localpart, displayname=None, emails=[]): + """Registers a new user with given localpart and optional displayname, emails. + + Args: + localpart (str): The localpart of the new user. + displayname (str|None): The displayname of the new user. + emails (List[str]): Emails to bind to the new user. + + Returns: + Deferred[str]: user_id + """ + user_id, _ = yield self.hs.get_registration_handler().register( + localpart=localpart, + default_display_name=displayname, + bind_emails=emails, + generate_token=False, + ) + + defer.returnValue(user_id) + + def register_device(self, user_id, device_id=None, initial_display_name=None): + """Register a device for a user and generate an access token. + + Args: + user_id (str): full canonical @user:id + device_id (str|None): The device ID to check, or None to generate + a new one. + initial_display_name (str|None): An optional display name for the + device. 
+ + Returns: + defer.Deferred[tuple[str, str]]: Tuple of device ID and access token + """ + return self.hs.get_registration_handler().register_device( + user_id=user_id, + device_id=device_id, + initial_display_name=initial_display_name, + ) + @defer.inlineCallbacks def invalidate_access_token(self, access_token): """Invalidate an access token for a user From c142e5d16ad4dc5a4cbc93d7d19b2902be72644b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 14:29:37 +0100 Subject: [PATCH 21/80] Changelog --- changelog.d/5638.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5638.bugfix diff --git a/changelog.d/5638.bugfix b/changelog.d/5638.bugfix new file mode 100644 index 000000000000..66781ad9e687 --- /dev/null +++ b/changelog.d/5638.bugfix @@ -0,0 +1 @@ +Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. From b70e080b59c992197e02c6278984a96706fc6b7f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Jul 2019 17:14:51 +0100 Subject: [PATCH 22/80] Better logging for auto-join. (#5643) It was pretty unclear what was going on, so I've added a couple of log lines. --- changelog.d/5643.misc | 1 + synapse/handlers/register.py | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/5643.misc diff --git a/changelog.d/5643.misc b/changelog.d/5643.misc new file mode 100644 index 000000000000..2b2316469e03 --- /dev/null +++ b/changelog.d/5643.misc @@ -0,0 +1 @@ +Improve logging for auto-join when a new user is created. diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 853020180be5..c72735067fec 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -256,8 +256,14 @@ def register( user_id = None token = None attempts += 1 + if not self.hs.config.user_consent_at_registration: yield self._auto_join_rooms(user_id) + else: + logger.info( + "Skipping auto-join for %s because consent is required at registration", + user_id, + ) # Bind any specified emails to this account current_time = self.hs.get_clock().time_msec() @@ -298,6 +304,7 @@ def _auto_join_rooms(self, user_id): count = yield self.store.count_all_users() should_auto_create_rooms = count == 1 for r in self.hs.config.auto_join_rooms: + logger.info("Auto-joining %s to %s", user_id, r) try: if should_auto_create_rooms: room_alias = RoomAlias.from_string(r) From 43d175d17a96663081521b2215d1e26473135d1e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Jul 2019 17:15:17 +0100 Subject: [PATCH 23/80] Unblacklist some user_directory sytests (#5637) Fixes https://github.com/matrix-org/synapse/issues/2306 --- changelog.d/5637.misc | 1 + sytest-blacklist | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/5637.misc diff --git a/changelog.d/5637.misc b/changelog.d/5637.misc new file mode 100644 index 000000000000..f18d6197e51a --- /dev/null +++ b/changelog.d/5637.misc @@ -0,0 +1 @@ +Unblacklist some user_directory sytests. 
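
From a plugin author's point of view, the ModuleApi change in patch 20/80 above turns one deprecated call into two explicit ones. A usage sketch, assuming `api` is the ModuleApi instance handed to the module; the function name and localpart are invented for illustration.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def provision_user(api):
        # Step 1: create the account.
        user_id = yield api.register_user(localpart="alice", displayname="Alice")
        # Step 2: create a device and get an access token for it, so the
        # token is tied to a known device rather than a dummy one.
        device_id, access_token = yield api.register_device(user_id)
        defer.returnValue((user_id, device_id, access_token))
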
diff --git a/sytest-blacklist b/sytest-blacklist index b760a48c57ca..11785fd43f90 100644 --- a/sytest-blacklist +++ b/sytest-blacklist @@ -24,10 +24,6 @@ Newly created users see their own presence in /initialSync (SYT-34) # Blacklisted due to https://github.com/matrix-org/synapse/issues/1396 Should reject keys claiming to belong to a different user -# Blacklisted due to https://github.com/matrix-org/synapse/issues/2306 -Users appear/disappear from directory when join_rules are changed -Users appear/disappear from directory when history_visibility are changed - # Blacklisted due to https://github.com/matrix-org/synapse/issues/1531 Enabling an unknown default rule fails with 404 From 7556851665ce623ce49f6fd8eaf22c1b4f234b9d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 17:31:00 +0100 Subject: [PATCH 24/80] Allow newly-registered users to lookup their own profiles When a user creates an account and the 'require_auth_for_profile_requests' config flag is set, and a client that performed the registration wants to lookup the newly-created profile, the request will be denied because the user doesn't share a room with themselves yet. --- synapse/handlers/profile.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index d8462b75eca5..a2388a709160 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -303,6 +303,10 @@ def check_profile_query_allowed(self, target_user, requester=None): if not self.hs.config.require_auth_for_profile_requests or not requester: return + # Always allow the user to query their own profile. + if target_user.to_string() == requester.to_string(): + return + try: requester_rooms = yield self.store.get_rooms_for_user(requester.to_string()) target_user_rooms = yield self.store.get_rooms_for_user( From f3615a8aa5f93438629749446a49078aa487f11a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 17:31:58 +0100 Subject: [PATCH 25/80] Changelog --- changelog.d/5644.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5644.bugfix diff --git a/changelog.d/5644.bugfix b/changelog.d/5644.bugfix new file mode 100644 index 000000000000..f6302fd08dd3 --- /dev/null +++ b/changelog.d/5644.bugfix @@ -0,0 +1 @@ +Fix newly-registered users not being able to lookup their own profile without joining a room. From 5e01e9ac1914cff89d54350df5270c1a2b7ccc42 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 17:41:16 +0100 Subject: [PATCH 26/80] Add test case --- tests/rest/client/v1/test_profile.py | 47 ++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index dff9b2f10c55..a76dda950313 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -288,3 +288,50 @@ def ensure_requester_left_room(self): # if the user isn't already in the room), because we only want to # make sure the user isn't in the room. pass + + +class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + profile.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["require_auth_for_profile_requests"] = True + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor, clock, hs): + # User requesting the profile. 
+ self.requester = self.register_user("requester", "pass") + self.requester_tok = self.login("requester", "pass") + + def test_can_lookup_own_profile(self): + """Tests that a user can lookup their own profile without having to be in a room + if 'require_auth_for_profile_requests' is set to true in the server's config. + """ + request, channel = self.make_request( + "GET", "/profile/" + self.requester, access_token=self.requester_tok + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + request, channel = self.make_request( + "GET", + "/profile/" + self.requester + "/displayname", + access_token=self.requester_tok + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + request, channel = self.make_request( + "GET", + "/profile/" + self.requester + "/avatar_url", + access_token=self.requester_tok + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) From 73cb716b3c97f018efe00c6ca7a80b7c6d48c0e1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Jul 2019 17:44:20 +0100 Subject: [PATCH 27/80] Lint --- tests/rest/client/v1/test_profile.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index a76dda950313..140d8b377278 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -323,7 +323,7 @@ def test_can_lookup_own_profile(self): request, channel = self.make_request( "GET", "/profile/" + self.requester + "/displayname", - access_token=self.requester_tok + access_token=self.requester_tok, ) self.render(request) self.assertEqual(channel.code, 200, channel.result) @@ -331,7 +331,7 @@ def test_can_lookup_own_profile(self): request, channel = self.make_request( "GET", "/profile/" + self.requester + "/avatar_url", - access_token=self.requester_tok + access_token=self.requester_tok, ) self.render(request) self.assertEqual(channel.code, 200, channel.result) From 824707383bea618ea809a0f13cf72168b87184f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Jul 2019 19:01:08 +0100 Subject: [PATCH 28/80] Remove access-token support from RegistrationHandler.register (#5641) Nothing uses this now, so we can remove the dead code, and clean up the API. Since we're changing the shape of the return value anyway, we take the opportunity to give the method a better name. --- changelog.d/5641.misc | 1 + synapse/handlers/register.py | 27 ++---------- synapse/module_api/__init__.py | 10 +---- synapse/replication/http/register.py | 6 --- synapse/rest/admin/__init__.py | 3 +- synapse/rest/client/v1/login.py | 14 ++----- synapse/rest/client/v2_alpha/register.py | 11 +++-- tests/handlers/test_register.py | 53 +++++++++++++----------- 8 files changed, 44 insertions(+), 81 deletions(-) create mode 100644 changelog.d/5641.misc diff --git a/changelog.d/5641.misc b/changelog.d/5641.misc new file mode 100644 index 000000000000..1899bc963d4e --- /dev/null +++ b/changelog.d/5641.misc @@ -0,0 +1 @@ +Remove access-token support from RegistrationHandler.register, and rename it. 
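
The mechanical change for callers in patch 28/80, whose main diff follows, is worth spelling out. A hedged before/after sketch; the wrapper function and localpart are invented, and it assumes the handler defaults device_id and initial_display_name to None, as the ModuleApi wrapper in patch 20/80 suggests.

    from twisted.internet import defer

    @defer.inlineCallbacks
    def make_account(registration_handler):
        # Before: register() returned a (user_id, access_token) pair, with
        # the token attached to no particular device.
        #     user_id, token = yield registration_handler.register(localpart="jeff")
        # After: account creation and token issuance are separate steps.
        user_id = yield registration_handler.register_user(localpart="jeff")
        device_id, token = yield registration_handler.register_device(user_id)
        defer.returnValue((user_id, token))
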
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c72735067fec..a3e553d5f51d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -138,11 +138,10 @@ def check_username(self, localpart, guest_access_token=None, assigned_user_id=No ) @defer.inlineCallbacks - def register( + def register_user( self, localpart=None, password=None, - generate_token=True, guest_access_token=None, make_guest=False, admin=False, @@ -160,11 +159,6 @@ def register( password (unicode) : The password to assign to this user so they can login again. This can be None which means they cannot login again via a password (e.g. the user is an application service user). - generate_token (bool): Whether a new access token should be - generated. Having this be True should be considered deprecated, - since it offers no means of associating a device_id with the - access_token. Instead you should call auth_handler.issue_access_token - after registration. user_type (str|None): type of user. One of the values from api.constants.UserTypes, or None for a normal user. default_display_name (unicode|None): if set, the new user's displayname @@ -172,7 +166,7 @@ def register( address (str|None): the IP address used to perform the registration. bind_emails (List[str]): list of emails to bind to this account. Returns: - A tuple of (user_id, access_token). + Deferred[str]: user_id Raises: RegistrationError if there was a problem registering. """ @@ -206,12 +200,8 @@ def register( elif default_display_name is None: default_display_name = localpart - token = None - if generate_token: - token = self.macaroon_gen.generate_access_token(user_id) yield self.register_with_store( user_id=user_id, - token=token, password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, @@ -230,21 +220,17 @@ def register( else: # autogen a sequential user ID attempts = 0 - token = None user = None while not user: localpart = yield self._generate_user_id(attempts > 0) user = UserID(localpart, self.hs.hostname) user_id = user.to_string() yield self.check_user_id_not_appservice_exclusive(user_id) - if generate_token: - token = self.macaroon_gen.generate_access_token(user_id) if default_display_name is None: default_display_name = localpart try: yield self.register_with_store( user_id=user_id, - token=token, password_hash=password_hash, make_guest=make_guest, create_profile_with_displayname=default_display_name, @@ -254,7 +240,6 @@ def register( # if user id is taken, just generate another user = None user_id = None - token = None attempts += 1 if not self.hs.config.user_consent_at_registration: @@ -278,7 +263,7 @@ def register( # Bind email to new account yield self._register_email_threepid(user_id, threepid_dict, None, False) - defer.returnValue((user_id, token)) + defer.returnValue(user_id) @defer.inlineCallbacks def _auto_join_rooms(self, user_id): @@ -541,7 +526,6 @@ def _join_user_to_room(self, requester, room_identifier): def register_with_store( self, user_id, - token=None, password_hash=None, was_guest=False, make_guest=False, @@ -555,9 +539,6 @@ def register_with_store( Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. If this - is not None, the given access token is associated with the user - id. password_hash (str|None): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. 
@@ -593,7 +574,6 @@ def register_with_store( if self.hs.config.worker_app: return self._register_client( user_id=user_id, - token=token, password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, @@ -606,7 +586,6 @@ def register_with_store( else: return self.store.register( user_id=user_id, - token=token, password_hash=password_hash, was_guest=was_guest, make_guest=make_guest, diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a0be2c5ca30e..7bb020cb45be 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -103,7 +103,6 @@ def register(self, localpart, displayname=None, emails=[]): _, access_token = yield self.register_device(user_id) defer.returnValue((user_id, access_token)) - @defer.inlineCallbacks def register_user(self, localpart, displayname=None, emails=[]): """Registers a new user with given localpart and optional displayname, emails. @@ -115,15 +114,10 @@ def register_user(self, localpart, displayname=None, emails=[]): Returns: Deferred[str]: user_id """ - user_id, _ = yield self.hs.get_registration_handler().register( - localpart=localpart, - default_display_name=displayname, - bind_emails=emails, - generate_token=False, + return self.hs.get_registration_handler().register_user( + localpart=localpart, default_display_name=displayname, bind_emails=emails ) - defer.returnValue(user_id) - def register_device(self, user_id, device_id=None, initial_display_name=None): """Register a device for a user and generate an access token. diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index f81a0f1b8f54..2bf2173895ec 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -38,7 +38,6 @@ def __init__(self, hs): @staticmethod def _serialize_payload( user_id, - token, password_hash, was_guest, make_guest, @@ -51,9 +50,6 @@ def _serialize_payload( """ Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. If this - is not None, the given access token is associated with the user - id. password_hash (str|None): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. @@ -68,7 +64,6 @@ def _serialize_payload( address (str|None): the IP address used to perform the regitration. 
""" return { - "token": token, "password_hash": password_hash, "was_guest": was_guest, "make_guest": make_guest, @@ -85,7 +80,6 @@ def _handle_request(self, request, user_id): yield self.registration_handler.register_with_store( user_id=user_id, - token=content["token"], password_hash=content["password_hash"], was_guest=content["was_guest"], make_guest=content["make_guest"], diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 9843a902c69d..6888ae559002 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -219,11 +219,10 @@ def on_POST(self, request): register = RegisterRestServlet(self.hs) - (user_id, _) = yield register.registration_handler.register( + user_id = yield register.registration_handler.register_user( localpart=body["username"].lower(), password=body["password"], admin=bool(admin), - generate_token=False, user_type=user_type, ) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index b13043cc6408..0d05945f0aa6 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -314,10 +314,8 @@ def do_jwt_login(self, login_submission): registered_user_id = yield self.auth_handler.check_user_exists(user_id) if not registered_user_id: - registered_user_id, _ = ( - yield self.registration_handler.register( - localpart=user, generate_token=False - ) + registered_user_id = yield self.registration_handler.register_user( + localpart=user ) result = yield self._register_device_with_callback( @@ -505,12 +503,8 @@ def on_successful_auth( user_id = UserID(localpart, self._hostname).to_string() registered_user_id = yield self._auth_handler.check_user_exists(user_id) if not registered_user_id: - registered_user_id, _ = ( - yield self._registration_handler.register( - localpart=localpart, - generate_token=False, - default_display_name=user_display_name, - ) + registered_user_id = yield self._registration_handler.register_user( + localpart=localpart, default_display_name=user_display_name ) login_token = self._macaroon_gen.generate_short_term_login_token( diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5c120e4dd5d8..f327999e5990 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -464,11 +464,10 @@ def on_POST(self, request): Codes.THREEPID_IN_USE, ) - (registered_user_id, _) = yield self.registration_handler.register( + registered_user_id = yield self.registration_handler.register_user( localpart=desired_username, password=new_password, guest_access_token=guest_access_token, - generate_token=False, threepid=threepid, address=client_addr, ) @@ -542,8 +541,8 @@ def _do_shared_secret_registration(self, username, password, body): if not compare_digest(want_mac, got_mac): raise SynapseError(403, "HMAC incorrect") - (user_id, _) = yield self.registration_handler.register( - localpart=username, password=password, generate_token=False + user_id = yield self.registration_handler.register_user( + localpart=username, password=password ) result = yield self._create_registration_details(user_id, body) @@ -577,8 +576,8 @@ def _create_registration_details(self, user_id, params): def _do_guest_registration(self, params, address=None): if not self.hs.config.allow_guest_access: raise SynapseError(403, "Guest access is disabled") - user_id, _ = yield self.registration_handler.register( - generate_token=False, make_guest=True, address=address + user_id = yield self.registration_handler.register_user( 
+ make_guest=True, address=address ) # we don't allow guests to specify their own device_id, because diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 1c7ded73973f..8197f26d4f4d 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -129,21 +129,21 @@ def test_register_mau_blocked(self): return_value=defer.succeed(self.lots_of_users) ) self.get_failure( - self.handler.register(localpart="local_part"), ResourceLimitError + self.handler.register_user(localpart="local_part"), ResourceLimitError ) self.store.get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) self.get_failure( - self.handler.register(localpart="local_part"), ResourceLimitError + self.handler.register_user(localpart="local_part"), ResourceLimitError ) def test_auto_create_auto_join_rooms(self): room_alias_str = "#room:test" self.hs.config.auto_join_rooms = [room_alias_str] - res = self.get_success(self.handler.register(localpart="jeff")) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_alias_str) room_id = self.get_success(directory_handler.get_association(room_alias)) @@ -154,25 +154,25 @@ def test_auto_create_auto_join_rooms(self): def test_auto_create_auto_join_rooms_with_no_rooms(self): self.hs.config.auto_join_rooms = [] frank = UserID.from_string("@frank:test") - res = self.get_success(self.handler.register(frank.localpart)) - self.assertEqual(res[0], frank.to_string()) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(frank.localpart)) + self.assertEqual(user_id, frank.to_string()) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_auto_create_auto_join_where_room_is_another_domain(self): self.hs.config.auto_join_rooms = ["#room:another"] frank = UserID.from_string("@frank:test") - res = self.get_success(self.handler.register(frank.localpart)) - self.assertEqual(res[0], frank.to_string()) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(frank.localpart)) + self.assertEqual(user_id, frank.to_string()) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_auto_create_auto_join_where_auto_create_is_false(self): self.hs.config.autocreate_auto_join_rooms = False room_alias_str = "#room:test" self.hs.config.auto_join_rooms = [room_alias_str] - res = self.get_success(self.handler.register(localpart="jeff")) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_auto_create_auto_join_rooms_when_support_user_exists(self): @@ -180,8 +180,8 @@ def test_auto_create_auto_join_rooms_when_support_user_exists(self): self.hs.config.auto_join_rooms = [room_alias_str] self.store.is_support_user = Mock(return_value=True) - res = self.get_success(self.handler.register(localpart="support")) - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="support")) + rooms = 
self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) directory_handler = self.hs.get_handlers().directory_handler room_alias = RoomAlias.from_string(room_alias_str) @@ -209,27 +209,31 @@ def test_auto_create_auto_join_where_no_consent(self): # When:- # * the user is registered and post consent actions are called - res = self.get_success(self.handler.register(localpart="jeff")) - self.get_success(self.handler.post_consent_actions(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="jeff")) + self.get_success(self.handler.post_consent_actions(user_id)) # Then:- # * Ensure that they have not been joined to the room - rooms = self.get_success(self.store.get_rooms_for_user(res[0])) + rooms = self.get_success(self.store.get_rooms_for_user(user_id)) self.assertEqual(len(rooms), 0) def test_register_support_user(self): - res = self.get_success( - self.handler.register(localpart="user", user_type=UserTypes.SUPPORT) + user_id = self.get_success( + self.handler.register_user(localpart="user", user_type=UserTypes.SUPPORT) ) - self.assertTrue(self.store.is_support_user(res[0])) + d = self.store.is_support_user(user_id) + self.assertTrue(self.get_success(d)) def test_register_not_support_user(self): - res = self.get_success(self.handler.register(localpart="user")) - self.assertFalse(self.store.is_support_user(res[0])) + user_id = self.get_success(self.handler.register_user(localpart="user")) + d = self.store.is_support_user(user_id) + self.assertFalse(self.get_success(d)) def test_invalid_user_id_length(self): invalid_user_id = "x" * 256 - self.get_failure(self.handler.register(localpart=invalid_user_id), SynapseError) + self.get_failure( + self.handler.register_user(localpart=invalid_user_id), SynapseError + ) @defer.inlineCallbacks def get_or_create_user(self, requester, localpart, displayname, password_hash=None): @@ -267,13 +271,12 @@ def get_or_create_user(self, requester, localpart, displayname, password_hash=No if need_register: yield self.handler.register_with_store( user_id=user_id, - token=token, password_hash=password_hash, create_profile_with_displayname=user.localpart, ) else: yield self.hs.get_auth_handler().delete_access_tokens_for_user(user_id) - yield self.store.add_access_token_to_user(user_id=user_id, token=token) + yield self.store.add_access_token_to_user(user_id=user_id, token=token) if displayname is not None: # logger.info("setting user display name: %s -> %s", user_id, displayname) From d88421ab03e72a6c7f69ca38a57b4b6212f1bc82 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 9 Jul 2019 13:43:08 +0100 Subject: [PATCH 29/80] Include the original event in /relations (#5626) When asking for the relations of an event, include the original event in the response. This will mostly be used for efficiently showing edit history, but could be useful in other circumstances. --- changelog.d/5626.feature | 1 + synapse/rest/client/v2_alpha/relations.py | 8 +++++--- synapse/storage/relations.py | 2 +- tests/rest/client/v2_alpha/test_relations.py | 5 +++++ 4 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5626.feature diff --git a/changelog.d/5626.feature b/changelog.d/5626.feature new file mode 100644 index 000000000000..5ef793b943a0 --- /dev/null +++ b/changelog.d/5626.feature @@ -0,0 +1 @@ +Include the original event when asking for its relations. 
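
After patch 29/80, whose diff follows, a /relations response carries the parent event alongside the page of relations. An illustrative response body; the event IDs and pagination token are made up.

    response = {
        # The requested page of relations, as before.
        "chunk": [
            {"type": "m.reaction", "event_id": "$reaction_1"},
        ],
        "next_batch": "opaque-pagination-token",
        # New: the event the relations hang off, serialized in full, so a
        # client rendering edit history needs no second request.
        "original_event": {"type": "m.room.message", "event_id": "$parent"},
    }

    assert response["original_event"]["event_id"] == "$parent"
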
diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 8e362782cc3c..458afd135f96 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -145,9 +145,9 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non room_id, requester.user.to_string() ) - # This checks that a) the event exists and b) the user is allowed to - # view it. - yield self.event_handler.get_event(requester.user, room_id, parent_id) + # This gets the original event and checks that a) the event exists and + # b) the user is allowed to view it. + event = yield self.event_handler.get_event(requester.user, room_id, parent_id) limit = parse_integer(request, "limit", default=5) from_token = parse_string(request, "from") @@ -173,10 +173,12 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non ) now = self.clock.time_msec() + original_event = yield self._event_serializer.serialize_event(event, now) events = yield self._event_serializer.serialize_events(events, now) return_value = result.to_dict() return_value["chunk"] = events + return_value["original_event"] = original_event defer.returnValue((200, return_value)) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 1b01934c19e8..9954bc094f8d 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -60,7 +60,7 @@ def to_dict(self): class RelationPaginationToken(object): """Pagination token for relation pagination API. - As the results are order by topological ordering, we can use the + As the results are in topological order, we can use the `topological_ordering` and `stream_ordering` fields of the events at the boundaries of the chunk as pagination tokens. diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 6bb7d926385e..58c69518520d 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -126,6 +126,11 @@ def test_basic_paginate_relations(self): channel.json_body["chunk"][0], ) + # We also expect to get the original event (the id of which is self.parent_id) + self.assertEquals( + channel.json_body["original_event"]["event_id"], self.parent_id + ) + # Make sure next_batch has something in it that looks like it could be a # valid token. self.assertIsInstance( From 7b3bc755a34cf97138e614379234cfc47f91a5a9 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Tue, 9 Jul 2019 13:37:39 -0400 Subject: [PATCH 30/80] remove unused and unnecessary check for FederationDeniedError (#5645) FederationDeniedError is a subclass of SynapseError, which is a subclass of CodeMessageException, so if e is a FederationDeniedError, then this check for FederationDeniedError will never be reached since it will be caught by the check for CodeMessageException above. The check for CodeMessageException does almost the same thing as this check (since FederationDeniedError initialises with code=403 and msg="Federation denied with %s."), so may as well just keep allowing it to handle this case. 
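The unreachable branch is easiest to see in miniature. A self-contained sketch with toy class bodies (only the names mirror synapse.api.errors):

class CodeMessageException(Exception):
    pass


class SynapseError(CodeMessageException):
    pass


class FederationDeniedError(SynapseError):
    pass


def classify(e):
    # isinstance checks run top to bottom, and a subclass instance always
    # matches its base class, so the second branch can never be taken.
    if isinstance(e, CodeMessageException):
        return "handled as CodeMessageException"
    if isinstance(e, FederationDeniedError):
        return "dead code"
    return "other"


assert classify(FederationDeniedError()) == "handled as CodeMessageException"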
---
 changelog.d/5645.misc        | 1 +
 synapse/handlers/e2e_keys.py | 5 +----
 2 files changed, 2 insertions(+), 4 deletions(-)
 create mode 100644 changelog.d/5645.misc

diff --git a/changelog.d/5645.misc b/changelog.d/5645.misc
new file mode 100644
index 000000000000..4fa9699e4fae
--- /dev/null
+++ b/changelog.d/5645.misc
@@ -0,0 +1 @@
+Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure.
\ No newline at end of file

diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 55b4ab3a1a9f..fdfe8611b6ca 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -22,7 +22,7 @@

 from twisted.internet import defer

-from synapse.api.errors import CodeMessageException, FederationDeniedError, SynapseError
+from synapse.api.errors import CodeMessageException, SynapseError
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 from synapse.types import UserID, get_domain_from_id
 from synapse.util.retryutils import NotRetryingDestination
@@ -350,9 +350,6 @@ def _exception_to_failure(e):
     if isinstance(e, NotRetryingDestination):
         return {"status": 503, "message": "Not ready for retry"}

-    if isinstance(e, FederationDeniedError):
-        return {"status": 403, "message": "Federation Denied"}
-
     # include ConnectionRefused and other errors
     #
     # Note that some Exceptions (notably twisted's ResponseFailed etc) don't

From 4d122d295c50ae933def7880d8d9d1e1a7846e6c Mon Sep 17 00:00:00 2001
From: Bruno Windels
Date: Wed, 10 Jul 2019 12:55:24 +0000
Subject: [PATCH 31/80] Correct pep517 flag in readme (#5651)

---
 README.rst           | 2 +-
 changelog.d/5651.doc | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5651.doc

diff --git a/README.rst b/README.rst
index 13e11a57736e..bbff8de5ab99 100644
--- a/README.rst
+++ b/README.rst
@@ -272,7 +272,7 @@ to install using pip and a virtualenv::

     virtualenv -p python3 env
     source env/bin/activate
-    python -m pip install --no-pep-517 -e .[all]
+    python -m pip install --no-use-pep517 -e .[all]

 This will run a process of downloading and installing all the needed
 dependencies into a virtual env.

diff --git a/changelog.d/5651.doc b/changelog.d/5651.doc
new file mode 100644
index 000000000000..e2d5a8dc8ad7
--- /dev/null
+++ b/changelog.d/5651.doc
@@ -0,0 +1 @@
+--no-pep517 should be --no-use-pep517 in the documentation to set up the development environment.

From 3dd61d12cdd66000b9cf078f8f485c0c40e4235e Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 10 Jul 2019 14:03:18 +0100
Subject: [PATCH 32/80] Add a linting script (#5627)

Add a dev script to cover all the different linting steps.
---
 changelog.d/5627.misc |  1 +
 scripts-dev/lint.sh   | 12 ++++++++++++
 2 files changed, 13 insertions(+)
 create mode 100644 changelog.d/5627.misc
 create mode 100755 scripts-dev/lint.sh

diff --git a/changelog.d/5627.misc b/changelog.d/5627.misc
new file mode 100644
index 000000000000..730721b5ef19
--- /dev/null
+++ b/changelog.d/5627.misc
@@ -0,0 +1 @@
+Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI.
diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh
new file mode 100755
index 000000000000..ebb4d69f8689
--- /dev/null
+++ b/scripts-dev/lint.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+#
+# Runs linting scripts over the local Synapse checkout
+# isort - sorts import statements
+# flake8 - lints and finds mistakes
+# black - opinionated code formatter
+
+set -e
+
+isort -y -rc synapse tests scripts-dev scripts
+flake8 synapse tests
+python3 -m black synapse tests scripts-dev scripts

From f28171458342c474e7a091fff022972afb366169 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 10 Jul 2019 14:43:11 +0100
Subject: [PATCH 33/80] Don't bundle aggregations when retrieving the original
 event (#5654)

A fix for PR #5626, which returned the original event content as part of a
call to /relations.

The only problem was that we were attempting to aggregate the relations on
top of it when we did so. We now set bundle_aggregations to False in the
get_event call.

We do this when pulling the relation events as well, because edits of edits
are not something we'd like to support here.
---
 changelog.d/5654.bugfix                   |  1 +
 synapse/rest/client/v2_alpha/relations.py | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/5654.bugfix

diff --git a/changelog.d/5654.bugfix b/changelog.d/5654.bugfix
new file mode 100644
index 000000000000..5f76b041cd1c
--- /dev/null
+++ b/changelog.d/5654.bugfix
@@ -0,0 +1 @@
+Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`.
\ No newline at end of file

diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py
index 458afd135f96..7ce485b47124 100644
--- a/synapse/rest/client/v2_alpha/relations.py
+++ b/synapse/rest/client/v2_alpha/relations.py
@@ -173,8 +173,18 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non
         )

         now = self.clock.time_msec()
-        original_event = yield self._event_serializer.serialize_event(event, now)
-        events = yield self._event_serializer.serialize_events(events, now)
+        # We set bundle_aggregations to False when retrieving the original
+        # event because we want the content before relations were applied to
+        # it.
+        original_event = yield self._event_serializer.serialize_event(
+            event, now, bundle_aggregations=False
+        )
+        # Similarly, we don't allow relations to be applied to relations, so we
+        # return the original relations without any aggregations on top of them
+        # here.
+        events = yield self._event_serializer.serialize_events(
+            events, now, bundle_aggregations=False
+        )

         return_value = result.to_dict()
         return_value["chunk"] = events

From f77e99761919db671960aae4792cb563ad2b8e53 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 10 Jul 2019 15:46:42 +0100
Subject: [PATCH 34/80] Send 3PID bind requests as JSON data

---
 changelog.d/5656.bugfix      | 1 +
 synapse/handlers/identity.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/5656.bugfix

diff --git a/changelog.d/5656.bugfix b/changelog.d/5656.bugfix
new file mode 100644
index 000000000000..f6ae906a9ad6
--- /dev/null
+++ b/changelog.d/5656.bugfix
@@ -0,0 +1 @@
+Fix 3PID bind requests being sent to identity servers as `application/x-www-form-urlencoded` data, which is deprecated.
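The substance of the fix is just the request body encoding and the Content-Type it implies. A standalone sketch using only the standard library (the payload values are illustrative):

import json
from urllib.parse import urlencode

payload = {"sid": "abc", "client_secret": "s3kr3t", "mxid": "@user:example.org"}

# Deprecated: sent with Content-Type application/x-www-form-urlencoded
form_body = urlencode(payload).encode()
# b'sid=abc&client_secret=s3kr3t&mxid=%40user%3Aexample.org'

# What this patch switches to: Content-Type application/json
json_body = json.dumps(payload).encode()
# b'{"sid": "abc", "client_secret": "s3kr3t", "mxid": "@user:example.org"}'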
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index c82b1933f216..ee6c2c4f8bd7 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -118,7 +118,7 @@ def bind_threepid(self, creds, mxid): raise SynapseError(400, "No client_secret in creds") try: - data = yield self.http_client.post_urlencoded_get_json( + data = yield self.http_client.post_post_get_json( "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"), {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid}, ) From 351d9bd3179dac7a4ad0f25b88e4314aab4527cd Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 10 Jul 2019 15:48:50 +0100 Subject: [PATCH 35/80] Rename changelog file --- changelog.d/{5656.bugfix => 5658.bugfix} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5656.bugfix => 5658.bugfix} (100%) diff --git a/changelog.d/5656.bugfix b/changelog.d/5658.bugfix similarity index 100% rename from changelog.d/5656.bugfix rename to changelog.d/5658.bugfix From b2a2e96ea6d6a2e4f47a9ad28919371227a5dd49 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 10 Jul 2019 15:56:21 +0100 Subject: [PATCH 36/80] Typo --- synapse/handlers/identity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index ee6c2c4f8bd7..546d6169e9fa 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -118,7 +118,7 @@ def bind_threepid(self, creds, mxid): raise SynapseError(400, "No client_secret in creds") try: - data = yield self.http_client.post_post_get_json( + data = yield self.http_client.post_json_get_json( "https://%s%s" % (id_server, "/_matrix/identity/api/v1/3pid/bind"), {"sid": creds["sid"], "client_secret": client_secret, "mxid": mxid}, ) From 953dbb79808c018fe34999a662f4c7cef8ea3721 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 10 Jul 2019 16:26:49 +0100 Subject: [PATCH 37/80] Remove access-token support from RegistrationStore.register (#5642) The 'token' param is no longer used anywhere except the tests, so let's kill that off too. --- changelog.d/5642.misc | 1 + synapse/handlers/register.py | 2 +- synapse/storage/registration.py | 24 +++-------------- tests/api/test_auth.py | 2 +- tests/handlers/test_register.py | 6 +---- tests/handlers/test_user_directory.py | 16 ++++------- tests/storage/test_client_ips.py | 4 +-- tests/storage/test_monthly_active_users.py | 23 +++++++--------- tests/storage/test_registration.py | 31 +++++----------------- 9 files changed, 30 insertions(+), 79 deletions(-) create mode 100644 changelog.d/5642.misc diff --git a/changelog.d/5642.misc b/changelog.d/5642.misc new file mode 100644 index 000000000000..e7f8e214a4e6 --- /dev/null +++ b/changelog.d/5642.misc @@ -0,0 +1 @@ +Remove access-token support from `RegistrationStore.register`, and rename it. 
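With the `token` parameter gone, minting and persisting a token becomes an explicit second step after row creation. A rough sketch of the resulting calling pattern (the wrapper function is hypothetical; the store and macaroon calls are the ones visible in the diffs below):

from twisted.internet import defer


@defer.inlineCallbacks
def register_user_with_token(store, macaroon_gen, user_id, device_id=None):
    # Create the user row; register_user no longer touches access_tokens.
    yield store.register_user(user_id=user_id, password_hash=None)
    # Minting and persisting a token is now a separate, explicit step.
    token = macaroon_gen.generate_access_token(user_id)
    yield store.add_access_token_to_user(user_id, token, device_id)
    defer.returnValue(token)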
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a3e553d5f51d..420c5cb5bce7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -584,7 +584,7 @@ def register_with_store( address=address, ) else: - return self.store.register( + return self.store.register_user( user_id=user_id, password_hash=password_hash, was_guest=was_guest, diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index aea5b3276bbc..8e217c94089e 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -698,10 +698,9 @@ def add_access_token_to_user(self, user_id, token, device_id=None): desc="add_access_token_to_user", ) - def register( + def register_user( self, user_id, - token=None, password_hash=None, was_guest=False, make_guest=False, @@ -714,9 +713,6 @@ def register( Args: user_id (str): The desired user ID to register. - token (str): The desired access token to use for this user. If this - is not None, the given access token is associated with the user - id. password_hash (str): Optional. The password hash for this user. was_guest (bool): Optional. Whether this is a guest account being upgraded to a non-guest account. @@ -733,10 +729,9 @@ def register( StoreError if the user_id could not be registered. """ return self.runInteraction( - "register", - self._register, + "register_user", + self._register_user, user_id, - token, password_hash, was_guest, make_guest, @@ -746,11 +741,10 @@ def register( user_type, ) - def _register( + def _register_user( self, txn, user_id, - token, password_hash, was_guest, make_guest, @@ -763,8 +757,6 @@ def _register( now = int(self.clock.time()) - next_id = self._access_tokens_id_gen.get_next() - try: if was_guest: # Ensure that the guest user actually exists @@ -812,14 +804,6 @@ def _register( if self._account_validity.enabled: self.set_expiration_date_for_user_txn(txn, user_id) - if token: - # it's possible for this to get a conflict, but only for a single user - # since tokens are namespaced based on their user ID - txn.execute( - "INSERT INTO access_tokens(id, user_id, token)" " VALUES (?,?,?)", - (next_id, user_id, token), - ) - if create_profile_with_displayname: # set a default displayname serverside to avoid ugly race # between auto-joins and clients trying to set displaynames diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index d4e75b5b2e72..96b26f974b7f 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -325,7 +325,7 @@ def test_reserved_threepid(self): unknown_threepid = {"medium": "email", "address": "unreserved@server.com"} self.hs.config.mau_limits_reserved_threepids = [threepid] - yield self.store.register(user_id="user1", token="123", password_hash=None) + yield self.store.register_user(user_id="user1", password_hash=None) with self.assertRaises(ResourceLimitError): yield self.auth.check_auth_blocking() diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 8197f26d4f4d..1b7e1dacee0e 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -77,11 +77,7 @@ def test_if_user_exists(self): store = self.hs.get_datastore() frank = UserID.from_string("@frank:test") self.get_success( - store.register( - user_id=frank.to_string(), - token="jkv;g498752-43gj['eamb!-5", - password_hash=None, - ) + store.register_user(user_id=frank.to_string(), password_hash=None) ) local_part = frank.localpart user_id = frank.to_string() diff --git a/tests/handlers/test_user_directory.py 
b/tests/handlers/test_user_directory.py index b135486c4877..c5e91a8c41b8 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -47,11 +47,8 @@ def prepare(self, reactor, clock, hs): def test_handle_local_profile_change_with_support_user(self): support_user_id = "@support:test" self.get_success( - self.store.register( - user_id=support_user_id, - token="123", - password_hash=None, - user_type=UserTypes.SUPPORT, + self.store.register_user( + user_id=support_user_id, password_hash=None, user_type=UserTypes.SUPPORT ) ) @@ -73,11 +70,8 @@ def test_handle_local_profile_change_with_support_user(self): def test_handle_user_deactivated_support_user(self): s_user_id = "@support:test" self.get_success( - self.store.register( - user_id=s_user_id, - token="123", - password_hash=None, - user_type=UserTypes.SUPPORT, + self.store.register_user( + user_id=s_user_id, password_hash=None, user_type=UserTypes.SUPPORT ) ) @@ -90,7 +84,7 @@ def test_handle_user_deactivated_support_user(self): def test_handle_user_deactivated_regular_user(self): r_user_id = "@regular:test" self.get_success( - self.store.register(user_id=r_user_id, token="123", password_hash=None) + self.store.register_user(user_id=r_user_id, password_hash=None) ) self.store.remove_from_user_dir = Mock() self.get_success(self.handler.handle_user_deactivated(r_user_id)) diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 59c6f8c227a0..09305c3bf1c9 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -185,9 +185,7 @@ def test_updating_monthly_active_user_when_space(self): self.hs.config.limit_usage_by_mau = True self.hs.config.max_mau_value = 50 user_id = "@user:server" - self.get_success( - self.store.register(user_id=user_id, token="123", password_hash=None) - ) + self.get_success(self.store.register_user(user_id=user_id, password_hash=None)) active = self.get_success(self.store.user_last_seen_monthly_active(user_id)) self.assertFalse(active) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 0ce0b991f98a..1494650d10be 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -53,10 +53,10 @@ def test_initialise_reserved_users(self): # -1 because user3 is a support user and does not count user_num = len(threepids) - 1 - self.store.register(user_id=user1, token="123", password_hash=None) - self.store.register(user_id=user2, token="456", password_hash=None) - self.store.register( - user_id=user3, token="789", password_hash=None, user_type=UserTypes.SUPPORT + self.store.register_user(user_id=user1, password_hash=None) + self.store.register_user(user_id=user2, password_hash=None) + self.store.register_user( + user_id=user3, password_hash=None, user_type=UserTypes.SUPPORT ) self.pump() @@ -161,9 +161,7 @@ def test_reap_monthly_active_users(self): def test_populate_monthly_users_is_guest(self): # Test that guest users are not added to mau list user_id = "@user_id:host" - self.store.register( - user_id=user_id, token="123", password_hash=None, make_guest=True - ) + self.store.register_user(user_id=user_id, password_hash=None, make_guest=True) self.store.upsert_monthly_active_user = Mock() self.store.populate_monthly_active_users(user_id) self.pump() @@ -216,8 +214,8 @@ def test_get_reserved_real_user_account(self): self.assertEquals(self.get_success(count), 0) # Test reserved registed users - self.store.register(user_id=user1, 
token="123", password_hash=None) - self.store.register(user_id=user2, token="456", password_hash=None) + self.store.register_user(user_id=user1, password_hash=None) + self.store.register_user(user_id=user2, password_hash=None) self.pump() now = int(self.hs.get_clock().time_msec()) @@ -232,11 +230,8 @@ def test_support_user_not_add_to_mau_limits(self): self.pump() self.assertEqual(self.get_success(count), 0) - self.store.register( - user_id=support_user_id, - token="123", - password_hash=None, - user_type=UserTypes.SUPPORT, + self.store.register_user( + user_id=support_user_id, password_hash=None, user_type=UserTypes.SUPPORT ) self.store.upsert_monthly_active_user(support_user_id) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 625b651e9123..9365c4622d5c 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -37,7 +37,7 @@ def setUp(self): @defer.inlineCallbacks def test_register(self): - yield self.store.register(self.user_id, self.tokens[0], self.pwhash) + yield self.store.register_user(self.user_id, self.pwhash) self.assertEquals( { @@ -53,15 +53,9 @@ def test_register(self): (yield self.store.get_user_by_id(self.user_id)), ) - result = yield self.store.get_user_by_access_token(self.tokens[0]) - - self.assertDictContainsSubset({"name": self.user_id}, result) - - self.assertTrue("token_id" in result) - @defer.inlineCallbacks def test_add_tokens(self): - yield self.store.register(self.user_id, self.tokens[0], self.pwhash) + yield self.store.register_user(self.user_id, self.pwhash) yield self.store.add_access_token_to_user( self.user_id, self.tokens[1], self.device_id ) @@ -77,7 +71,8 @@ def test_add_tokens(self): @defer.inlineCallbacks def test_user_delete_access_tokens(self): # add some tokens - yield self.store.register(self.user_id, self.tokens[0], self.pwhash) + yield self.store.register_user(self.user_id, self.pwhash) + yield self.store.add_access_token_to_user(self.user_id, self.tokens[0]) yield self.store.add_access_token_to_user( self.user_id, self.tokens[1], self.device_id ) @@ -108,24 +103,12 @@ def test_is_support_user(self): res = yield self.store.is_support_user(None) self.assertFalse(res) - yield self.store.register(user_id=TEST_USER, token="123", password_hash=None) + yield self.store.register_user(user_id=TEST_USER, password_hash=None) res = yield self.store.is_support_user(TEST_USER) self.assertFalse(res) - yield self.store.register( - user_id=SUPPORT_USER, - token="456", - password_hash=None, - user_type=UserTypes.SUPPORT, + yield self.store.register_user( + user_id=SUPPORT_USER, password_hash=None, user_type=UserTypes.SUPPORT ) res = yield self.store.is_support_user(SUPPORT_USER) self.assertTrue(res) - - -class TokenGenerator: - def __init__(self): - self._last_issued_token = 0 - - def generate(self, user_id): - self._last_issued_token += 1 - return "%s-%d" % (user_id, self._last_issued_token) From 1890cfcf82aa3e69530e97bf9ce783e390f22fbe Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 10 Jul 2019 19:10:07 +0100 Subject: [PATCH 38/80] Inline issue_access_token (#5659) this is only used in one place, so it's clearer if we inline it and reduce the API surface. Also, fixes a buglet where we would create an access token even if we were about to block the user (we would never return the AT, so the user could never use it, but it was still created and added to the db.) 
---
 changelog.d/5659.misc    |  1 +
 synapse/handlers/auth.py | 10 +++-------
 tests/api/test_auth.py   |  2 +-
 3 files changed, 5 insertions(+), 8 deletions(-)
 create mode 100644 changelog.d/5659.misc

diff --git a/changelog.d/5659.misc b/changelog.d/5659.misc
new file mode 100644
index 000000000000..686001295c33
--- /dev/null
+++ b/changelog.d/5659.misc
@@ -0,0 +1 @@
+Inline issue_access_token.

diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index ef5585aa9912..da312b188e64 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -578,9 +578,11 @@ def get_access_token_for_user_id(self, user_id, device_id=None):
             StoreError if there was a problem storing the token.
         """
         logger.info("Logging in user %s on device %s", user_id, device_id)
-        access_token = yield self.issue_access_token(user_id, device_id)
         yield self.auth.check_auth_blocking(user_id)

+        access_token = self.macaroon_gen.generate_access_token(user_id)
+        yield self.store.add_access_token_to_user(user_id, access_token, device_id)
+
         # the device *should* have been registered before we got here; however,
         # it's possible we raced against a DELETE operation. The thing we
         # really don't want is active access_tokens without a record of the
@@ -831,12 +833,6 @@ def _check_local_password(self, user_id, password):
             defer.returnValue(None)

         defer.returnValue(user_id)

-    @defer.inlineCallbacks
-    def issue_access_token(self, user_id, device_id=None):
-        access_token = self.macaroon_gen.generate_access_token(user_id)
-        yield self.store.add_access_token_to_user(user_id, access_token, device_id)
-        defer.returnValue(access_token)
-
     @defer.inlineCallbacks
     def validate_short_term_login_token_and_get_user_id(self, login_token):
         auth_api = self.hs.get_auth()

diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py
index 96b26f974b7f..ddf2b578b373 100644
--- a/tests/api/test_auth.py
+++ b/tests/api/test_auth.py
@@ -244,7 +244,7 @@ def test_cannot_use_regular_token_as_guest(self):
         USER_ID = "@percy:matrix.org"
         self.store.add_access_token_to_user = Mock()

-        token = yield self.hs.handlers.auth_handler.issue_access_token(
+        token = yield self.hs.handlers.auth_handler.get_access_token_for_user_id(
            USER_ID, "DEVICE"
        )
        self.store.add_access_token_to_user.assert_called_with(USER_ID, token, "DEVICE")

From 38a6d3eea7e3ce1a2e37f9f88fd6742fcadc90d0 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens
Date: Thu, 11 Jul 2019 10:36:03 +0100
Subject: [PATCH 39/80] Add basic opentracing support (#5544)

* Configure and initialise tracer

Includes config options for the tracer and sets up JaegerClient.

* Scope manager using LogContexts

We piggy-back our tracer scopes by using log context. The current log
context gives us the current scope. If a new scope is created we create
a stack of scopes in the context.

* jaeger is a dependency now

* Carrier inject and extraction for Twisted Headers

* Trace federation requests on the way in and out.

The span is created in _started_processing and closed in
_finished_processing because we need a meaningful log context.

* Create logcontext for new scope.

Instead of having a stack of scopes in a logcontext we create a new
context for a new scope if the current logcontext already has a scope.

* Remove scope from logcontext if logcontext is top level

* Disable tracer if not configured

* typo

* Remove dependence on jaeger internals

* bools

* Set service name

* Explicitly state that the tracer is disabled

* Black is the new black

* Newsfile

* Code style

* Use the new config setup.

* Generate config.
* Copyright

* Rename config to opentracing

* Remove user whitelisting

* Empty whitelist by default

* Use ConfigError instead of RuntimeError

* Use isinstance

* Use tag constants for opentracing.

* Remove debug comment and no need to explicitly record error

* Two errors a "s(c)entry"

* Docstrings!

* Remove debugging brainslip

* Homeserver Whitelisting

* Better opentracing config comment

* linting

* Include worker name in service_name

* Make opentracing an optional dependency

* Neater config retrieval

* Clean up dummy tags

* Instantiate tracing as object instead of global class

* Include opentracing as a homeserver member.

* Thread opentracing to the request level

* Reference opentracing through hs

* Instantiate dummy opentracing for tests.

* About to revert, just keeping the unfinished changes just in case

* Revert back to global state, commit number:
9ce4a3d9067bf9889b86c360c05ac88618b85c4f

* Use class level methods in tracerutils

* Start and stop requests spans in a place where we have access to the
authenticated entity

* Seen it, isort it

* Make sure to close the active span.

* I'm getting black and blue from this.

* Logger formatting

Co-Authored-By: Erik Johnston

* Outdated comment

* Import opentracing at the top

* Return a contextmanager

* Start tracing client requests from the servlet

* Return noop context manager if not tracing

* Explicitly say that these are federation requests

* Include servlet name in client requests

* Use context manager

* Move opentracing to logging/

* Seen it, isort it again!

* Ignore twisted return exceptions on context exit

* Escape the scope

* Scopes should be entered to make them useful.

* Nicer decorator names

* Just one init, init?

* Don't need to close something that isn't open

* Docs make you smarter
---
 changelog.d/5544.misc                  |   1 +
 docs/sample_config.yaml                |  17 ++
 synapse/app/_base.py                   |   3 +
 synapse/config/homeserver.py           |   2 +
 synapse/config/tracer.py               |  50 ++++
 synapse/federation/transport/server.py |  26 +-
 synapse/http/matrixfederationclient.py |  28 +-
 synapse/http/servlet.py                |   7 +-
 synapse/logging/context.py             |   8 +-
 synapse/logging/opentracing.py         | 362 +++++++++++++++++++++++++
 synapse/logging/scopecontextmanager.py | 140 ++++++++++
 synapse/python_dependencies.py         |   1 +
 12 files changed, 633 insertions(+), 12 deletions(-)
 create mode 100644 changelog.d/5544.misc
 create mode 100644 synapse/config/tracer.py
 create mode 100644 synapse/logging/opentracing.py
 create mode 100644 synapse/logging/scopecontextmanager.py

diff --git a/changelog.d/5544.misc b/changelog.d/5544.misc
new file mode 100644
index 000000000000..81d6f74c31c8
--- /dev/null
+++ b/changelog.d/5544.misc
@@ -0,0 +1 @@
+Added opentracing and configuration options.

diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 7fe7c94ac4cb..0462f0a17a2b 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1395,3 +1395,20 @@ password_config:
 #    module: "my_custom_project.SuperRulesSet"
 #    config:
 #       example_option: 'things'
+
+
+## Opentracing ##
+# These settings enable opentracing, which implements distributed tracing.
+# This allows you to observe the causal chain of events across servers
+# including requests, key lookups etc. across any server running
+# synapse or any other services which support opentracing.
+# (specifically those implemented with jaeger)
+
+#opentracing:
+#  # Enable / disable tracer
+#  tracer_enabled: false
+#  # The list of homeservers we wish to expose our current traces to.
+#  # The list is a list of regexes which are matched against the
+#  # servername of the homeserver
+#  homeserver_whitelist:
+#    - ".*"

diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 1ebb7ae53948..bd285122eab9 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -243,6 +243,9 @@ def handle_sighup(*args, **kwargs):
         # Load the certificate from disk.
         refresh_certificate(hs)

+        # Start the tracer
+        synapse.logging.opentracing.init_tracer(hs.config)
+
         # It is now safe to start your Synapse.
         hs.start_listening(listeners)
         hs.get_datastore().start_profiling()

diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index acadef4fd316..72acad4f1895 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -40,6 +40,7 @@
 from .stats import StatsConfig
 from .third_party_event_rules import ThirdPartyRulesConfig
 from .tls import TlsConfig
+from .tracer import TracerConfig
 from .user_directory import UserDirectoryConfig
 from .voip import VoipConfig
 from .workers import WorkerConfig
@@ -75,5 +76,6 @@ class HomeServerConfig(
     ServerNoticesConfig,
     RoomDirectoryConfig,
     ThirdPartyRulesConfig,
+    TracerConfig,
 ):
     pass

diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
new file mode 100644
index 000000000000..63a637984aa8
--- /dev/null
+++ b/synapse/config/tracer.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config, ConfigError
+
+
+class TracerConfig(Config):
+    def read_config(self, config, **kwargs):
+        self.tracer_config = config.get("opentracing")
+
+        self.tracer_config = config.get("opentracing", {"tracer_enabled": False})
+
+        if self.tracer_config.get("tracer_enabled", False):
+            # The tracer is enabled, so sanitize the config.
+            # If no whitelist is given, default to an empty list.
+            self.tracer_config.setdefault("homeserver_whitelist", [])
+
+            if not isinstance(self.tracer_config.get("homeserver_whitelist"), list):
+                raise ConfigError("Tracer homeserver_whitelist config is malformed")
+
+    def generate_config_section(cls, **kwargs):
+        return """\
+        ## Opentracing ##
+        # These settings enable opentracing, which implements distributed tracing.
+        # This allows you to observe the causal chain of events across servers
+        # including requests, key lookups etc. across any server running
+        # synapse or any other services which support opentracing.
+        # (specifically those implemented with jaeger)
+
+        #opentracing:
+        #  # Enable / disable tracer
+        #  tracer_enabled: false
+        #  # The list of homeservers we wish to expose our current traces to.
+        #  # The list is a list of regexes which are matched against the
+        #  # servername of the homeserver
+        #  homeserver_whitelist:
+        #    - ".*"
+        """

diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 2efdcff7ef8b..c45d458d946d 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -21,6 +21,7 @@
 from twisted.internet import defer

 import synapse
+import synapse.logging.opentracing as opentracing
 from synapse.api.errors import Codes, FederationDeniedError, SynapseError
 from synapse.api.room_versions import RoomVersions
 from synapse.api.urls import (
@@ -288,14 +289,29 @@ def new_func(request, *args, **kwargs):
                 logger.warn("authenticate_request failed: %s", e)
                 raise

-            if origin:
-                with ratelimiter.ratelimit(origin) as d:
-                    yield d
+            # Start an opentracing span
+            with opentracing.start_active_span_from_context(
+                request.requestHeaders,
+                "incoming-federation-request",
+                tags={
+                    "request_id": request.get_request_id(),
+                    opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_SERVER,
+                    opentracing.tags.HTTP_METHOD: request.get_method(),
+                    opentracing.tags.HTTP_URL: request.get_redacted_uri(),
+                    opentracing.tags.PEER_HOST_IPV6: request.getClientIP(),
+                    "authenticated_entity": origin,
+                },
+            ):
+                if origin:
+                    with ratelimiter.ratelimit(origin) as d:
+                        yield d
+                        response = yield func(
+                            origin, content, request.args, *args, **kwargs
+                        )
+                else:
                     response = yield func(
                         origin, content, request.args, *args, **kwargs
                     )
-            else:
-                response = yield func(origin, content, request.args, *args, **kwargs)

             defer.returnValue(response)

diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py
index dee3710f682a..e60334547e96 100644
--- a/synapse/http/matrixfederationclient.py
+++ b/synapse/http/matrixfederationclient.py
@@ -36,6 +36,7 @@
 from twisted.web._newclient import ResponseDone
 from twisted.web.http_headers import Headers

+import synapse.logging.opentracing as opentracing
 import synapse.metrics
 import synapse.util.retryutils
 from synapse.api.errors import (
@@ -339,9 +340,25 @@ def _send_request(
         else:
             query_bytes = b""

-        headers_dict = {b"User-Agent": [self.version_string_bytes]}
+        # Start an active span for the outgoing request
+        scope = opentracing.start_active_span(
+            "outgoing-federation-request",
+            tags={
+                opentracing.tags.SPAN_KIND: opentracing.tags.SPAN_KIND_RPC_CLIENT,
+                opentracing.tags.PEER_ADDRESS: request.destination,
+                opentracing.tags.HTTP_METHOD: request.method,
+                opentracing.tags.HTTP_URL: request.path,
+            },
+            finish_on_close=True,
+        )
+
+        # Inject the span into the headers
+        headers_dict = {}
+        opentracing.inject_active_span_byte_dict(headers_dict, request.destination)

-        with limiter:
+        headers_dict[b"User-Agent"] = [self.version_string_bytes]
+
+        with limiter, scope:
             # XXX: Would be much nicer to retry only at the transaction-layer
             # (once we have reliable transactions in place)
             if long_retries:
@@ -419,6 +436,10 @@ def _send_request(
                         response.phrase.decode("ascii", errors="replace"),
                     )

+                    opentracing.set_tag(
+                        opentracing.tags.HTTP_STATUS_CODE, response.code
+                    )
+
                     if 200 <= response.code < 300:
                         pass
                     else:
@@ -499,8 +520,7 @@ def _send_request(
                         _flatten_response_never_received(e),
                     )
                     raise
-
-        defer.returnValue(response)
+            defer.returnValue(response)

     def build_auth_headers(
         self, destination, method, url_bytes, content=None, destination_is=None

diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py
index cd8415acd5ab..889038ff2585 100644
--- a/synapse/http/servlet.py
+++ b/synapse/http/servlet.py
@@ -20,6 +20,7 @@
 from canonicaljson import json

 from synapse.api.errors import Codes, SynapseError
+from synapse.logging.opentracing import trace_servlet

 logger = logging.getLogger(__name__)

@@ -290,7 +291,11 @@ def register(self, http_server):
             for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"):
                 if hasattr(self, "on_%s" % (method,)):
                     method_handler = getattr(self, "on_%s" % (method,))
-                    http_server.register_paths(method, patterns, method_handler)
+                    http_server.register_paths(
+                        method,
+                        patterns,
+                        trace_servlet(self.__class__.__name__, method_handler),
+                    )
         else:
             raise NotImplementedError("RestServlet must register something.")

diff --git a/synapse/logging/context.py b/synapse/logging/context.py
index 30dfa1d6b2ed..b456c31f7071 100644
--- a/synapse/logging/context.py
+++ b/synapse/logging/context.py
@@ -186,6 +186,7 @@ class LoggingContext(object):
         "alive",
         "request",
         "tag",
+        "scope",
     ]

     thread_local = threading.local()
@@ -238,6 +239,7 @@ def __init__(self, name=None, parent_context=None, request=None):
         self.request = None
         self.tag = ""
         self.alive = True
+        self.scope = None

         self.parent_context = parent_context

@@ -322,10 +324,12 @@ def copy_to(self, record):
         another LoggingContext
         """

-        # 'request' is the only field we currently use in the logger, so that's
-        # all we need to copy
+        # we track the current request
         record.request = self.request

+        # we also track the current scope:
+        record.scope = self.scope
+
     def start(self):
         if get_thread_id() != self.main_thread:
             logger.warning("Started logcontext %s on different thread", self)

diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
new file mode 100644
index 000000000000..f0ceea2a642a
--- /dev/null
+++ b/synapse/logging/opentracing.py
@@ -0,0 +1,362 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# NOTE
+# This is a small wrapper around opentracing because opentracing is not currently
+# packaged downstream (specifically debian). Since opentracing instrumentation is
+# fairly invasive it was awkward to make it optional. As a result we opted to encapsulate
+# all opentracing state in these methods which effectively noop if opentracing is
+# not present. We should strongly consider encouraging the downstream distributors
+# to package opentracing and making opentracing a full dependency. In order to facilitate
+# this move, the methods work very similarly to opentracing's and it should only
+# be a matter of a few regexes to move over to opentracing's access patterns proper.
+
+try:
+    import opentracing
+except ImportError:
+    opentracing = None
+try:
+    from jaeger_client import Config as JaegerConfig
+    from synapse.logging.scopecontextmanager import LogContextScopeManager
+except ImportError:
+    JaegerConfig = None
+    LogContextScopeManager = None
+
+import contextlib
+import logging
+import re
+from functools import wraps
+
+from twisted.internet import defer
+
+logger = logging.getLogger(__name__)
+
+
+class _DummyTagNames(object):
+    """Wrapper of opentracing's tags. We need to have them if we
+    want to reference them without opentracing around. Clearly they
+    should never actually show up in a trace. `set_tags` overwrites
+    these with the correct ones."""
+
+    INVALID_TAG = "invalid-tag"
+    COMPONENT = INVALID_TAG
+    DATABASE_INSTANCE = INVALID_TAG
+    DATABASE_STATEMENT = INVALID_TAG
+    DATABASE_TYPE = INVALID_TAG
+    DATABASE_USER = INVALID_TAG
+    ERROR = INVALID_TAG
+    HTTP_METHOD = INVALID_TAG
+    HTTP_STATUS_CODE = INVALID_TAG
+    HTTP_URL = INVALID_TAG
+    MESSAGE_BUS_DESTINATION = INVALID_TAG
+    PEER_ADDRESS = INVALID_TAG
+    PEER_HOSTNAME = INVALID_TAG
+    PEER_HOST_IPV4 = INVALID_TAG
+    PEER_HOST_IPV6 = INVALID_TAG
+    PEER_PORT = INVALID_TAG
+    PEER_SERVICE = INVALID_TAG
+    SAMPLING_PRIORITY = INVALID_TAG
+    SERVICE = INVALID_TAG
+    SPAN_KIND = INVALID_TAG
+    SPAN_KIND_CONSUMER = INVALID_TAG
+    SPAN_KIND_PRODUCER = INVALID_TAG
+    SPAN_KIND_RPC_CLIENT = INVALID_TAG
+    SPAN_KIND_RPC_SERVER = INVALID_TAG
+
+
+def only_if_tracing(func):
+    """Executes the function only if we're tracing. Otherwise return.
+    Assumes the function wrapped may return None"""
+
+    @wraps(func)
+    def _only_if_tracing_inner(*args, **kwargs):
+        if opentracing:
+            return func(*args, **kwargs)
+        else:
+            return
+
+    return _only_if_tracing_inner
+
+
+# Block everything by default
+_homeserver_whitelist = None
+
+tags = _DummyTagNames
+
+
+def init_tracer(config):
+    """Set the whitelists and initialise the JaegerClient tracer
+
+    Args:
+        config (Config)
+        The config used by the homeserver. Here it's used to set the service
+        name to the homeserver's.
+    """
+    global opentracing
+    if not config.tracer_config.get("tracer_enabled", False):
+        # We don't have a tracer
+        opentracing = None
+        return
+
+    if not opentracing:
+        logger.error(
+            "The server has been configured to use opentracing but opentracing is not installed."
+        )
+        raise ModuleNotFoundError("opentracing")
+
+    if not JaegerConfig:
+        logger.error(
+            "The server has been configured to use opentracing but the jaeger client is not installed."
+        )
+
+    # Include the worker name
+    name = config.worker_name if config.worker_name else "master"
+
+    set_homeserver_whitelist(config.tracer_config["homeserver_whitelist"])
+    jaeger_config = JaegerConfig(
+        config={"sampler": {"type": "const", "param": 1}, "logging": True},
+        service_name="{} {}".format(config.server_name, name),
+        scope_manager=LogContextScopeManager(config),
+    )
+    jaeger_config.initialize_tracer()
+
+    # Set up tags to be opentracing's tags
+    global tags
+    tags = opentracing.tags
+
+
+@contextlib.contextmanager
+def _noop_context_manager(*args, **kwargs):
+    """Does absolutely nothing really well. Can be entered and exited arbitrarily.
+    Good substitute for an opentracing scope."""
+    yield
+
+
+# Could use kwargs but I want these to be explicit
+def start_active_span(
+    operation_name,
+    child_of=None,
+    references=None,
+    tags=None,
+    start_time=None,
+    ignore_active_span=False,
+    finish_on_close=True,
+):
+    """Starts an active opentracing span. Note, the scope doesn't become active
+    until it has been entered, however, the span starts from the time this
+    method is called.
+    Args:
+        See opentracing.tracer
+    Returns:
+        scope (Scope) or noop_context_manager
+    """
+    if opentracing is None:
+        return _noop_context_manager()
+    else:
+        # We need to enter the scope here for the logcontext to become active
+        return opentracing.tracer.start_active_span(
+            operation_name,
+            child_of=child_of,
+            references=references,
+            tags=tags,
+            start_time=start_time,
+            ignore_active_span=ignore_active_span,
+            finish_on_close=finish_on_close,
+        )
+
+
+@only_if_tracing
+def close_active_span():
+    """Closes the active span. This will close its logcontext if the context
+    was made for the span"""
+    opentracing.tracer.scope_manager.active.__exit__(None, None, None)
+
+
+@only_if_tracing
+def set_tag(key, value):
+    """Sets a tag on the active span"""
+    opentracing.tracer.active_span.set_tag(key, value)
+
+
+@only_if_tracing
+def log_kv(key_values, timestamp=None):
+    """Log to the active span"""
+    opentracing.tracer.active_span.log_kv(key_values, timestamp)
+
+
+# Note: we don't have a get-baggage-items method because we're trying to hide all
+# scope and span state from synapse. I think this method may also be useless
+# as a result
+@only_if_tracing
+def set_baggage_item(key, value):
+    """Attach baggage to the active span"""
+    opentracing.tracer.active_span.set_baggage_item(key, value)
+
+
+@only_if_tracing
+def set_operation_name(operation_name):
+    """Sets the operation name of the active span"""
+    opentracing.tracer.active_span.set_operation_name(operation_name)
+
+
+@only_if_tracing
+def set_homeserver_whitelist(homeserver_whitelist):
+    """Sets the whitelist
+
+    Args:
+        homeserver_whitelist (iterable of strings): regexes of whitelisted homeservers
+    """
+    global _homeserver_whitelist
+    if homeserver_whitelist:
+        # Makes a single regex which accepts all passed in regexes in the list
+        _homeserver_whitelist = re.compile(
+            "({})".format(")|(".join(homeserver_whitelist))
+        )
+
+
+@only_if_tracing
+def whitelisted_homeserver(destination):
+    """Checks if a destination matches the whitelist
+    Args:
+        destination (String)"""
+    global _homeserver_whitelist
+    if _homeserver_whitelist:
+        return _homeserver_whitelist.match(destination)
+    return False
+
+
+def start_active_span_from_context(
+    headers,
+    operation_name,
+    references=None,
+    tags=None,
+    start_time=None,
+    ignore_active_span=False,
+    finish_on_close=True,
+):
+    """
+    Extracts a span context from Twisted Headers.
+    Args:
+        headers (twisted.web.http_headers.Headers)
+    Returns:
+        span_context (opentracing.span.SpanContext)
+    """
+    # Twisted encodes the values as lists whereas opentracing doesn't.
+    # So, we take the first item in the list.
+    # Also, twisted uses byte arrays while opentracing expects strings.
+    if opentracing is None:
+        return _noop_context_manager()
+
+    header_dict = {k.decode(): v[0].decode() for k, v in headers.getAllRawHeaders()}
+    context = opentracing.tracer.extract(opentracing.Format.HTTP_HEADERS, header_dict)
+
+    return opentracing.tracer.start_active_span(
+        operation_name,
+        child_of=context,
+        references=references,
+        tags=tags,
+        start_time=start_time,
+        ignore_active_span=ignore_active_span,
+        finish_on_close=finish_on_close,
+    )
+
+
+@only_if_tracing
+def inject_active_span_twisted_headers(headers, destination):
+    """
+    Injects a span context into twisted headers in place
+
+    Args:
+        headers (twisted.web.http_headers.Headers)
+        span (opentracing.Span)
+
+    Returns:
+        In-place modification of headers
+
+    Note:
+        The headers set by the tracer are custom to the tracer implementation which
+        should be unique enough that they don't interfere with any headers set by
+        synapse or twisted. If we're still using jaeger these headers would be those
+        here:
+        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
+    """
+
+    if not whitelisted_homeserver(destination):
+        return
+
+    span = opentracing.tracer.active_span
+    carrier = {}
+    opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)
+
+    for key, value in carrier.items():
+        headers.addRawHeaders(key, value)
+
+
+@only_if_tracing
+def inject_active_span_byte_dict(headers, destination):
+    """
+    Injects a span context into a dict where the headers are encoded as byte
+    strings
+
+    Args:
+        headers (dict)
+        span (opentracing.Span)
+
+    Returns:
+        In-place modification of headers
+
+    Note:
+        The headers set by the tracer are custom to the tracer implementation which
+        should be unique enough that they don't interfere with any headers set by
+        synapse or twisted. If we're still using jaeger these headers would be those
+        here:
+        https://github.com/jaegertracing/jaeger-client-python/blob/master/jaeger_client/constants.py
+    """
+    if not whitelisted_homeserver(destination):
+        return
+
+    span = opentracing.tracer.active_span
+
+    carrier = {}
+    opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier)
+
+    for key, value in carrier.items():
+        headers[key.encode()] = [value.encode()]
+
+
+def trace_servlet(servlet_name, func):
+    """Decorator which traces a servlet. It starts a span with some servlet-specific
+    tags such as the servlet_name and request information"""
+
+    @wraps(func)
+    @defer.inlineCallbacks
+    def _trace_servlet_inner(request, *args, **kwargs):
+        with start_active_span_from_context(
+            request.requestHeaders,
+            "incoming-client-request",
+            tags={
+                "request_id": request.get_request_id(),
+                tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
+                tags.HTTP_METHOD: request.get_method(),
+                tags.HTTP_URL: request.get_redacted_uri(),
+                tags.PEER_HOST_IPV6: request.getClientIP(),
+                "servlet_name": servlet_name,
+            },
+        ):
+            result = yield defer.maybeDeferred(func, request, *args, **kwargs)
+            defer.returnValue(result)
+
+    return _trace_servlet_inner

diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py
new file mode 100644
index 000000000000..91e14462f308
--- /dev/null
+++ b/synapse/logging/scopecontextmanager.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+
+from opentracing import Scope, ScopeManager
+
+import twisted
+
+from synapse.logging.context import LoggingContext, nested_logging_context
+
+logger = logging.getLogger(__name__)
+
+
+class LogContextScopeManager(ScopeManager):
+    """
+    The LogContextScopeManager tracks the active scope in opentracing
+    by using the log contexts which are native to synapse. This is so
+    that the basic opentracing api can be used across twisted deferreds.
+    (I would love to break logcontexts and this into an OS package, but
+    let's wait for twisted's contexts to be released.)
+    """
+
+    def __init__(self, config):
+        # Set the whitelists
+        logger.info(config.tracer_config)
+        self._homeserver_whitelist = config.tracer_config["homeserver_whitelist"]
+
+    @property
+    def active(self):
+        """
+        Returns the currently active Scope which can be used to access the
+        currently active Scope.span.
+        If there is a non-null Scope, its wrapped Span
+        becomes an implicit parent of any newly-created Span at
+        Tracer.start_active_span() time.
+
+        Returns:
+            (Scope): the Scope that is active, or None if not
+            available.
+        """
+        ctx = LoggingContext.current_context()
+        if ctx is LoggingContext.sentinel:
+            return None
+        else:
+            return ctx.scope
+
+    def activate(self, span, finish_on_close):
+        """
+        Makes a Span active.
+        Args:
+            span (Span): the span that should become active.
+            finish_on_close (Boolean): whether the Span should be automatically
+                finished when Scope.close() is called.
+
+        Returns:
+            Scope to control the end of the active period for
+            *span*. It is a programming error to neglect to call
+            Scope.close() on the returned instance.
+        """
+
+        enter_logcontext = False
+        ctx = LoggingContext.current_context()
+
+        if ctx is LoggingContext.sentinel:
+            # We don't want this scope to affect the sentinel logcontext.
+            logger.error("Tried to activate scope outside of loggingcontext")
+            return Scope(None, span)
+        elif ctx.scope is not None:
+            # We want the logging scope to look exactly the same so we give it
+            # a blank suffix
+            ctx = nested_logging_context("")
+            enter_logcontext = True
+
+        scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
+        ctx.scope = scope
+        return scope
+
+
+class _LogContextScope(Scope):
+    """
+    A custom opentracing scope. The only significant difference is that it will
+    close the log context it's related to if the logcontext was created specifically
+    for this scope.
+    """
+
+    def __init__(self, manager, span, logcontext, enter_logcontext, finish_on_close):
+        """
+        Args:
+            manager (LogContextScopeManager):
+                the manager that is responsible for this scope.
+            span (Span):
+                the opentracing span which this scope represents the local
+                lifetime for.
+            logcontext (LogContext):
+                the logcontext to which this scope is attached.
+            enter_logcontext (Boolean):
+                if True the logcontext will be entered and exited when the scope
+                is entered and exited respectively
+            finish_on_close (Boolean):
+                if True finish the span when the scope is closed
+        """
+        super(_LogContextScope, self).__init__(manager, span)
+        self.logcontext = logcontext
+        self._finish_on_close = finish_on_close
+        self._enter_logcontext = enter_logcontext
+
+    def __enter__(self):
+        if self._enter_logcontext:
+            self.logcontext.__enter__()
+
+    def __exit__(self, type, value, traceback):
+        if type == twisted.internet.defer._DefGen_Return:
+            super(_LogContextScope, self).__exit__(None, None, None)
+        else:
+            super(_LogContextScope, self).__exit__(type, value, traceback)
+        if self._enter_logcontext:
+            self.logcontext.__exit__(type, value, traceback)
+        else:  # the logcontext existed before the creation of the scope
+            self.logcontext.scope = None
+
+    def close(self):
+        if self.manager.active is not self:
+            logger.error("Tried to close a non-active scope!")
+            return
+
+        if self._finish_on_close:
+            self.span.finish()

diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 6324c00ef184..e7618057be6d 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -95,6 +95,7 @@
     "url_preview": ["lxml>=3.5.0"],
     "test": ["mock>=2.0", "parameterized"],
     "sentry": ["sentry-sdk>=0.7.2"],
+    "opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
     "jwt": ["pyjwt>=1.6.4"],
 }

From 0a4001eba1eb22fc7c39f257c8d5a326b1a489ad Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 11 Jul 2019 11:06:23 +0100
Subject: [PATCH 40/80] Clean up exception handling for access_tokens (#5656)

First of all, let's get rid of `TOKEN_NOT_FOUND_HTTP_STATUS`. It was a hack we
did at one point when it was possible to return either a 403 or a 401 if the
creds were missing. We always return a 401 in these cases now (thankfully), so
it's not needed.

Let's also stop abusing `AuthError` for these cases. Honestly they have
nothing that relates them to the other places that `AuthError` is used, other
than the fact that they are loosely under the 'Auth' banner. It makes no sense
for them to share exception classes.

Instead, let's add a couple of new exception classes: `InvalidClientTokenError`
and `MissingClientTokenError`, for the `M_UNKNOWN_TOKEN` and `M_MISSING_TOKEN`
cases respectively - and an `InvalidClientCredentialsError` base class for the
two of them.
---
 changelog.d/5656.misc               |   1 +
 synapse/api/auth.py                 | 127 +++++++++------------------
 synapse/api/errors.py               |  33 +++++++-
 synapse/rest/client/v1/directory.py |  10 ++-
 synapse/rest/client/v1/room.py      |   9 +-
 tests/api/test_auth.py              |  31 +++++--
 6 files changed, 111 insertions(+), 100 deletions(-)
 create mode 100644 changelog.d/5656.misc

diff --git a/changelog.d/5656.misc b/changelog.d/5656.misc
new file mode 100644
index 000000000000..a8de20a7d01e
--- /dev/null
+++ b/changelog.d/5656.misc
@@ -0,0 +1 @@
+Clean up exception handling around client access tokens.
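Because both concrete errors share a base class, callers can treat all credential problems (401s) uniformly while other AuthErrors (403s) keep their own path. A toy mirror of the hierarchy added in synapse/api/errors.py below (class bodies simplified for illustration):

class SynapseError(Exception):
    def __init__(self, code, msg, errcode):
        super().__init__(msg)
        self.code = code
        self.errcode = errcode


class InvalidClientCredentialsError(SynapseError):
    def __init__(self, msg, errcode):
        super().__init__(code=401, msg=msg, errcode=errcode)


class MissingClientTokenError(InvalidClientCredentialsError):
    def __init__(self, msg="Missing access token"):
        super().__init__(msg=msg, errcode="M_MISSING_TOKEN")


class InvalidClientTokenError(InvalidClientCredentialsError):
    def __init__(self, msg="Unrecognised access token"):
        super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN")


# One except clause covers both token-missing and token-invalid cases.
try:
    raise MissingClientTokenError()
except InvalidClientCredentialsError as e:
    assert (e.code, e.errcode) == (401, "M_MISSING_TOKEN")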
diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 86f145649cfd..afc6400948b7 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -25,7 +25,13 @@ import synapse.types from synapse import event_auth from synapse.api.constants import EventTypes, JoinRules, Membership -from synapse.api.errors import AuthError, Codes, ResourceLimitError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientTokenError, + MissingClientTokenError, + ResourceLimitError, +) from synapse.config.server import is_threepid_reserved from synapse.types import UserID from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache @@ -63,7 +69,6 @@ def __init__(self, hs): self.clock = hs.get_clock() self.store = hs.get_datastore() self.state = hs.get_state_handler() - self.TOKEN_NOT_FOUND_HTTP_STATUS = 401 self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000) register_cache("cache", "token_cache", self.token_cache) @@ -189,18 +194,17 @@ def get_user_by_req( Returns: defer.Deferred: resolves to a ``synapse.types.Requester`` object Raises: - AuthError if no user by that token exists or the token is invalid. + InvalidClientCredentialsError if no user by that token exists or the token + is invalid. + AuthError if access is denied for the user in the access token """ - # Can optionally look elsewhere in the request (e.g. headers) try: ip_addr = self.hs.get_ip_from_request(request) user_agent = request.requestHeaders.getRawHeaders( b"User-Agent", default=[b""] )[0].decode("ascii", "surrogateescape") - access_token = self.get_access_token_from_request( - request, self.TOKEN_NOT_FOUND_HTTP_STATUS - ) + access_token = self.get_access_token_from_request(request) user_id, app_service = yield self._get_appservice_user_id(request) if user_id: @@ -264,18 +268,12 @@ def get_user_by_req( ) ) except KeyError: - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Missing access token.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError() @defer.inlineCallbacks def _get_appservice_user_id(self, request): app_service = self.store.get_app_service_by_token( - self.get_access_token_from_request( - request, self.TOKEN_NOT_FOUND_HTTP_STATUS - ) + self.get_access_token_from_request(request) ) if app_service is None: defer.returnValue((None, None)) @@ -313,7 +311,8 @@ def get_user_by_access_token(self, token, rights="access"): `token_id` (int|None): access token id. May be None if guest `device_id` (str|None): device corresponding to access token Raises: - AuthError if no user by that token exists or the token is invalid. + InvalidClientCredentialsError if no user by that token exists or the token + is invalid. """ if rights == "access": @@ -331,11 +330,7 @@ def get_user_by_access_token(self, token, rights="access"): if not guest: # non-guest access tokens must be in the database logger.warning("Unrecognised access token - not in store.") - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Unrecognised access token.", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError() # Guest access tokens are not stored in the database (there can # only be one access token per guest, anyway). @@ -350,16 +345,10 @@ def get_user_by_access_token(self, token, rights="access"): # guest tokens. 
stored_user = yield self.store.get_user_by_id(user_id) if not stored_user: - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Unknown user_id %s" % user_id, - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("Unknown user_id %s" % user_id) if not stored_user["is_guest"]: - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Guest access token used for regular user", - errcode=Codes.UNKNOWN_TOKEN, + raise InvalidClientTokenError( + "Guest access token used for regular user" ) ret = { "user": user, @@ -386,11 +375,7 @@ def get_user_by_access_token(self, token, rights="access"): ValueError, ) as e: logger.warning("Invalid macaroon in auth: %s %s", type(e), e) - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Invalid macaroon passed.", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("Invalid macaroon passed.") def _parse_and_validate_macaroon(self, token, rights="access"): """Takes a macaroon and tries to parse and validate it. This is cached @@ -430,11 +415,7 @@ def _parse_and_validate_macaroon(self, token, rights="access"): macaroon, rights, self.hs.config.expire_access_token, user_id=user_id ) except (pymacaroons.exceptions.MacaroonException, TypeError, ValueError): - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Invalid macaroon passed.", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("Invalid macaroon passed.") if not has_expiry and rights == "access": self.token_cache[token] = (user_id, guest) @@ -453,17 +434,14 @@ def get_user_id_from_macaroon(self, macaroon): (str) user id Raises: - AuthError if there is no user_id caveat in the macaroon + InvalidClientCredentialsError if there is no user_id caveat in the + macaroon """ user_prefix = "user_id = " for caveat in macaroon.caveats: if caveat.caveat_id.startswith(user_prefix): return caveat.caveat_id[len(user_prefix) :] - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "No user caveat in macaroon", - errcode=Codes.UNKNOWN_TOKEN, - ) + raise InvalidClientTokenError("No user caveat in macaroon") def validate_macaroon(self, macaroon, type_string, verify_expiry, user_id): """ @@ -531,22 +509,13 @@ def _look_up_user_by_access_token(self, token): defer.returnValue(user_info) def get_appservice_by_req(self, request): - try: - token = self.get_access_token_from_request( - request, self.TOKEN_NOT_FOUND_HTTP_STATUS - ) - service = self.store.get_app_service_by_token(token) - if not service: - logger.warn("Unrecognised appservice access token.") - raise AuthError( - self.TOKEN_NOT_FOUND_HTTP_STATUS, - "Unrecognised access token.", - errcode=Codes.UNKNOWN_TOKEN, - ) - request.authenticated_entity = service.sender - return defer.succeed(service) - except KeyError: - raise AuthError(self.TOKEN_NOT_FOUND_HTTP_STATUS, "Missing access token.") + token = self.get_access_token_from_request(request) + service = self.store.get_app_service_by_token(token) + if not service: + logger.warn("Unrecognised appservice access token.") + raise InvalidClientTokenError() + request.authenticated_entity = service.sender + return defer.succeed(service) def is_server_admin(self, user): """ Check if the given user is a local server admin. @@ -692,20 +661,16 @@ def has_access_token(request): return bool(query_params) or bool(auth_headers) @staticmethod - def get_access_token_from_request(request, token_not_found_http_status=401): + def get_access_token_from_request(request): """Extracts the access_token from the request. Args: request: The http request. 
- token_not_found_http_status(int): The HTTP status code to set in the - AuthError if the token isn't found. This is used in some of the - legacy APIs to change the status code to 403 from the default of - 401 since some of the old clients depended on auth errors returning - 403. Returns: unicode: The access_token Raises: - AuthError: If there isn't an access_token in the request. + MissingClientTokenError: If there isn't a single access_token in the + request """ auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") @@ -714,34 +679,20 @@ def get_access_token_from_request(request, token_not_found_http_status=401): # Try the get the access_token from a "Authorization: Bearer" # header if query_params is not None: - raise AuthError( - token_not_found_http_status, - "Mixing Authorization headers and access_token query parameters.", - errcode=Codes.MISSING_TOKEN, + raise MissingClientTokenError( + "Mixing Authorization headers and access_token query parameters." ) if len(auth_headers) > 1: - raise AuthError( - token_not_found_http_status, - "Too many Authorization headers.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError("Too many Authorization headers.") parts = auth_headers[0].split(b" ") if parts[0] == b"Bearer" and len(parts) == 2: return parts[1].decode("ascii") else: - raise AuthError( - token_not_found_http_status, - "Invalid Authorization header.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError("Invalid Authorization header.") else: # Try to get the access_token from the query params. if not query_params: - raise AuthError( - token_not_found_http_status, - "Missing access token.", - errcode=Codes.MISSING_TOKEN, - ) + raise MissingClientTokenError() return query_params[0].decode("ascii") diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 28b5c2af9b39..41fd04cd548f 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -210,7 +210,9 @@ def __init__(self, msg="Not found", errcode=Codes.NOT_FOUND): class AuthError(SynapseError): - """An error raised when there was a problem authorising an event.""" + """An error raised when there was a problem authorising an event, and at various + other poorly-defined times. + """ def __init__(self, *args, **kwargs): if "errcode" not in kwargs: @@ -218,6 +220,35 @@ def __init__(self, *args, **kwargs): super(AuthError, self).__init__(*args, **kwargs) +class InvalidClientCredentialsError(SynapseError): + """An error raised when there was a problem with the authorisation credentials + in a client request. + + https://matrix.org/docs/spec/client_server/r0.5.0#using-access-tokens: + + When credentials are required but missing or invalid, the HTTP call will + return with a status of 401 and the error code, M_MISSING_TOKEN or + M_UNKNOWN_TOKEN respectively. + """ + + def __init__(self, msg, errcode): + super().__init__(code=401, msg=msg, errcode=errcode) + + +class MissingClientTokenError(InvalidClientCredentialsError): + """Raised when we couldn't find the access token in a request""" + + def __init__(self, msg="Missing access token"): + super().__init__(msg=msg, errcode="M_MISSING_TOKEN") + + +class InvalidClientTokenError(InvalidClientCredentialsError): + """Raised when we didn't understand the access token in a request""" + + def __init__(self, msg="Unrecognised access token"): + super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN") + + class ResourceLimitError(SynapseError): """ Any error raised when there is a problem with resource usage. 
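Per the spec excerpt quoted in the new docstring, both subclasses serialise to a 401 response whose JSON body differs only in the Matrix errcode. As a hedged illustration (field names follow the Matrix client-server spec; the `error` strings are the defaults defined above):

    # What a client sees in each case, as (status code, JSON body):
    MISSING_TOKEN_RESPONSE = (
        401,
        {"errcode": "M_MISSING_TOKEN", "error": "Missing access token"},
    )
    UNKNOWN_TOKEN_RESPONSE = (
        401,
        {"errcode": "M_UNKNOWN_TOKEN", "error": "Unrecognised access token"},
    )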
diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index dd0d38ea5c12..57542c2b4b9c 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -18,7 +18,13 @@ from twisted.internet import defer -from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientCredentialsError, + NotFoundError, + SynapseError, +) from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import RoomAlias @@ -97,7 +103,7 @@ def on_DELETE(self, request, room_alias): room_alias.to_string(), ) defer.returnValue((200, {})) - except AuthError: + except InvalidClientCredentialsError: # fallback to default user behaviour if they aren't an AS pass diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index cca7e45ddbec..7709c2d705c2 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -24,7 +24,12 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import AuthError, Codes, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientCredentialsError, + SynapseError, +) from synapse.api.filtering import Filter from synapse.events.utils import format_event_for_client_v2 from synapse.http.servlet import ( @@ -307,7 +312,7 @@ def on_GET(self, request): try: yield self.auth.get_user_by_req(request, allow_guest=True) - except AuthError as e: + except InvalidClientCredentialsError as e: # Option to allow servers to require auth when accessing # /publicRooms via CS API. This is especially helpful in private # federations. 
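Both call sites above follow the same optional-auth pattern: a bad or missing token degrades to unauthenticated behaviour, while genuine permission failures (which still raise `AuthError`, a 403) propagate. A sketch of the pattern, with an illustrative helper name (in Synapse proper `get_user_by_req` returns a Deferred and would be yielded inside an `inlineCallbacks` function):

    from synapse.api.errors import InvalidClientCredentialsError

    def get_requester_or_none(auth, request):
        # Bad/missing credentials degrade to anonymous access; anything
        # else, e.g. a real permission error, propagates to the caller.
        try:
            return auth.get_user_by_req(request)
        except InvalidClientCredentialsError:
            return None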
diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index ddf2b578b373..ee92ceeb60ef 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -21,7 +21,14 @@ import synapse.handlers.auth from synapse.api.auth import Auth -from synapse.api.errors import AuthError, Codes, ResourceLimitError +from synapse.api.errors import ( + AuthError, + Codes, + InvalidClientCredentialsError, + InvalidClientTokenError, + MissingClientTokenError, + ResourceLimitError, +) from synapse.types import UserID from tests import unittest @@ -70,7 +77,9 @@ def test_get_user_by_req_user_bad_token(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, InvalidClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_user_missing_token(self): user_info = {"name": self.test_user, "token_id": "ditto"} @@ -79,7 +88,9 @@ def test_get_user_by_req_user_missing_token(self): request = Mock(args={}) request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, MissingClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_MISSING_TOKEN") @defer.inlineCallbacks def test_get_user_by_req_appservice_valid_token(self): @@ -133,7 +144,9 @@ def test_get_user_by_req_appservice_valid_token_bad_ip(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, InvalidClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_appservice_bad_token(self): self.store.get_app_service_by_token = Mock(return_value=None) @@ -143,7 +156,9 @@ def test_get_user_by_req_appservice_bad_token(self): request.args[b"access_token"] = [self.test_token] request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, InvalidClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") def test_get_user_by_req_appservice_missing_token(self): app_service = Mock(token="foobar", url="a_url", sender=self.test_user) @@ -153,7 +168,9 @@ def test_get_user_by_req_appservice_missing_token(self): request = Mock(args={}) request.requestHeaders.getRawHeaders = mock_getRawHeaders() d = self.auth.get_user_by_req(request) - self.failureResultOf(d, AuthError) + f = self.failureResultOf(d, MissingClientTokenError).value + self.assertEqual(f.code, 401) + self.assertEqual(f.errcode, "M_MISSING_TOKEN") @defer.inlineCallbacks def test_get_user_by_req_appservice_valid_token_valid_user_id(self): @@ -280,7 +297,7 @@ def get_user(tok): request.args[b"access_token"] = [guest_tok.encode("ascii")] request.requestHeaders.getRawHeaders = mock_getRawHeaders() - with self.assertRaises(AuthError) as cm: + with self.assertRaises(InvalidClientCredentialsError) as cm: yield self.auth.get_user_by_req(request, allow_guest=True) self.assertEqual(401, cm.exception.code) From 78a1cd36b568c3021b311703df147b68f5fd1c19 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 11 Jul 2019 13:33:23 
+0100 Subject: [PATCH 41/80] small typo fix (#5655) --- changelog.d/5655.doc | 1 + synapse/storage/events_worker.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5655.doc diff --git a/changelog.d/5655.doc b/changelog.d/5655.doc new file mode 100644 index 000000000000..acab6aee92bf --- /dev/null +++ b/changelog.d/5655.doc @@ -0,0 +1 @@ +Fix a small typo in a code comment. \ No newline at end of file diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 09db8725112f..874d0a56bcbb 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -327,7 +327,7 @@ def _get_events_from_cache(self, events, allow_rejected, update_metrics=True): Args: events (list(str)): list of event_ids to fetch - allow_rejected (bool): Whether to teturn events that were rejected + allow_rejected (bool): Whether to return events that were rejected update_metrics (bool): Whether to update the cache hit ratio metrics Returns: From 39e9839a04c416ac70cdbd48f33ff574d56c7fbe Mon Sep 17 00:00:00 2001 From: Lrizika Date: Thu, 11 Jul 2019 06:31:36 -0700 Subject: [PATCH 42/80] Improved docs on setting up Postgresql (#5661) Added that synapse_user needs a database to access before it can auth Noted you'll need to enable password auth, linked to pg_hba.conf docs --- changelog.d/5661.docs | 1 + docs/postgres.rst | 19 +++++++++++++++---- 2 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5661.docs diff --git a/changelog.d/5661.docs b/changelog.d/5661.docs new file mode 100644 index 000000000000..c70e62014e40 --- /dev/null +++ b/changelog.d/5661.docs @@ -0,0 +1 @@ +Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! diff --git a/docs/postgres.rst b/docs/postgres.rst index 33f58e3acea3..0ae52ccbd814 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -34,9 +34,14 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user su - postgres createuser --pwprompt synapse_user -The PostgreSQL database used *must* have the correct encoding set, otherwise it -would not be able to store UTF8 strings. To create a database with the correct -encoding use, e.g.:: +Before you can authenticate with the ``synapse_user``, you must create a +database that it can access. To create a database, first connect to the database +with your database user:: + + su - postgres + psql + +and then run:: CREATE DATABASE synapse ENCODING 'UTF8' @@ -46,7 +51,13 @@ encoding use, e.g.:: OWNER synapse_user; This would create an appropriate database named ``synapse`` owned by the -``synapse_user`` user (which must already exist). +``synapse_user`` user (which must already have been created as above). + +Note that the PostgreSQL database *must* have the correct encoding set (as +shown above), otherwise it will not be able to store UTF8 strings. + +You may need to enable password authentication so ``synapse_user`` can connect +to the database. See https://www.postgresql.org/docs/11/auth-pg-hba-conf.html. 
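To sanity-check the steps above end to end, a short script can confirm both that ``synapse_user`` can authenticate and that the database encoding is correct. This assumes ``psycopg2`` is installed and the server is local; the password placeholder is whatever was given to ``createuser --pwprompt``:

    import psycopg2

    # A successful connect confirms the CREATE DATABASE step and
    # password authentication for synapse_user.
    conn = psycopg2.connect(
        dbname="synapse",
        user="synapse_user",
        password="<the password you set>",  # illustrative placeholder
        host="localhost",
    )
    with conn.cursor() as cur:
        cur.execute("SHOW SERVER_ENCODING")
        print(cur.fetchone()[0])  # expect: UTF8
    conn.close()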
Tuning Postgres =============== From a83577d64f23c260bd7899a4dee5ff00d1058253 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 11 Jul 2019 23:43:41 +1000 Subject: [PATCH 43/80] Use /src for checking out synapse during sytests (#5664) --- .buildkite/pipeline.yml | 3 +++ changelog.d/5664.misc | 1 + 2 files changed, 4 insertions(+) create mode 100644 changelog.d/5664.misc diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index dd0f98cba0ec..7f42fad909d1 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -179,6 +179,7 @@ steps: image: "matrixdotorg/sytest-synapse:py35" propagate-environment: true always-pull: true + workdir: "/src" retry: automatic: - exit_status: -1 @@ -199,6 +200,7 @@ steps: image: "matrixdotorg/sytest-synapse:py35" propagate-environment: true always-pull: true + workdir: "/src" retry: automatic: - exit_status: -1 @@ -220,6 +222,7 @@ steps: image: "matrixdotorg/sytest-synapse:py35" propagate-environment: true always-pull: true + workdir: "/src" soft_fail: true retry: automatic: diff --git a/changelog.d/5664.misc b/changelog.d/5664.misc new file mode 100644 index 000000000000..0ca7a0fbd0c7 --- /dev/null +++ b/changelog.d/5664.misc @@ -0,0 +1 @@ +Update the sytest BuildKite configuration to checkout Synapse in `/src`. From 6bb0357c949dcc433e2316c395199644ef75e02c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 12 Jul 2019 10:16:23 +0100 Subject: [PATCH 44/80] Add a mechanism for per-test configs (#5657) It's useful to be able to tweak the homeserver config to be used for each test. This PR adds a mechanism to do so. --- changelog.d/5657.misc | 1 + tests/unittest.py | 55 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5657.misc diff --git a/changelog.d/5657.misc b/changelog.d/5657.misc new file mode 100644 index 000000000000..bdec9ae4c0b6 --- /dev/null +++ b/changelog.d/5657.misc @@ -0,0 +1 @@ +Add a mechanism for per-test homeserver configuration in the unit tests. diff --git a/tests/unittest.py b/tests/unittest.py index a09e76c7c21d..0f0c2ad69d03 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -157,6 +157,21 @@ class HomeserverTestCase(TestCase): """ A base TestCase that reduces boilerplate for HomeServer-using test cases. + Defines a setUp method which creates a mock reactor, and instantiates a homeserver + running on that reactor. + + There are various hooks for modifying the way that the homeserver is instantiated: + + * override make_homeserver, for example by making it pass different parameters into + setup_test_homeserver. + + * override default_config, to return a modified configuration dictionary for use + by setup_test_homeserver. + + * On a per-test basis, you can use the @override_config decorator to give a + dictionary containing additional configuration settings to be added to the basic + config dict. + Attributes: servlets (list[function]): List of servlet registration function. user_id (str): The user ID to assume if auth is hijacked. 
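For example, the `@override_config` hook described above can be used like this (the test case name and setting here are illustrative; the decorator itself is defined at the bottom of this diff):

    from tests.unittest import HomeserverTestCase, override_config

    class RegistrationConfigTestCase(HomeserverTestCase):
        # The dict is merged into the default config for this test only.
        @override_config({"enable_registration": False})
        def test_registration_is_rejected(self):
            ...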
@@ -168,6 +183,13 @@ class HomeserverTestCase(TestCase): hijack_auth = True needs_threadpool = False + def __init__(self, methodName, *args, **kwargs): + super().__init__(methodName, *args, **kwargs) + + # see if we have any additional config for this test + method = getattr(self, methodName) + self._extra_config = getattr(method, "_extra_config", None) + def setUp(self): """ Set up the TestCase by calling the homeserver constructor, optionally @@ -276,7 +298,14 @@ def default_config(self, name="test"): Args: name (str): The homeserver name/domain. """ - return default_config(name) + config = default_config(name) + + # apply any additional config which was specified via the override_config + # decorator. + if self._extra_config is not None: + config.update(self._extra_config) + + return config def prepare(self, reactor, clock, homeserver): """ @@ -534,3 +563,27 @@ def attempt_wrong_password_login(self, username, password): ) self.render(request) self.assertEqual(channel.code, 403, channel.result) + + +def override_config(extra_config): + """A decorator which can be applied to test functions to give additional HS config + + For use + + For example: + + class MyTestCase(HomeserverTestCase): + @override_config({"enable_registration": False, ...}) + def test_foo(self): + ... + + Args: + extra_config(dict): Additional config settings to be merged into the default + config dict before instantiating the test homeserver. + """ + + def decorator(func): + func._extra_config = extra_config + return func + + return decorator From f36916476151309c7e1c00ae35fd3bf551f61d42 Mon Sep 17 00:00:00 2001 From: Slavi Pantaleev Date: Fri, 12 Jul 2019 13:38:25 +0300 Subject: [PATCH 45/80] Upgrade Alpine Linux used in the Docker image (3.8 -> 3.10) (#5619) Alpine Linux 3.8 is still supported, but it seems like it's quite outdated now. While Python should be the same on both, all other libraries, etc., are much newer in Alpine 3.9 and 3.10. Signed-off-by: Slavi Pantaleev --- changelog.d/5619.misc | 1 + docker/Dockerfile | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5619.misc diff --git a/changelog.d/5619.misc b/changelog.d/5619.misc new file mode 100644 index 000000000000..c5e22d2051ed --- /dev/null +++ b/changelog.d/5619.misc @@ -0,0 +1 @@ +Base Docker image on a newer Alpine Linux version (3.8 -> 3.10) diff --git a/docker/Dockerfile b/docker/Dockerfile index 79276209f6d0..e5a0d6d5f662 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -16,7 +16,7 @@ ARG PYTHON_VERSION=3.7 ### ### Stage 0: builder ### -FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder +FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 as builder # install the OS build deps @@ -55,7 +55,7 @@ RUN pip install --prefix="/install" --no-warn-script-location \ ### Stage 1: runtime ### -FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 +FROM docker.io/python:${PYTHON_VERSION}-alpine3.10 # xmlsec is required for saml support RUN apk add --no-cache --virtual .runtime_deps \ From 59f15309ca09c6f46602b47eafd1e5527fff224d Mon Sep 17 00:00:00 2001 From: Slavi Pantaleev Date: Fri, 12 Jul 2019 13:43:42 +0300 Subject: [PATCH 46/80] Add missing space in default logging file format generated by the Docker image (#5620) This adds a missing space, without which log lines appear uglier. 
Signed-off-by: Slavi Pantaleev --- changelog.d/5620.bugfix | 1 + docker/conf/log.config | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5620.bugfix diff --git a/changelog.d/5620.bugfix b/changelog.d/5620.bugfix new file mode 100644 index 000000000000..17e1f133e847 --- /dev/null +++ b/changelog.d/5620.bugfix @@ -0,0 +1 @@ +Add missing space in default logging file format generated by the Docker image diff --git a/docker/conf/log.config b/docker/conf/log.config index ea5ccfd68b9d..db35e475a470 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -2,7 +2,7 @@ version: 1 formatters: precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s' + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' filters: context: From d445b3ae57e589903ead3563a76f5c4e9322de19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ulrik=20G=C3=BCnther?= Date: Fri, 12 Jul 2019 12:46:18 +0200 Subject: [PATCH 47/80] Update reverse_proxy.rst (#5397) Updates reverse_proxy.rst with information about nginx' URI normalisation. --- changelog.d/5397.doc | 1 + docs/reverse_proxy.rst | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/5397.doc diff --git a/changelog.d/5397.doc b/changelog.d/5397.doc new file mode 100644 index 000000000000..c2b500b482d8 --- /dev/null +++ b/changelog.d/5397.doc @@ -0,0 +1 @@ +Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst index e4b870411c02..4b640ffc4f6f 100644 --- a/docs/reverse_proxy.rst +++ b/docs/reverse_proxy.rst @@ -48,6 +48,8 @@ Let's assume that we expect clients to connect to our server at proxy_set_header X-Forwarded-For $remote_addr; } } + + Do not add a `/` after the port in `proxy_pass`, otherwise nginx will canonicalise/normalise the URI. * Caddy:: From 4c17a87606603a7128066d673a08e9d1765f5556 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 12 Jul 2019 11:47:24 +0100 Subject: [PATCH 48/80] fix changelog name --- changelog.d/{5661.docs => 5661.doc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5661.docs => 5661.doc} (100%) diff --git a/changelog.d/5661.docs b/changelog.d/5661.doc similarity index 100% rename from changelog.d/5661.docs rename to changelog.d/5661.doc From 24aa0e0a5bac33df5a9a2e50a74aa500af320953 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 12 Jul 2019 15:29:32 +0100 Subject: [PATCH 49/80] fix typo: backgroud -> background --- synapse/rest/media/v1/storage_provider.py | 2 +- synapse/storage/registration.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index e8f559acc194..37687ea7f4ec 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -67,7 +67,7 @@ class StorageProviderWrapper(StorageProvider): backend (StorageProvider) store_local (bool): Whether to store new local files or not. store_synchronous (bool): Whether to wait for file to be successfully - uploaded, or todo the upload in the backgroud. + uploaded, or todo the upload in the background. 
store_remote (bool): Whether remote media should be uploaded """ diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 8e217c94089e..73580f17257e 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -603,7 +603,7 @@ def __init__(self, db_conn, hs): ) self.register_background_update_handler( - "users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag + "users_set_deactivated_flag", self._background_update_set_deactivated_flag ) # Create a background job for culling expired 3PID validity tokens @@ -618,14 +618,14 @@ def start_cull(): hs.get_clock().looping_call(start_cull, THIRTY_MINUTES_IN_MS) @defer.inlineCallbacks - def _backgroud_update_set_deactivated_flag(self, progress, batch_size): + def _background_update_set_deactivated_flag(self, progress, batch_size): """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1 for each of them. """ last_user = progress.get("user_id", "") - def _backgroud_update_set_deactivated_flag_txn(txn): + def _background_update_set_deactivated_flag_txn(txn): txn.execute( """ SELECT @@ -670,7 +670,7 @@ def _backgroud_update_set_deactivated_flag_txn(txn): return False end = yield self.runInteraction( - "users_set_deactivated_flag", _backgroud_update_set_deactivated_flag_txn + "users_set_deactivated_flag", _background_update_set_deactivated_flag_txn ) if end: From db0a50bc40e4aca4a03fa3a09d33497f57f95c2d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 12 Jul 2019 16:59:59 +0100 Subject: [PATCH 50/80] Fixup docstrings --- synapse/storage/stream.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index f8e3007d671b..a0465484df5e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -835,7 +835,7 @@ def _paginate_room_events_txn( as a list of _EventDictReturn and a token that points to the end of the result set. If no events are returned then the end of the stream has been reached (i.e. there are no events between - `from_token` and `to_token`). + `from_token` and `to_token`), or `limit` is zero. """ assert int(limit) >= 0 @@ -912,12 +912,10 @@ def paginate_room_events( those that match the filter. Returns: - tuple[list[FrozenEvents], str]: Returns the results as a list of - dicts and a token that points to the end of the result set. The - dicts have the keys "event_id", "topological_ordering" and - "stream_ordering". If no events are returned then the end of the - stream has been reached (i.e. there are no events between - `from_key` and `to_key`). + tuple[list[FrozenEvent], str]: Returns the results as a list of + events and a token that points to the end of the result set. If no + events are returned then the end of the stream has been reached + (i.e. there are no events between `from_key` and `to_key`). """ from_key = RoomStreamToken.parse(from_key) From 5f158ec039e4753959aad9b8d288b3d8cb4959a1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 12 Jul 2019 17:26:02 +0100 Subject: [PATCH 51/80] Implement access token expiry (#5660) Record how long an access token is valid for, and raise a soft-logout once it expires. 
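In outline, the rule being added is the following (a standalone sketch: the real check lives in `Auth.get_user_by_access_token` in the diff below, and the response shape comes from the `soft_logout` flag added to `InvalidClientTokenError`):

    def check_expiry(valid_until_ms, now_ms):
        """None means the token never expires, e.g. when no
        session_lifetime is configured or the token predates this change."""
        if valid_until_ms is not None and valid_until_ms < now_ms:
            # Expired: surfaced as a 401 M_UNKNOWN_TOKEN with soft_logout.
            return {"errcode": "M_UNKNOWN_TOKEN", "soft_logout": True}
        return None  # still valid, at least as far as expiry goes

    assert check_expiry(None, 1000) is None     # non-expiring token
    assert check_expiry(500, 1000) is not None  # expired: soft logout
    assert check_expiry(2000, 1000) is None     # not yet expired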
--- changelog.d/5660.feature | 1 + docs/sample_config.yaml | 11 ++ synapse/api/auth.py | 12 ++ synapse/api/errors.py | 8 +- synapse/config/registration.py | 16 +++ synapse/handlers/auth.py | 17 ++- synapse/handlers/register.py | 35 ++++-- synapse/storage/registration.py | 19 ++- .../schema/delta/55/access_token_expiry.sql | 18 +++ tests/api/test_auth.py | 6 +- tests/handlers/test_auth.py | 20 +++- tests/handlers/test_register.py | 5 +- tests/rest/client/v1/test_login.py | 108 ++++++++++++++++++ tests/storage/test_registration.py | 8 +- 14 files changed, 253 insertions(+), 31 deletions(-) create mode 100644 changelog.d/5660.feature create mode 100644 synapse/storage/schema/delta/55/access_token_expiry.sql diff --git a/changelog.d/5660.feature b/changelog.d/5660.feature new file mode 100644 index 000000000000..82889fdaf16b --- /dev/null +++ b/changelog.d/5660.feature @@ -0,0 +1 @@ +Implement `session_lifetime` configuration option, after which access tokens will expire. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 0462f0a17a2b..663ff31622f4 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -786,6 +786,17 @@ uploads_path: "DATADIR/uploads" # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" +# Time that a user's session remains valid for, after they log in. +# +# Note that this is not currently compatible with guest logins. +# +# Note also that this is calculated at login time: changes are not applied +# retrospectively to users who have already logged in. +# +# By default, this is infinite. +# +#session_lifetime: 24h + # The user must provide all of the below types of 3PID when registering. # #registrations_require_3pid: diff --git a/synapse/api/auth.py b/synapse/api/auth.py index afc6400948b7..d9e943c39c83 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -319,6 +319,17 @@ def get_user_by_access_token(self, token, rights="access"): # first look in the database r = yield self._look_up_user_by_access_token(token) if r: + valid_until_ms = r["valid_until_ms"] + if ( + valid_until_ms is not None + and valid_until_ms < self.clock.time_msec() + ): + # there was a valid access token, but it has expired. + # soft-logout the user. 
+ raise InvalidClientTokenError( + msg="Access token has expired", soft_logout=True + ) + defer.returnValue(r) # otherwise it needs to be a valid macaroon @@ -505,6 +516,7 @@ def _look_up_user_by_access_token(self, token): "token_id": ret.get("token_id", None), "is_guest": False, "device_id": ret.get("device_id"), + "valid_until_ms": ret.get("valid_until_ms"), } defer.returnValue(user_info) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 41fd04cd548f..a6e753c30c26 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -245,8 +245,14 @@ def __init__(self, msg="Missing access token"): class InvalidClientTokenError(InvalidClientCredentialsError): """Raised when we didn't understand the access token in a request""" - def __init__(self, msg="Unrecognised access token"): + def __init__(self, msg="Unrecognised access token", soft_logout=False): super().__init__(msg=msg, errcode="M_UNKNOWN_TOKEN") + self._soft_logout = soft_logout + + def error_dict(self): + d = super().error_dict() + d["soft_logout"] = self._soft_logout + return d class ResourceLimitError(SynapseError): diff --git a/synapse/config/registration.py b/synapse/config/registration.py index b895c4e9f4a9..34cb11468cdb 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -84,6 +84,11 @@ def read_config(self, config, **kwargs): "disable_msisdn_registration", False ) + session_lifetime = config.get("session_lifetime") + if session_lifetime is not None: + session_lifetime = self.parse_duration(session_lifetime) + self.session_lifetime = session_lifetime + def generate_config_section(self, generate_secrets=False, **kwargs): if generate_secrets: registration_shared_secret = 'registration_shared_secret: "%s"' % ( @@ -141,6 +146,17 @@ def generate_config_section(self, generate_secrets=False, **kwargs): # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" + # Time that a user's session remains valid for, after they log in. + # + # Note that this is not currently compatible with guest logins. + # + # Note also that this is calculated at login time: changes are not applied + # retrospectively to users who have already logged in. + # + # By default, this is infinite. + # + #session_lifetime: 24h + # The user must provide all of the below types of 3PID when registering. # #registrations_require_3pid: diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index da312b188e64..b74a6e9c6225 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +import time import unicodedata import attr @@ -558,7 +559,7 @@ def _get_session_info(self, session_id): return self.sessions[session_id] @defer.inlineCallbacks - def get_access_token_for_user_id(self, user_id, device_id=None): + def get_access_token_for_user_id(self, user_id, device_id, valid_until_ms): """ Creates a new access token for the user with the given user ID. @@ -572,16 +573,26 @@ def get_access_token_for_user_id(self, user_id, device_id=None): device_id (str|None): the device ID to associate with the tokens. None to leave the tokens unassociated with a device (deprecated: we should always have a device ID) + valid_until_ms (int|None): when the token is valid until. None for + no expiry. Returns: The access token for the user's session. Raises: StoreError if there was a problem storing the token. 
""" - logger.info("Logging in user %s on device %s", user_id, device_id) + fmt_expiry = "" + if valid_until_ms is not None: + fmt_expiry = time.strftime( + " until %Y-%m-%d %H:%M:%S", time.localtime(valid_until_ms / 1000.0) + ) + logger.info("Logging in user %s on device %s%s", user_id, device_id, fmt_expiry) + yield self.auth.check_auth_blocking(user_id) access_token = self.macaroon_gen.generate_access_token(user_id) - yield self.store.add_access_token_to_user(user_id, access_token, device_id) + yield self.store.add_access_token_to_user( + user_id, access_token, device_id, valid_until_ms + ) # the device *should* have been registered before we got here; however, # it's possible we raced against a DELETE operation. The thing we diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 420c5cb5bce7..bb7cfd71b91d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -84,6 +84,8 @@ def __init__(self, hs): self.device_handler = hs.get_device_handler() self.pusher_pool = hs.get_pusherpool() + self.session_lifetime = hs.config.session_lifetime + @defer.inlineCallbacks def check_username(self, localpart, guest_access_token=None, assigned_user_id=None): if types.contains_invalid_mxid_characters(localpart): @@ -599,6 +601,8 @@ def register_with_store( def register_device(self, user_id, device_id, initial_display_name, is_guest=False): """Register a device for a user and generate an access token. + The access token will be limited by the homeserver's session_lifetime config. + Args: user_id (str): full canonical @user:id device_id (str|None): The device ID to check, or None to generate @@ -619,20 +623,29 @@ def register_device(self, user_id, device_id, initial_display_name, is_guest=Fal is_guest=is_guest, ) defer.returnValue((r["device_id"], r["access_token"])) - else: - device_id = yield self.device_handler.check_device_registered( - user_id, device_id, initial_display_name - ) + + valid_until_ms = None + if self.session_lifetime is not None: if is_guest: - access_token = self.macaroon_gen.generate_access_token( - user_id, ["guest = true"] - ) - else: - access_token = yield self._auth_handler.get_access_token_for_user_id( - user_id, device_id=device_id + raise Exception( + "session_lifetime is not currently implemented for guest access" ) + valid_until_ms = self.clock.time_msec() + self.session_lifetime + + device_id = yield self.device_handler.check_device_registered( + user_id, device_id, initial_display_name + ) + if is_guest: + assert valid_until_ms is None + access_token = self.macaroon_gen.generate_access_token( + user_id, ["guest = true"] + ) + else: + access_token = yield self._auth_handler.get_access_token_for_user_id( + user_id, device_id=device_id, valid_until_ms=valid_until_ms + ) - defer.returnValue((device_id, access_token)) + defer.returnValue((device_id, access_token)) @defer.inlineCallbacks def post_registration_actions( diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 73580f17257e..8b2c2a97ab46 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -90,7 +90,8 @@ def get_user_by_access_token(self, token): token (str): The access token of a user. Returns: defer.Deferred: None, if the token did not match, otherwise dict - including the keys `name`, `is_guest`, `device_id`, `token_id`. + including the keys `name`, `is_guest`, `device_id`, `token_id`, + `valid_until_ms`. 
""" return self.runInteraction( "get_user_by_access_token", self._query_for_auth, token @@ -284,7 +285,7 @@ def is_server_admin(self, user): def _query_for_auth(self, txn, token): sql = ( "SELECT users.name, users.is_guest, access_tokens.id as token_id," - " access_tokens.device_id" + " access_tokens.device_id, access_tokens.valid_until_ms" " FROM users" " INNER JOIN access_tokens on users.name = access_tokens.user_id" " WHERE token = ?" @@ -679,14 +680,16 @@ def _background_update_set_deactivated_flag_txn(txn): defer.returnValue(batch_size) @defer.inlineCallbacks - def add_access_token_to_user(self, user_id, token, device_id=None): + def add_access_token_to_user(self, user_id, token, device_id, valid_until_ms): """Adds an access token for the given user. Args: user_id (str): The user ID. token (str): The new access token to add. device_id (str): ID of the device to associate with the access - token + token + valid_until_ms (int|None): when the token is valid until. None for + no expiry. Raises: StoreError if there was a problem adding this. """ @@ -694,7 +697,13 @@ def add_access_token_to_user(self, user_id, token, device_id=None): yield self._simple_insert( "access_tokens", - {"id": next_id, "user_id": user_id, "token": token, "device_id": device_id}, + { + "id": next_id, + "user_id": user_id, + "token": token, + "device_id": device_id, + "valid_until_ms": valid_until_ms, + }, desc="add_access_token_to_user", ) diff --git a/synapse/storage/schema/delta/55/access_token_expiry.sql b/synapse/storage/schema/delta/55/access_token_expiry.sql new file mode 100644 index 000000000000..4590604bfd8a --- /dev/null +++ b/synapse/storage/schema/delta/55/access_token_expiry.sql @@ -0,0 +1,18 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- when this access token can be used until, in ms since the epoch. NULL means the token +-- never expires. 
+ALTER TABLE access_tokens ADD COLUMN valid_until_ms BIGINT; diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index ee92ceeb60ef..c0cb8ef296ff 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -262,9 +262,11 @@ def test_cannot_use_regular_token_as_guest(self): self.store.add_access_token_to_user = Mock() token = yield self.hs.handlers.auth_handler.get_access_token_for_user_id( - USER_ID, "DEVICE" + USER_ID, "DEVICE", valid_until_ms=None + ) + self.store.add_access_token_to_user.assert_called_with( + USER_ID, token, "DEVICE", None ) - self.store.add_access_token_to_user.assert_called_with(USER_ID, token, "DEVICE") def get_user(tok): if token != tok: diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index b204a0700d25..b03103d96ff5 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -117,7 +117,9 @@ def test_short_term_login_token_cannot_replace_user_id(self): def test_mau_limits_disabled(self): self.hs.config.limit_usage_by_mau = False # Ensure does not throw exception - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) yield self.auth_handler.validate_short_term_login_token_and_get_user_id( self._get_macaroon().serialize() @@ -131,7 +133,9 @@ def test_mau_limits_exceeded_large(self): ) with self.assertRaises(ResourceLimitError): - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.large_number_of_users) @@ -150,7 +154,9 @@ def test_mau_limits_parity(self): return_value=defer.succeed(self.hs.config.max_mau_value) ) with self.assertRaises(ResourceLimitError): - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) @@ -166,7 +172,9 @@ def test_mau_limits_parity(self): self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.hs.config.max_mau_value) ) - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().user_last_seen_monthly_active = Mock( return_value=defer.succeed(self.hs.get_clock().time_msec()) ) @@ -185,7 +193,9 @@ def test_mau_limits_not_exceeded(self): return_value=defer.succeed(self.small_number_of_users) ) # Ensure does not raise exception - yield self.auth_handler.get_access_token_for_user_id("user_a") + yield self.auth_handler.get_access_token_for_user_id( + "user_a", device_id=None, valid_until_ms=None + ) self.hs.get_datastore().get_monthly_active_count = Mock( return_value=defer.succeed(self.small_number_of_users) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 1b7e1dacee0e..90d012937404 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -272,7 +272,10 @@ def get_or_create_user(self, requester, localpart, displayname, password_hash=No ) else: yield self.hs.get_auth_handler().delete_access_tokens_for_user(user_id) - yield self.store.add_access_token_to_user(user_id=user_id, token=token) + + yield 
self.store.add_access_token_to_user( + user_id=user_id, token=token, device_id=None, valid_until_ms=None + ) if displayname is not None: # logger.info("setting user display name: %s -> %s", user_id, displayname) diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 0397f91a9e69..eae5411325fc 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -2,10 +2,14 @@ import synapse.rest.admin from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import devices +from synapse.rest.client.v2_alpha.account import WhoamiRestServlet from tests import unittest +from tests.unittest import override_config LOGIN_URL = b"/_matrix/client/r0/login" +TEST_URL = b"/_matrix/client/r0/account/whoami" class LoginRestServletTestCase(unittest.HomeserverTestCase): @@ -13,6 +17,8 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, + devices.register_servlets, + lambda hs, http_server: WhoamiRestServlet(hs).register(http_server), ] def make_homeserver(self, reactor, clock): @@ -144,3 +150,105 @@ def test_POST_ratelimiting_per_account_failed_attempts(self): self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) + + @override_config({"session_lifetime": "24h"}) + def test_soft_logout(self): + self.register_user("kermit", "monkey") + + # we shouldn't be able to make requests without an access token + request, channel = self.make_request(b"GET", TEST_URL) + self.render(request) + self.assertEquals(channel.result["code"], b"401", channel.result) + self.assertEquals(channel.json_body["errcode"], "M_MISSING_TOKEN") + + # log in as normal + params = { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": "kermit"}, + "password": "monkey", + } + request, channel = self.make_request(b"POST", LOGIN_URL, params) + self.render(request) + + self.assertEquals(channel.code, 200, channel.result) + access_token = channel.json_body["access_token"] + device_id = channel.json_body["device_id"] + + # we should now be able to make requests with the access token + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 200, channel.result) + + # time passes + self.reactor.advance(24 * 3600) + + # ... and we should be soft-logouted + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEquals(channel.json_body["soft_logout"], True) + + # + # test behaviour after deleting the expired device + # + + # we now log in as a different device + access_token_2 = self.login("kermit", "monkey") + + # more requests with the expired token should still return a soft-logout + self.reactor.advance(3600) + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEquals(channel.json_body["soft_logout"], True) + + # ... 
but if we delete that device, it will be a proper logout + self._delete_device(access_token_2, "kermit", "monkey", device_id) + + request, channel = self.make_request( + b"GET", TEST_URL, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + self.assertEquals(channel.json_body["errcode"], "M_UNKNOWN_TOKEN") + self.assertEquals(channel.json_body["soft_logout"], False) + + def _delete_device(self, access_token, user_id, password, device_id): + """Perform the UI-Auth to delete a device""" + request, channel = self.make_request( + b"DELETE", "devices/" + device_id, access_token=access_token + ) + self.render(request) + self.assertEquals(channel.code, 401, channel.result) + # check it's a UI-Auth fail + self.assertEqual( + set(channel.json_body.keys()), + {"flows", "params", "session"}, + channel.result, + ) + + auth = { + "type": "m.login.password", + # https://github.com/matrix-org/synapse/issues/5665 + # "identifier": {"type": "m.id.user", "user": user_id}, + "user": user_id, + "password": password, + "session": channel.json_body["session"], + } + + request, channel = self.make_request( + b"DELETE", + "devices/" + device_id, + access_token=access_token, + content={"auth": auth}, + ) + self.render(request) + self.assertEquals(channel.code, 200, channel.result) diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index 9365c4622d5c..0253c4ac05b4 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -57,7 +57,7 @@ def test_register(self): def test_add_tokens(self): yield self.store.register_user(self.user_id, self.pwhash) yield self.store.add_access_token_to_user( - self.user_id, self.tokens[1], self.device_id + self.user_id, self.tokens[1], self.device_id, valid_until_ms=None ) result = yield self.store.get_user_by_access_token(self.tokens[1]) @@ -72,9 +72,11 @@ def test_add_tokens(self): def test_user_delete_access_tokens(self): # add some tokens yield self.store.register_user(self.user_id, self.pwhash) - yield self.store.add_access_token_to_user(self.user_id, self.tokens[0]) yield self.store.add_access_token_to_user( - self.user_id, self.tokens[1], self.device_id + self.user_id, self.tokens[0], device_id=None, valid_until_ms=None + ) + yield self.store.add_access_token_to_user( + self.user_id, self.tokens[1], self.device_id, valid_until_ms=None ) # now delete some From d336b51331d5cf40a577397e8945223d1949e965 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 12 Jul 2019 17:27:07 +0100 Subject: [PATCH 52/80] Add a `docker` type to the towncrier configuration (#5673) ... 
and certain other changelog-related fixes --- CONTRIBUTING.rst | 31 +++++++++++++++--------- changelog.d/5619.docker | 1 + changelog.d/5619.misc | 1 - changelog.d/{5620.bugfix => 5620.docker} | 2 +- changelog.d/{5655.doc => 5655.misc} | 0 changelog.d/5673.misc | 1 + pyproject.toml | 5 ++++ 7 files changed, 27 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5619.docker delete mode 100644 changelog.d/5619.misc rename changelog.d/{5620.bugfix => 5620.docker} (80%) rename changelog.d/{5655.doc => 5655.misc} (100%) create mode 100644 changelog.d/5673.misc diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 2c44422a0e84..94dc6504855f 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -30,11 +30,10 @@ use github's pull request workflow to review the contribution, and either ask you to make any refinements needed or merge it and make them ourselves. The changes will then land on master when we next do a release. -We use `CircleCI `_ and `Buildkite -`_ for continuous integration. -Buildkite builds need to be authorised by a maintainer. If your change breaks -the build, this will be shown in GitHub, so please keep an eye on the pull -request for feedback. +We use `Buildkite `_ for +continuous integration. Buildkite builds need to be authorised by a +maintainer. If your change breaks the build, this will be shown in GitHub, so +please keep an eye on the pull request for feedback. To run unit tests in a local development environment, you can use: @@ -70,13 +69,21 @@ All changes, even minor ones, need a corresponding changelog / newsfragment entry. These are managed by Towncrier (https://github.com/hawkowl/towncrier). -To create a changelog entry, make a new file in the ``changelog.d`` -file named in the format of ``PRnumber.type``. The type can be -one of ``feature``, ``bugfix``, ``removal`` (also used for -deprecations), or ``misc`` (for internal-only changes). - -The content of the file is your changelog entry, which can contain Markdown -formatting. The entry should end with a full stop ('.') for consistency. +To create a changelog entry, make a new file in the ``changelog.d`` file named +in the format of ``PRnumber.type``. The type can be one of the following: + +* ``feature``. +* ``bugfix``. +* ``docker`` (for updates to the Docker image). +* ``doc`` (for updates to the documentation). +* ``removal`` (also used for deprecations). +* ``misc`` (for internal-only changes). + +The content of the file is your changelog entry, which should be a short +description of your change in the same style as the rest of our `changelog +`_. The file can +contain Markdown formatting, and should end with a full stop ('.') for +consistency. Adding credits to the changelog is encouraged, we value your contributions and would like to have you shouted out in the release notes! diff --git a/changelog.d/5619.docker b/changelog.d/5619.docker new file mode 100644 index 000000000000..b69e5cc57cb5 --- /dev/null +++ b/changelog.d/5619.docker @@ -0,0 +1 @@ +Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). 
diff --git a/changelog.d/5619.misc b/changelog.d/5619.misc deleted file mode 100644 index c5e22d2051ed..000000000000 --- a/changelog.d/5619.misc +++ /dev/null @@ -1 +0,0 @@ -Base Docker image on a newer Alpine Linux version (3.8 -> 3.10) diff --git a/changelog.d/5620.bugfix b/changelog.d/5620.docker similarity index 80% rename from changelog.d/5620.bugfix rename to changelog.d/5620.docker index 17e1f133e847..cbb5a75d6ad5 100644 --- a/changelog.d/5620.bugfix +++ b/changelog.d/5620.docker @@ -1 +1 @@ -Add missing space in default logging file format generated by the Docker image +Add missing space in default logging file format generated by the Docker image. diff --git a/changelog.d/5655.doc b/changelog.d/5655.misc similarity index 100% rename from changelog.d/5655.doc rename to changelog.d/5655.misc diff --git a/changelog.d/5673.misc b/changelog.d/5673.misc new file mode 100644 index 000000000000..194225635834 --- /dev/null +++ b/changelog.d/5673.misc @@ -0,0 +1 @@ +Add a `docker` type to the towncrier configuration. diff --git a/pyproject.toml b/pyproject.toml index ef329aab41a3..db4a2e41e439 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,11 @@ name = "Bugfixes" showcontent = true + [[tool.towncrier.type]] + directory = "docker" + name = "Updates to the Docker image" + showcontent = true + [[tool.towncrier.type]] directory = "doc" name = "Improved Documentation" From 18c516698e78004df39ec62c3fc08ff03313ba4e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 15 Jul 2019 11:45:29 +0100 Subject: [PATCH 53/80] Return a different error from Invalid Password when a user is deactivated (#5674) Return `This account has been deactivated` instead of `Invalid password` when a user is deactivated. --- changelog.d/5674.feature | 1 + synapse/api/errors.py | 16 ++++++++++++++++ synapse/handlers/auth.py | 9 +++++++++ 3 files changed, 26 insertions(+) create mode 100644 changelog.d/5674.feature diff --git a/changelog.d/5674.feature b/changelog.d/5674.feature new file mode 100644 index 000000000000..04bdfa4ad5eb --- /dev/null +++ b/changelog.d/5674.feature @@ -0,0 +1 @@ +Return "This account has been deactivated" when a deactivated user tries to login. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index a6e753c30c26..ad3e262041df 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -139,6 +139,22 @@ def error_dict(self): return cs_error(self.msg, self.errcode, consent_uri=self._consent_uri) +class UserDeactivatedError(SynapseError): + """The error returned to the client when the user attempted to access an + authenticated endpoint, but the account has been deactivated. + """ + + def __init__(self, msg): + """Constructs a UserDeactivatedError + + Args: + msg (str): The human-readable error message + """ + super(UserDeactivatedError, self).__init__( + code=http_client.FORBIDDEN, msg=msg, errcode=Codes.UNKNOWN + ) + + class RegistrationError(SynapseError): """An error raised when a registration event fails.""" diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index b74a6e9c6225..d4d65749754e 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -35,6 +35,7 @@ LoginError, StoreError, SynapseError, + UserDeactivatedError, ) from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import defer_to_thread @@ -623,6 +624,7 @@ def check_user_exists(self, user_id): Raises: LimitExceededError if the ratelimiter's login requests count for this user is too high too proceed. 
+ UserDeactivatedError if a user is found but is deactivated. """ self.ratelimit_login_per_account(user_id) res = yield self._find_user_id_and_pwd_hash(user_id) @@ -838,6 +840,13 @@ def _check_local_password(self, user_id, password): if not lookupres: defer.returnValue(None) (user_id, password_hash) = lookupres + + # If the password hash is None, the account has likely been deactivated + if not password_hash: + deactivated = yield self.store.get_user_deactivated_status(user_id) + if deactivated: + raise UserDeactivatedError("This account has been deactivated") + result = yield self.validate_hash(password, password_hash) if not result: logger.warn("Failed password login for user %s", user_id) From 823e13ddf49b28eb0dab5d7be3921acc92fd0e44 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 13:15:34 +0100 Subject: [PATCH 54/80] Change add_arguments to be a static method --- synapse/config/_base.py | 32 +++++++++++++++++++++++++++++++- synapse/config/database.py | 3 ++- synapse/config/logger.py | 3 ++- synapse/config/registration.py | 3 ++- synapse/config/server.py | 3 ++- 5 files changed, 39 insertions(+), 5 deletions(-) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 14d3f7c1fed7..e588f829812e 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -137,12 +137,42 @@ def read_file(cls, file_path, config_name): return file_stream.read() def invoke_all(self, name, *args, **kargs): + """Invoke all instance methods with the given name and arguments in the + class's MRO. + + Args: + name (str): Name of function to invoke + *args + **kwargs + + Returns: + list: The list of the return values from each method called + """ results = [] for cls in type(self).mro(): if name in cls.__dict__: results.append(getattr(cls, name)(self, *args, **kargs)) return results + @classmethod + def invoke_all_static(cls, name, *args, **kargs): + """Invoke all static methods with the given name and arguments in the + class's MRO. + + Args: + name (str): Name of function to invoke + *args + **kwargs + + Returns: + list: The list of the return values from each method called + """ + results = [] + for c in cls.mro(): + if name in c.__dict__: + results.append(getattr(c, name)(*args, **kargs)) + return results + def generate_config( self, config_dir_path, @@ -241,7 +271,7 @@ def create_argument_parser(cls, description): # We can only invoke `add_arguments` on an actual object, but # `add_arguments` should be side effect free so this is probably fine. 
- cls().invoke_all("add_arguments", config_parser) + cls.invoke_all_static("add_arguments", config_parser) return config_parser diff --git a/synapse/config/database.py b/synapse/config/database.py index bcb2089dd727..746a6cd1f42a 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -69,7 +69,8 @@ def set_databasepath(self, database_path): if database_path is not None: self.database_config["args"]["database"] = database_path - def add_arguments(self, parser): + @staticmethod + def add_arguments(parser): db_group = parser.add_argument_group("database") db_group.add_argument( "-d", diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 931aec41c09c..52cf691227c7 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -103,7 +103,8 @@ def read_arguments(self, args): if args.log_file is not None: self.log_file = args.log_file - def add_arguments(cls, parser): + @staticmethod + def add_arguments(parser): logging_group = parser.add_argument_group("logging") logging_group.add_argument( "-v", diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4a59e6ec90f6..ee5885251588 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -222,7 +222,8 @@ def generate_config_section(self, generate_secrets=False, **kwargs): % locals() ) - def add_arguments(self, parser): + @staticmethod + def add_arguments(parser): reg_group = parser.add_argument_group("registration") reg_group.add_argument( "--enable-registration", diff --git a/synapse/config/server.py b/synapse/config/server.py index 2a74dea2ea6b..080d0630bd96 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -639,7 +639,8 @@ def read_arguments(self, args): if args.print_pidfile is not None: self.print_pidfile = args.print_pidfile - def add_arguments(self, parser): + @staticmethod + def add_arguments(parser): server_group = parser.add_argument_group("server") server_group.add_argument( "-D", From 37b524f9718b0faeaaac0dccab62c9a5e270c4a2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 13:19:57 +0100 Subject: [PATCH 55/80] Fix up comments --- synapse/app/admin_cmd.py | 4 ++-- synapse/config/_base.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index bd73c47ae20a..e618a62432c3 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Copyright 2016 OpenMarket Ltd +# Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -109,7 +109,7 @@ def start(config_options): subparser = parser.add_subparsers( title="Admin Commands", - description="Choose and admin command to perform.", + description="Choose an admin command to perform.", required=True, dest="command", metavar="", diff --git a/synapse/config/_base.py b/synapse/config/_base.py index e588f829812e..74a7980ebe39 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -284,7 +284,7 @@ def load_config_with_parser(cls, config_parser, argv): Used for workers where we want to add extra flags/subcommands. 
Args: - conifg_parser (ArgumentParser) + config_parser (ArgumentParser) argv (list[str]) Returns: From fdefb9e29a7c28e5b218fc8d9745a2ec58bc3d27 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 13:43:25 +0100 Subject: [PATCH 56/80] Move creation of ArgumentParser to caller --- synapse/app/admin_cmd.py | 4 +++- synapse/config/_base.py | 15 +++++---------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index e618a62432c3..c4d752593a11 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import argparse import logging import sys @@ -105,7 +106,8 @@ def export_data_command(hs, user_id, directory): def start(config_options): - parser = HomeServerConfig.create_argument_parser("Synapse Admin Command") + parser = argparse.ArgumentParser(description="Synapse Admin Command") + HomeServerConfig.add_arguments_to_parser(parser) subparser = parser.add_subparsers( title="Admin Commands", diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 74a7980ebe39..8c3acff03e6d 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -231,27 +231,24 @@ def load_config(cls, description, argv): Returns: Config object. """ - config_parser = cls.create_argument_parser(description) + config_parser = argparse.ArgumentParser(description=description) + cls.add_arguments_to_parser(config_parser) obj, _ = cls.load_config_with_parser(config_parser, argv) return obj @classmethod - def create_argument_parser(cls, description): - """Create an ArgumentParser instance with all the config flags. + def add_arguments_to_parser(cls, config_parser): + """Adds all the config flags to an ArgumentParser. Doesn't support config-file-generation: used by the worker apps. Used for workers where we want to add extra flags/subcommands. Args: - description (str): App description - - Returns: - ArgumentParser + config_parser (ArgumentParser): The parser to add the config flags to """ config_parser.add_argument( "-c", "--config-path", @@ -273,8 +270,6 @@ def create_argument_parser(cls, description): # `add_arguments` should be side effect free so this is probably fine. cls.invoke_all_static("add_arguments", config_parser) - return config_parser - @classmethod def load_config_with_parser(cls, config_parser, argv): """Parse the commandline and config files with the given parser From c8f35d8d38c91b3493bd34363096a6e26756d8d7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 13:49:18 +0100 Subject: [PATCH 57/80] Use set_defaults(func=) style --- synapse/app/admin_cmd.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index c4d752593a11..611a196e54bc 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -90,15 +90,17 @@ def get_streams_to_replicate(self): @defer.inlineCallbacks -def export_data_command(hs, user_id, directory): +def export_data_command(hs, args): """Export data for a user. Args: - user_id (str) - directory (str|None): Directory to write output to. Will create a temp - directory if not specified.
+ hs (HomeServer) + args (argparse.Namespace) """ + user_id = args.user_id + directory = args.output_directory + res = yield hs.get_handlers().admin_handler.exfiltrate_user_data( user_id, FileExfiltrationWriter(user_id, directory=directory) ) @@ -129,6 +131,7 @@ def start(config_options): help="The directory to store the exported data in. Must be emtpy. Defaults" " to creating a temp directory.", ) + export_data_parser.set_defaults(func=export_data_command) try: config, args = HomeServerConfig.load_config_with_parser(parser, config_options) @@ -173,12 +176,6 @@ def start(config_options): ss.setup() - if args.command == "export-data": - command = lambda: export_data_command(ss, args.user_id, args.output_directory) - else: - # This shouldn't happen. - raise ConfigError("Unknown admin command %s" % (args.command,)) - # We use task.react as the basic run command as it correctly handles tearing # down the reactor when the deferreds resolve and setting the return value. # We also make sure that `_base.start` gets run before we actually run the @@ -188,7 +185,7 @@ def start(config_options): def run(_reactor): with LoggingContext("command"): yield _base.start(ss, []) - yield command() + yield args.func(ss, args) _base.start_worker_reactor( "synapse-admin-cmd", config, run_command=lambda: task.react(run) From 1b2067f53d772de1cadca22bdee176b9509d5b6f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 14:15:22 +0100 Subject: [PATCH 58/80] Add FileExfiltrationWriter --- synapse/app/admin_cmd.py | 70 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 69 insertions(+), 1 deletion(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 611a196e54bc..13281d0af9f1 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -15,7 +15,11 @@ # limitations under the License. import argparse import logging +import os import sys +import tempfile + +from canonicaljson import json from twisted.internet import defer, task @@ -24,7 +28,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging -from synapse.handlers.admin import FileExfiltrationWriter +from synapse.handlers.admin import ExfiltrationWriter from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -107,6 +111,70 @@ def export_data_command(hs, args): print(res) +class FileExfiltrationWriter(ExfiltrationWriter): + """An ExfiltrationWriter that writes the users data to a directory. + Returns the directory location on completion. + + Note: This writes to disk on the main reactor thread. + + Args: + user_id (str): The user whose data is being exfiltrated. + directory (str|None): The directory to write the data to, if None then + will write to a temporary directory. 
+ """ + + def __init__(self, user_id, directory=None): + self.user_id = user_id + + if directory: + self.base_directory = directory + else: + self.base_directory = tempfile.mkdtemp( + prefix="synapse-exfiltrate__%s__" % (user_id,) + ) + + os.makedirs(self.base_directory, exist_ok=True) + if list(os.listdir(self.base_directory)): + raise Exception("Directory must be empty") + + def write_events(self, room_id, events): + room_directory = os.path.join(self.base_directory, "rooms", room_id) + os.makedirs(room_directory, exist_ok=True) + events_file = os.path.join(room_directory, "events") + + with open(events_file, "a") as f: + for event in events: + print(json.dumps(event.get_pdu_json()), file=f) + + def write_state(self, room_id, event_id, state): + room_directory = os.path.join(self.base_directory, "rooms", room_id) + state_directory = os.path.join(room_directory, "state") + os.makedirs(state_directory, exist_ok=True) + + event_file = os.path.join(state_directory, event_id) + + with open(event_file, "a") as f: + for event in state.values(): + print(json.dumps(event.get_pdu_json()), file=f) + + def write_invite(self, room_id, event, state): + self.write_events(room_id, [event]) + + # We write the invite state somewhere else as they aren't full events + # and are only a subset of the state at the event. + room_directory = os.path.join(self.base_directory, "rooms", room_id) + os.makedirs(room_directory, exist_ok=True) + + invite_state = os.path.join(room_directory, "invite_state") + + with open(invite_state, "a") as f: + for event in state.values(): + print(json.dumps(event), file=f) + + def finished(self): + return self.base_directory + + def start(config_options): parser = argparse.ArgumentParser(description="Synapse Admin Command") HomeServerConfig.add_arguments_to_parser(parser) From eca4f5ac730d9ecf29d25888a40a7264b6041a54 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 14:17:28 +0100 Subject: [PATCH 59/80] s/exfiltrate_user_data/export_user_data/ --- synapse/app/admin_cmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 13281d0af9f1..3ff19f18e57b 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -105,7 +105,7 @@ def export_data_command(hs, args): user_id = args.user_id directory = args.output_directory - res = yield hs.get_handlers().admin_handler.exfiltrate_user_data( + res = yield hs.get_handlers().admin_handler.export_user_data( user_id, FileExfiltrationWriter(user_id, directory=directory) ) print(res) From 03cc8c4b5d187129904dd76a525f111424c31de0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Jul 2019 14:25:05 +0100 Subject: [PATCH 60/80] Fix invoking add_argument from homeserver.py --- synapse/config/_base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 8c3acff03e6d..ba1025f86ecd 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -468,7 +468,7 @@ def load_or_generate_config(cls, description, argv): formatter_class=argparse.RawDescriptionHelpFormatter, ) - obj.invoke_all("add_arguments", parser) + obj.invoke_all_static("add_arguments", parser) args = parser.parse_args(remaining_args) config_dict = read_config_files(config_files) From d0d479c1af78af2204a7ed5aee1e187225a5af90 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Jul 2019 09:52:56 +0100 Subject: [PATCH 61/80] Fix typo in synapse/app/admin_cmd.py Co-Authored-By: Aaron Raimist --- 
synapse/app/admin_cmd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 3ff19f18e57b..68802ca75181 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -196,7 +196,7 @@ def start(config_options): action="store", metavar="DIRECTORY", required=False, - help="The directory to store the exported data in. Must be emtpy. Defaults" + help="The directory to store the exported data in. Must be empty. Defaults" " to creating a temp directory.", ) export_data_parser.set_defaults(func=export_data_command) From f44354e17f11c2a5949861052db1f49d8b67233b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Jul 2019 11:39:13 +0100 Subject: [PATCH 62/80] Clean up arg name and remove lying comment --- synapse/config/_base.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/synapse/config/_base.py b/synapse/config/_base.py index ba1025f86ecd..6ce5cd07fb90 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -266,12 +266,10 @@ def add_arguments_to_parser(cls, config_parser): " Defaults to the directory containing the last config file", ) - # We can only invoke `add_arguments` on an actual object, but - # `add_arguments` should be side effect free so this is probably fine. cls.invoke_all_static("add_arguments", config_parser) @classmethod - def load_config_with_parser(cls, config_parser, argv): + def load_config_with_parser(cls, parser, argv): """Parse the commandline and config files with the given parser Doesn't support config-file-generation: used by the worker apps. @@ -279,23 +277,23 @@ def load_config_with_parser(cls, config_parser, argv): Used for workers where we want to add extra flags/subcommands. Args: - config_parser (ArgumentParser) + parser (ArgumentParser) argv (list[str]) Returns: tuple[HomeServerConfig, argparse.Namespace]: Returns the parsed config object and the parsed argparse.Namespace object from - `config_parser.parse_args(..)` + `parser.parse_args(..)` """ obj = cls() - config_args = config_parser.parse_args(argv) + config_args = parser.parse_args(argv) config_files = find_config_files(search_paths=config_args.config_path) if not config_files: - config_parser.error("Must supply a config file.") + parser.error("Must supply a config file.") if config_args.keys_directory: config_dir_path = config_args.keys_directory From 5ed7853bb018b58e99c6dd3ca91905b877968494 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Jul 2019 11:45:57 +0100 Subject: [PATCH 63/80] Remove pointless description --- synapse/app/admin_cmd.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 68802ca75181..1fd52a552693 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -181,7 +181,6 @@ def start(config_options): subparser = parser.add_subparsers( title="Admin Commands", - description="Choose an admin command to perform.", required=True, dest="command", metavar="", From 65c5592b8ed11e5ed4c3d6e29aa530d089c463ac Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 17 Jul 2019 16:49:19 +0100 Subject: [PATCH 64/80] Refactor `get_events_as_list` (#5699) A couple of changes here: * get rid of a redundant `allow_rejected` condition - we should already have filtered out any rejected events before we get to that point in the code, and the redundancy is confusing. 
Instead, let's stick in an assertion just to make double-sure we aren't leaking rejected events by mistake. * factor out a `_get_events_from_cache_or_db` method, which is going to be important for a forthcoming fix to redactions. --- changelog.d/5699.bugfix | 1 + synapse/storage/events_worker.py | 122 +++++++++++++++++++------------ 2 files changed, 75 insertions(+), 48 deletions(-) create mode 100644 changelog.d/5699.bugfix diff --git a/changelog.d/5699.bugfix b/changelog.d/5699.bugfix new file mode 100644 index 000000000000..30d5e67f6774 --- /dev/null +++ b/changelog.d/5699.bugfix @@ -0,0 +1 @@ +Fix some problems with authenticating redactions in recent room versions. \ No newline at end of file diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 874d0a56bcbb..6d6bb1d5d398 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -218,37 +218,23 @@ def get_events_as_list( if not event_ids: defer.returnValue([]) - event_id_list = event_ids - event_ids = set(event_ids) - - event_entry_map = self._get_events_from_cache( - event_ids, allow_rejected=allow_rejected + # there may be duplicates so we cast the list to a set + event_entry_map = yield self._get_events_from_cache_or_db( + set(event_ids), allow_rejected=allow_rejected ) - missing_events_ids = [e for e in event_ids if e not in event_entry_map] - - if missing_events_ids: - log_ctx = LoggingContext.current_context() - log_ctx.record_event_fetch(len(missing_events_ids)) - - # Note that _enqueue_events is also responsible for turning db rows - # into FrozenEvents (via _get_event_from_row), which involves seeing if - # the events have been redacted, and if so pulling the redaction event out - # of the database to check it. - # - # _enqueue_events is a bit of a rubbish name but naming is hard. - missing_events = yield self._enqueue_events( - missing_events_ids, allow_rejected=allow_rejected - ) - - event_entry_map.update(missing_events) - events = [] - for event_id in event_id_list: + for event_id in event_ids: entry = event_entry_map.get(event_id, None) if not entry: continue + if not allow_rejected: + assert not entry.event.rejected_reason, ( + "rejected event returned from _get_events_from_cache_or_db despite " + "allow_rejected=False" + ) + # Starting in room version v3, some redactions need to be rechecked if we # didn't have the redacted event at the time, so we recheck on read # instead. @@ -291,34 +277,74 @@ def get_events_as_list( # recheck. entry.event.internal_metadata.recheck_redaction = False else: - # We don't have the event that is being redacted, so we - # assume that the event isn't authorized for now. (If we - # later receive the event, then we will always redact - # it anyway, since we have this redaction) + # We either don't have the event that is being redacted (so we + # assume that the event isn't authorised for now), or the + # senders don't match (so it will never be authorised). Either + # way, we shouldn't return it. 
+ # + # (If we later receive the event, then we will redact it anyway, + # since we have this redaction) continue - if allow_rejected or not entry.event.rejected_reason: - if check_redacted and entry.redacted_event: - event = entry.redacted_event - else: - event = entry.event - - events.append(event) - - if get_prev_content: - if "replaces_state" in event.unsigned: - prev = yield self.get_event( - event.unsigned["replaces_state"], - get_prev_content=False, - allow_none=True, - ) - if prev: - event.unsigned = dict(event.unsigned) - event.unsigned["prev_content"] = prev.content - event.unsigned["prev_sender"] = prev.sender + if check_redacted and entry.redacted_event: + event = entry.redacted_event + else: + event = entry.event + + events.append(event) + + if get_prev_content: + if "replaces_state" in event.unsigned: + prev = yield self.get_event( + event.unsigned["replaces_state"], + get_prev_content=False, + allow_none=True, + ) + if prev: + event.unsigned = dict(event.unsigned) + event.unsigned["prev_content"] = prev.content + event.unsigned["prev_sender"] = prev.sender defer.returnValue(events) + @defer.inlineCallbacks + def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False): + """Fetch a bunch of events from the cache or the database. + + If events are pulled from the database, they will be cached for future lookups. + + Args: + event_ids (Iterable[str]): The event_ids of the events to fetch + allow_rejected (bool): Whether to include rejected events + + Returns: + Deferred[Dict[str, _EventCacheEntry]]: + map from event id to result + """ + event_entry_map = self._get_events_from_cache( + event_ids, allow_rejected=allow_rejected + ) + + missing_events_ids = [e for e in event_ids if e not in event_entry_map] + + if missing_events_ids: + log_ctx = LoggingContext.current_context() + log_ctx.record_event_fetch(len(missing_events_ids)) + + # Note that _enqueue_events is also responsible for turning db rows + # into FrozenEvents (via _get_event_from_row), which involves seeing if + # the events have been redacted, and if so pulling the redaction event out + # of the database to check it. + # + # _enqueue_events is a bit of a rubbish name but naming is hard. + missing_events = yield self._enqueue_events( + missing_events_ids, allow_rejected=allow_rejected + ) + + event_entry_map.update(missing_events) + + return event_entry_map + def _invalidate_get_event_cache(self, event_id): self._get_event_cache.invalidate((event_id,)) @@ -326,7 +352,7 @@ def _get_events_from_cache(self, events, allow_rejected, update_metrics=True): """Fetch events from the caches Args: - events (list(str)): list of event_ids to fetch + events (Iterable[str]): list of event_ids to fetch allow_rejected (bool): Whether to return events that were rejected update_metrics (bool): Whether to update the cache hit ratio metrics From 375162b3c36482b006d28ab8bf719617945c4b1e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 17 Jul 2019 16:52:02 +0100 Subject: [PATCH 65/80] Fix redaction authentication (#5700) Ensures that redactions are correctly authenticated for recent room versions. There are a few things going on here: * `_fetch_event_rows` is updated to return a dict rather than a list of rows. * Rather than returning multiple copies of an event which was redacted multiple times, it returns the redactions as a list within the dict. 
* It also returns the actual rejection reason, rather than merely the fact that it was rejected, so that we don't have to query the table again in `_get_event_from_row`. * The redaction handling is factored out of `_get_event_from_row`, and now checks if any of the redactions are valid. --- changelog.d/5700.bugfix | 2 + synapse/storage/events_worker.py | 214 +++++++++++++++++++------------ 2 files changed, 131 insertions(+), 85 deletions(-) create mode 100644 changelog.d/5700.bugfix diff --git a/changelog.d/5700.bugfix b/changelog.d/5700.bugfix new file mode 100644 index 000000000000..51bce8d441e1 --- /dev/null +++ b/changelog.d/5700.bugfix @@ -0,0 +1,2 @@ +Fix some problems with authenticating redactions in recent room versions. + diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 6d6bb1d5d398..e2fc7bc047a1 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -37,6 +37,7 @@ ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import get_domain_from_id +from synapse.util import batch_iter from synapse.util.metrics import Measure from ._base import SQLBaseStore @@ -240,27 +241,8 @@ def get_events_as_list( # instead. if not allow_rejected and entry.event.type == EventTypes.Redaction: if entry.event.internal_metadata.need_to_check_redaction(): - # XXX: we need to avoid calling get_event here. - # - # The problem is that we end up at this point when an event - # which has been redacted is pulled out of the database by - # _enqueue_events, because _enqueue_events needs to check - # the redaction before it can cache the redacted event. So - # obviously, calling get_event to get the redacted event out - # of the database gives us an infinite loop. - # - # For now (quick hack to fix during 0.99 release cycle), we - # just go and fetch the relevant row from the db, but it - # would be nice to think about how we can cache this rather - # than hit the db every time we access a redaction event. - # - # One thought on how to do this: - # 1. split get_events_as_list up so that it is divided into - # (a) get the rawish event from the db/cache, (b) do the - # redaction/rejection filtering - # 2. have _get_event_from_row just call the first half of - # that - + # XXX: we should probably use _get_events_from_cache_or_db here, + # so that we can benefit from caching. orig_sender = yield self._simple_select_one_onecol( table="events", keyvalues={"event_id": entry.event.redacts}, @@ -410,19 +392,16 @@ def _fetch_event_list(self, conn, event_list): The fetch requests. Each entry consists of a list of event ids to be fetched, and a deferred to be completed once the events have been fetched. 
- """ with Measure(self._clock, "_fetch_event_list"): try: event_id_lists = list(zip(*event_list))[0] event_ids = [item for sublist in event_id_lists for item in sublist] - rows = self._new_transaction( + row_dict = self._new_transaction( conn, "do_fetch", [], [], self._fetch_event_rows, event_ids ) - row_dict = {r["event_id"]: r for r in rows} - # We only want to resolve deferreds from the main thread def fire(lst, res): for ids, d in lst: @@ -480,7 +459,7 @@ def _enqueue_events(self, events, allow_rejected=False): logger.debug("Loaded %d events (%d rows)", len(events), len(rows)) if not allow_rejected: - rows[:] = [r for r in rows if not r["rejects"]] + rows[:] = [r for r in rows if r["rejected_reason"] is None] res = yield make_deferred_yieldable( defer.gatherResults( @@ -489,8 +468,8 @@ def _enqueue_events(self, events, allow_rejected=False): self._get_event_from_row, row["internal_metadata"], row["json"], - row["redacts"], - rejected_reason=row["rejects"], + row["redactions"], + rejected_reason=row["rejected_reason"], format_version=row["format_version"], ) for row in rows @@ -501,49 +480,98 @@ def _enqueue_events(self, events, allow_rejected=False): defer.returnValue({e.event.event_id: e for e in res if e}) - def _fetch_event_rows(self, txn, events): - rows = [] - N = 200 - for i in range(1 + len(events) // N): - evs = events[i * N : (i + 1) * N] - if not evs: - break + def _fetch_event_rows(self, txn, event_ids): + """Fetch event rows from the database + + Events which are not found are omitted from the result. + + The returned per-event dicts contain the following keys: + + * event_id (str) + + * json (str): json-encoded event structure + + * internal_metadata (str): json-encoded internal metadata dict + + * format_version (int|None): The format of the event. Hopefully one + of EventFormatVersions. 'None' means the event predates + EventFormatVersions (so the event is format V1). + + * rejected_reason (str|None): if the event was rejected, the reason + why. + + * redactions (List[str]): a list of event-ids which (claim to) redact + this event. + + Args: + txn (twisted.enterprise.adbapi.Connection): + event_ids (Iterable[str]): event IDs to fetch + Returns: + Dict[str, Dict]: a map from event id to event info. 
+ """ + event_dict = {} + for evs in batch_iter(event_ids, 200): sql = ( "SELECT " - " e.event_id as event_id, " + " e.event_id, " " e.internal_metadata," " e.json," " e.format_version, " - " r.redacts as redacts," - " rej.event_id as rejects " + " rej.reason " " FROM event_json as e" " LEFT JOIN rejections as rej USING (event_id)" - " LEFT JOIN redactions as r ON e.event_id = r.redacts" " WHERE e.event_id IN (%s)" ) % (",".join(["?"] * len(evs)),) txn.execute(sql, evs) - rows.extend(self.cursor_to_dict(txn)) - return rows + for row in txn: + event_id = row[0] + event_dict[event_id] = { + "event_id": event_id, + "internal_metadata": row[1], + "json": row[2], + "format_version": row[3], + "rejected_reason": row[4], + "redactions": [], + } + + # check for redactions + redactions_sql = ( + "SELECT event_id, redacts FROM redactions WHERE redacts IN (%s)" + ) % (",".join(["?"] * len(evs)),) + + txn.execute(redactions_sql, evs) + + for (redacter, redacted) in txn: + d = event_dict.get(redacted) + if d: + d["redactions"].append(redacter) + + return event_dict @defer.inlineCallbacks def _get_event_from_row( - self, internal_metadata, js, redacted, format_version, rejected_reason=None + self, internal_metadata, js, redactions, format_version, rejected_reason=None ): + """Parse an event row which has been read from the database + + Args: + internal_metadata (str): json-encoded internal_metadata column + js (str): json-encoded event body from event_json + redactions (list[str]): a list of the events which claim to have redacted + this event, from the redactions table + format_version: (str): the 'format_version' column + rejected_reason (str|None): the reason this event was rejected, if any + + Returns: + _EventCacheEntry + """ with Measure(self._clock, "_get_event_from_row"): d = json.loads(js) internal_metadata = json.loads(internal_metadata) - if rejected_reason: - rejected_reason = yield self._simple_select_one_onecol( - table="rejections", - keyvalues={"event_id": rejected_reason}, - retcol="reason", - desc="_get_event_from_row_rejected_reason", - ) - if format_version is None: # This means that we stored the event before we had the concept # of a event format version, so it must be a V1 event. @@ -555,41 +583,7 @@ def _get_event_from_row( rejected_reason=rejected_reason, ) - redacted_event = None - if redacted: - redacted_event = prune_event(original_ev) - - redaction_id = yield self._simple_select_one_onecol( - table="redactions", - keyvalues={"redacts": redacted_event.event_id}, - retcol="event_id", - desc="_get_event_from_row_redactions", - ) - - redacted_event.unsigned["redacted_by"] = redaction_id - # Get the redaction event. - - because = yield self.get_event( - redaction_id, check_redacted=False, allow_none=True - ) - - if because: - # It's fine to do add the event directly, since get_pdu_json - # will serialise this field correctly - redacted_event.unsigned["redacted_because"] = because - - # Starting in room version v3, some redactions need to be - # rechecked if we didn't have the redacted event at the - # time, so we recheck on read instead. - if because.internal_metadata.need_to_check_redaction(): - expected_domain = get_domain_from_id(original_ev.sender) - if get_domain_from_id(because.sender) == expected_domain: - # This redaction event is allowed. Mark as not needing a - # recheck. 
- because.internal_metadata.recheck_redaction = False - else: - # Senders don't match, so the event isn't actually redacted - redacted_event = None + redacted_event = yield self._maybe_redact_event_row(original_ev, redactions) cache_entry = _EventCacheEntry( event=original_ev, redacted_event=redacted_event @@ -599,6 +593,56 @@ def _get_event_from_row( defer.returnValue(cache_entry) + @defer.inlineCallbacks + def _maybe_redact_event_row(self, original_ev, redactions): + """Given an event object and a list of possible redacting event ids, + determine whether to honour any of those redactions and if so return a redacted + event. + + Args: + original_ev (EventBase): + redactions (iterable[str]): list of event ids of potential redaction events + + Returns: + Deferred[EventBase|None]: if the event should be redacted, a pruned + event object. Otherwise, None. + """ + redaction_map = yield self._get_events_from_cache_or_db(redactions) + + for redaction_id in redactions: + redaction_entry = redaction_map.get(redaction_id) + if not redaction_entry: + # we don't have the redaction event, or the redaction event was not + # authorized. + continue + + redaction_event = redaction_entry.event + + # Starting in room version v3, some redactions need to be + # rechecked if we didn't have the redacted event at the + # time, so we recheck on read instead. + if redaction_event.internal_metadata.need_to_check_redaction(): + expected_domain = get_domain_from_id(original_ev.sender) + if get_domain_from_id(redaction_event.sender) == expected_domain: + # This redaction event is allowed. Mark as not needing a recheck. + redaction_event.internal_metadata.recheck_redaction = False + else: + # Senders don't match, so the event isn't actually redacted + continue + + # we found a good redaction event. Redact! + redacted_event = prune_event(original_ev) + redacted_event.unsigned["redacted_by"] = redaction_id + + # It's fine to add the event directly, since get_pdu_json + # will serialise this field correctly + redacted_event.unsigned["redacted_because"] = redaction_event + + return redacted_event + + # no valid redaction found for this event + return None + @defer.inlineCallbacks def have_events_in_timeline(self, event_ids): """Given a list of event ids, check if we have already processed and From 2091c91fdec0c90360992b4dbbd71048b940bcb0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 17 Jul 2019 17:34:13 +0100 Subject: [PATCH 66/80] More refactoring in `get_events_as_list` (#5707) We can now use `_get_events_from_cache_or_db` rather than going right back to the database, which means that (a) we can benefit from caching, and (b) it opens the way forward to more extensive checks on the original event. We now always require the original event to exist before we will serve up a redaction. --- changelog.d/5707.bugfix | 1 + synapse/storage/events_worker.py | 64 ++++++----- tests/rest/client/test_redactions.py | 159 +++++++++++++++++++++++++++ tests/unittest.py | 1 + 4 files changed, 198 insertions(+), 27 deletions(-) create mode 100644 changelog.d/5707.bugfix create mode 100644 tests/rest/client/test_redactions.py diff --git a/changelog.d/5707.bugfix b/changelog.d/5707.bugfix new file mode 100644 index 000000000000..aa3046c5e112 --- /dev/null +++ b/changelog.d/5707.bugfix @@ -0,0 +1 @@ +Fix some problems with authenticating redactions in recent room versions. 
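Before the main diff of this patch, it is worth distilling the rule it implements: a redaction is only served if its target event is available and, where the room version demands a recheck, only if the two senders share a server. A minimal sketch of that rule, not an excerpt from the patch, with plain values standing in for the real event objects:

    def should_serve_redaction(original_event, redaction_sender, needs_recheck):
        """Decide whether a redaction event may be served to clients."""

        def domain(user_id):
            # Matrix user ids look like @localpart:domain.
            return user_id.split(":", 1)[1]

        if original_event is None:
            # We don't (yet) have the event being redacted, so the redaction
            # cannot be authenticated; if the original arrives later, it will
            # be redacted before being served anyway.
            return False

        if needs_recheck:
            # Room v3+ redactions accepted before we had the original must be
            # re-authenticated on read: the sender domains have to match.
            if domain(original_event["sender"]) != domain(redaction_sender):
                return False

        return True

In the diff below, a successful recheck also clears the `recheck_redaction` flag so that the check is not repeated on the next read.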
diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index e2fc7bc047a1..1d969d70be4d 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -236,38 +236,48 @@ def get_events_as_list( "allow_rejected=False" ) - # Starting in room version v3, some redactions need to be rechecked if we - # didn't have the redacted event at the time, so we recheck on read - # instead. + # We may not have had the original event when we received a redaction, so + # we have to recheck auth now. + if not allow_rejected and entry.event.type == EventTypes.Redaction: - if entry.event.internal_metadata.need_to_check_redaction(): - # XXX: we should probably use _get_events_from_cache_or_db here, - # so that we can benefit from caching. - orig_sender = yield self._simple_select_one_onecol( - table="events", - keyvalues={"event_id": entry.event.redacts}, - retcol="sender", - allow_none=True, + redacted_event_id = entry.event.redacts + event_map = yield self._get_events_from_cache_or_db([redacted_event_id]) + original_event_entry = event_map.get(redacted_event_id) + if not original_event_entry: + # we don't have the redacted event (or it was rejected). + # + # We assume that the redaction isn't authorized for now; if the + # redacted event later turns up, the redaction will be re-checked, + # and if it is found valid, the original will get redacted before it + # is served to the client. + logger.debug( + "Withholding redaction event %s since we don't (yet) have the " + "original %s", + event_id, + redacted_event_id, ) + continue - expected_domain = get_domain_from_id(entry.event.sender) - if ( - orig_sender - and get_domain_from_id(orig_sender) == expected_domain - ): - # This redaction event is allowed. Mark as not needing a - # recheck. - entry.event.internal_metadata.recheck_redaction = False - else: - # We either don't have the event that is being redacted (so we - # assume that the event isn't authorised for now), or the - # senders don't match (so it will never be authorised). Either - # way, we shouldn't return it. - # - # (If we later receive the event, then we will redact it anyway, - # since we have this redaction) + original_event = original_event_entry.event + + if entry.event.internal_metadata.need_to_check_redaction(): + original_domain = get_domain_from_id(original_event.sender) + redaction_domain = get_domain_from_id(entry.event.sender) + if original_domain != redaction_domain: + # the senders don't match, so this is forbidden + logger.info( + "Withholding redaction %s whose sender domain %s doesn't " + "match that of redacted event %s %s", + event_id, + redaction_domain, + redacted_event_id, + original_domain, + ) continue + # Update the cache to save doing the checks again. + entry.event.internal_metadata.recheck_redaction = False + if check_redacted and entry.redacted_event: event = entry.redacted_event else: diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py new file mode 100644 index 000000000000..7d5df9585503 --- /dev/null +++ b/tests/rest/client/test_redactions.py @@ -0,0 +1,159 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import sync + +from tests.unittest import HomeserverTestCase + + +class RedactionsTestCase(HomeserverTestCase): + """Tests that various redaction events are handled correctly""" + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + # register a couple of users + self.mod_user_id = self.register_user("user1", "pass") + self.mod_access_token = self.login("user1", "pass") + self.other_user_id = self.register_user("otheruser", "pass") + self.other_access_token = self.login("otheruser", "pass") + + # Create a room + self.room_id = self.helper.create_room_as( + self.mod_user_id, tok=self.mod_access_token + ) + + # Invite the other user + self.helper.invite( + room=self.room_id, + src=self.mod_user_id, + tok=self.mod_access_token, + targ=self.other_user_id, + ) + # The other user joins + self.helper.join( + room=self.room_id, user=self.other_user_id, tok=self.other_access_token + ) + + def _redact_event(self, access_token, room_id, event_id, expect_code=200): + """Helper function to send a redaction event. + + Returns the json body. + """ + path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id) + + request, channel = self.make_request( + "POST", path, content={}, access_token=access_token + ) + self.render(request) + self.assertEqual(int(channel.result["code"]), expect_code) + return channel.json_body + + def _sync_room_timeline(self, access_token, room_id): + request, channel = self.make_request( + "GET", "sync", access_token=self.mod_access_token + ) + self.render(request) + self.assertEqual(channel.result["code"], b"200") + room_sync = channel.json_body["rooms"]["join"][room_id] + return room_sync["timeline"]["events"] + + def test_redact_event_as_moderator(self): + # as a regular user, send a message to redact + b = self.helper.send(room_id=self.room_id, tok=self.other_access_token) + msg_id = b["event_id"] + + # as the moderator, send a redaction + b = self._redact_event(self.mod_access_token, self.room_id, msg_id) + redaction_id = b["event_id"] + + # now sync + timeline = self._sync_room_timeline(self.mod_access_token, self.room_id) + + # the last event should be the redaction + self.assertEqual(timeline[-1]["event_id"], redaction_id) + self.assertEqual(timeline[-1]["redacts"], msg_id) + + # and the penultimate should be the redacted original + self.assertEqual(timeline[-2]["event_id"], msg_id) + self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id) + self.assertEqual(timeline[-2]["content"], {}) + + def test_redact_event_as_normal(self): + # as a regular user, send a message to redact + b = self.helper.send(room_id=self.room_id, tok=self.other_access_token) + normal_msg_id = b["event_id"] + + # also send one as the admin + b = self.helper.send(room_id=self.room_id, tok=self.mod_access_token) + admin_msg_id = b["event_id"] + + # as a normal, try to redact the admin's event + self._redact_event( + self.other_access_token, 
self.room_id, admin_msg_id, expect_code=403 + ) + + # now try to redact our own event + b = self._redact_event(self.other_access_token, self.room_id, normal_msg_id) + redaction_id = b["event_id"] + + # now sync + timeline = self._sync_room_timeline(self.other_access_token, self.room_id) + + # the last event should be the redaction of the normal event + self.assertEqual(timeline[-1]["event_id"], redaction_id) + self.assertEqual(timeline[-1]["redacts"], normal_msg_id) + + # the penultimate should be the unredacted one from the admin + self.assertEqual(timeline[-2]["event_id"], admin_msg_id) + self.assertNotIn("redacted_by", timeline[-2]["unsigned"]) + self.assertTrue(timeline[-2]["content"]["body"], {}) + + # and the antepenultimate should be the redacted normal + self.assertEqual(timeline[-3]["event_id"], normal_msg_id) + self.assertEqual(timeline[-3]["unsigned"]["redacted_by"], redaction_id) + self.assertEqual(timeline[-3]["content"], {}) + + def test_redact_nonexistent_event(self): + # control case: an existing event + b = self.helper.send(room_id=self.room_id, tok=self.other_access_token) + msg_id = b["event_id"] + b = self._redact_event(self.other_access_token, self.room_id, msg_id) + redaction_id = b["event_id"] + + # room moderators can send redactions for non-existent events + self._redact_event(self.mod_access_token, self.room_id, "$zzz") + + # ... but normals cannot + self._redact_event( + self.other_access_token, self.room_id, "$zzz", expect_code=404 + ) + + # when we sync, we should see only the valid redaction + timeline = self._sync_room_timeline(self.other_access_token, self.room_id) + self.assertEqual(timeline[-1]["event_id"], redaction_id) + self.assertEqual(timeline[-1]["redacts"], msg_id) + + # and the penultimate should be the redacted original + self.assertEqual(timeline[-2]["event_id"], msg_id) + self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id) + self.assertEqual(timeline[-2]["content"], {}) diff --git a/tests/unittest.py b/tests/unittest.py index cabe787cb414..f5fae2131771 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -447,6 +447,7 @@ def register_user(self, username, password, admin=False): # Create the user request, channel = self.make_request("GET", "/_matrix/client/r0/admin/register") self.render(request) + self.assertEqual(channel.code, 200) nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) From 1def298119502a8aaf58ba431286e63a7f38e046 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 17 Jul 2019 17:47:07 +0100 Subject: [PATCH 67/80] Improve `Depends` specs in debian package. (#5675) This is basically a contrived way of adding a `Recommends` on `libpq5`, to fix #5653. The way this is supposed to happen in debhelper is to run `dh_shlibdeps`, which in turn runs `dpkg-shlibdeps`, which spits things out into `debian/.substvars` whence they can later be included by `control`. Previously, we had disabled `dh_shlibdeps`, mostly because `dpkg-shlibdeps` gets confused about PIL's interdependent objects, but that's not really the right thing to do and there is another way to work around that. Since we don't always use postgres, we don't necessarily want a hard Depends on libpq5, so I've actually ended up adding an explicit invocation of `dpkg-shlibdeps` for `psycopg2`. I've also updated the build-depends list for the package, which was missing a couple of entries. 
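For context on the machinery involved: `dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars -pshlibs1 -dRecommends` inspects the shared libraries that the given objects link against and records a substitution variable in the named substvars file. The exact value depends on the build environment, but for psycopg2 it would come out roughly as `shlibs1:Recommends=libpq5 (>= 9.1)`, and the `${shlibs1:Recommends}` placeholder in debian/control is expanded from that file when the binary package's control data is generated.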
--- changelog.d/5675.doc | 1 + debian/changelog | 3 +++ debian/control | 7 +++++++ debian/rules | 14 ++++++++++++++ docker/Dockerfile-dhvirtualenv | 3 +++ docs/postgres.rst | 8 +++++--- 6 files changed, 33 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5675.doc diff --git a/changelog.d/5675.doc b/changelog.d/5675.doc new file mode 100644 index 000000000000..4cd4d0be1a5a --- /dev/null +++ b/changelog.d/5675.doc @@ -0,0 +1 @@ +Minor tweaks to postgres documentation. diff --git a/debian/changelog b/debian/changelog index b964cf90a2b8..8aba444f1d67 100644 --- a/debian/changelog +++ b/debian/changelog @@ -3,6 +3,9 @@ matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium [ Amber Brown ] * Update logging config defaults to match API changes in Synapse. + [ Richard van der Hoff ] + * Add Recommends and Depends for some libraries which you probably want. + -- Erik Johnston Thu, 04 Jul 2019 13:59:02 +0100 matrix-synapse-py3 (1.1.0) stable; urgency=medium diff --git a/debian/control b/debian/control index 4abfa020515f..9e679c9d428f 100644 --- a/debian/control +++ b/debian/control @@ -2,16 +2,20 @@ Source: matrix-synapse-py3 Section: contrib/python Priority: extra Maintainer: Synapse Packaging team +# keep this list in sync with the build dependencies in docker/Dockerfile-dhvirtualenv. Build-Depends: debhelper (>= 9), dh-systemd, dh-virtualenv (>= 1.1), + libsystemd-dev, + libpq-dev, lsb-release, python3-dev, python3, python3-setuptools, python3-pip, python3-venv, + libsqlite3-dev, tar, Standards-Version: 3.9.8 Homepage: https://github.com/matrix-org/synapse @@ -28,9 +32,12 @@ Depends: debconf, python3-distutils|libpython3-stdlib (<< 3.6), ${misc:Depends}, + ${shlibs:Depends}, ${synapse:pydepends}, # some of our scripts use perl, but none of them are important, # so we put perl:Depends in Suggests rather than Depends. +Recommends: + ${shlibs1:Recommends}, Suggests: sqlite3, ${perl:Depends}, diff --git a/debian/rules b/debian/rules index 05cbbdde0844..a4d2ce2ba4cd 100755 --- a/debian/rules +++ b/debian/rules @@ -3,15 +3,29 @@ # Build Debian package using https://github.com/spotify/dh-virtualenv # +# assume we only have one package +PACKAGE_NAME:=`dh_listpackages` + override_dh_systemd_enable: dh_systemd_enable --name=matrix-synapse override_dh_installinit: dh_installinit --name=matrix-synapse +# we don't really want to strip the symbols from our object files. override_dh_strip: override_dh_shlibdeps: + # make the postgres package's dependencies a recommendation + # rather than a hard dependency. + find debian/$(PACKAGE_NAME)/ -path '*/site-packages/psycopg2/*.so' | \ + xargs dpkg-shlibdeps -Tdebian/$(PACKAGE_NAME).substvars \ + -pshlibs1 -dRecommends + + # all the other dependencies can be normal 'Depends' requirements, + # except for PIL's, which is self-contained and which confuses + # dpkg-shlibdeps. + dh_shlibdeps -X site-packages/PIL/.libs -X site-packages/psycopg2 override_dh_virtualenv: ./debian/build_virtualenv diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index ceedbad68afd..0117ab8bcc07 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -43,6 +43,9 @@ RUN cd dh-virtualenv-1.1 && dpkg-buildpackage -us -uc -b FROM ${distro} # Install the build dependencies +# +# NB: keep this list in sync with the list of build-deps in debian/control +# TODO: it would be nice to do that automatically. 
RUN apt-get update -qq -o Acquire::Languages=none \ && env DEBIAN_FRONTEND=noninteractive apt-get install \ -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \ diff --git a/docs/postgres.rst b/docs/postgres.rst index 0ae52ccbd814..e08a5116b98f 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -11,7 +11,9 @@ a postgres database. * If you are using the `matrix.org debian/ubuntu packages <../INSTALL.md#matrixorg-packages>`_, - the necessary libraries will already be installed. + the necessary python library will already be installed, but you will need to + ensure the low-level postgres library is installed, which you can do with + ``apt install libpq5``. * For other pre-built packages, please consult the documentation from the relevant package. @@ -34,7 +36,7 @@ Assuming your PostgreSQL database user is called ``postgres``, create a user su - postgres createuser --pwprompt synapse_user -Before you can authenticate with the ``synapse_user``, you must create a +Before you can authenticate with the ``synapse_user``, you must create a database that it can access. To create a database, first connect to the database with your database user:: @@ -53,7 +55,7 @@ and then run:: This would create an appropriate database named ``synapse`` owned by the ``synapse_user`` user (which must already have been created as above). -Note that the PostgreSQL database *must* have the correct encoding set (as +Note that the PostgreSQL database *must* have the correct encoding set (as shown above), otherwise it will not be able to store UTF8 strings. You may need to enable password authentication so ``synapse_user`` can connect From 9c70a02a9cddf36521c3d6ae6f72e3b46a5f4c2d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 17 Jul 2019 19:08:02 +0100 Subject: [PATCH 68/80] Ignore redactions of m.room.create events (#5701) --- changelog.d/5701.bugfix | 1 + synapse/api/auth.py | 15 ------------- synapse/handlers/message.py | 33 ++++++++++++++++++++-------- synapse/storage/events_worker.py | 12 ++++++++++ tests/rest/client/test_redactions.py | 20 +++++++++++++++++ 5 files changed, 57 insertions(+), 24 deletions(-) create mode 100644 changelog.d/5701.bugfix diff --git a/changelog.d/5701.bugfix b/changelog.d/5701.bugfix new file mode 100644 index 000000000000..fd2866e16abe --- /dev/null +++ b/changelog.d/5701.bugfix @@ -0,0 +1 @@ +Ignore redactions of m.room.create events. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index d9e943c39c83..7ce6540bddd3 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -606,21 +606,6 @@ def compute_auth_events(self, event, current_state_ids, for_verification=False): defer.returnValue(auth_ids) - def check_redaction(self, room_version, event, auth_events): - """Check whether the event sender is allowed to redact the target event. - - Returns: - True if the the sender is allowed to redact the target event if the - target event was created by them. - False if the sender is allowed to redact the target event with no - further checks. - - Raises: - AuthError if the event sender is definitely not allowed to redact - the target event. 
- """ - return event_auth.check_redaction(room_version, event, auth_events) - @defer.inlineCallbacks def check_can_change_room_list(self, room_id, user): """Check if the user is allowed to edit the room's entry in the diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index eaeda7a5cbb2..6d7a987f1333 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -23,6 +23,7 @@ from twisted.internet import defer from twisted.internet.defer import succeed +from synapse import event_auth from synapse.api.constants import EventTypes, Membership, RelationTypes from synapse.api.errors import ( AuthError, @@ -784,6 +785,20 @@ def is_inviter_member_event(e): event.signatures.update(returned_invite.signatures) if event.type == EventTypes.Redaction: + original_event = yield self.store.get_event( + event.redacts, + check_redacted=False, + get_prev_content=False, + allow_rejected=False, + allow_none=True, + check_room_id=event.room_id, + ) + + # we can make some additional checks now if we have the original event. + if original_event: + if original_event.type == EventTypes.Create: + raise AuthError(403, "Redacting create events is not permitted") + prev_state_ids = yield context.get_prev_state_ids(self.store) auth_events_ids = yield self.auth.compute_auth_events( event, prev_state_ids, for_verification=True @@ -791,18 +806,18 @@ def is_inviter_member_event(e): auth_events = yield self.store.get_events(auth_events_ids) auth_events = {(e.type, e.state_key): e for e in auth_events.values()} room_version = yield self.store.get_room_version(event.room_id) - if self.auth.check_redaction(room_version, event, auth_events=auth_events): - original_event = yield self.store.get_event( - event.redacts, - check_redacted=False, - get_prev_content=False, - allow_rejected=False, - allow_none=False, - ) + + if event_auth.check_redaction(room_version, event, auth_events=auth_events): + # this user doesn't have 'redact' rights, so we need to do some more + # checks on the original event. Let's start by checking the original + # event exists. + if not original_event: + raise NotFoundError("Could not find event %s" % (event.redacts,)) + if event.user_id != original_event.user_id: raise AuthError(403, "You don't have permission to redact events") - # We've already checked. + # all the checks are done. event.internal_metadata.recheck_redaction = False if event.type == EventTypes.Create: diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 1d969d70be4d..858fc755a121 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -259,6 +259,14 @@ def get_events_as_list( continue original_event = original_event_entry.event + if original_event.type == EventTypes.Create: + # we never serve redactions of Creates to clients. + logger.info( + "Withholding redaction %s of create event %s", + event_id, + redacted_event_id, + ) + continue if entry.event.internal_metadata.need_to_check_redaction(): original_domain = get_domain_from_id(original_event.sender) @@ -617,6 +625,10 @@ def _maybe_redact_event_row(self, original_ev, redactions): Deferred[EventBase|None]: if the event should be redacted, a pruned event object. Otherwise, None. """ + if original_ev.type == "m.room.create": + # we choose to ignore redactions of m.room.create events. 
+ return None + redaction_map = yield self._get_events_from_cache_or_db(redactions) for redaction_id in redactions: diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py index 7d5df9585503..fe66e397c4fc 100644 --- a/tests/rest/client/test_redactions.py +++ b/tests/rest/client/test_redactions.py @@ -157,3 +157,23 @@ def test_redact_nonexistent_event(self): self.assertEqual(timeline[-2]["event_id"], msg_id) self.assertEqual(timeline[-2]["unsigned"]["redacted_by"], redaction_id) self.assertEqual(timeline[-2]["content"], {}) + + def test_redact_create_event(self): + # control case: an existing event + b = self.helper.send(room_id=self.room_id, tok=self.mod_access_token) + msg_id = b["event_id"] + self._redact_event(self.mod_access_token, self.room_id, msg_id) + + # sync the room, to get the id of the create event + timeline = self._sync_room_timeline(self.other_access_token, self.room_id) + create_event_id = timeline[0]["event_id"] + + # room moderators cannot send redactions for create events + self._redact_event( + self.mod_access_token, self.room_id, create_event_id, expect_code=403 + ) + + # and nor can normals + self._redact_event( + self.other_access_token, self.room_id, create_event_id, expect_code=403 + ) From fa8271c5ac034bdffe080dd6b0591bef7aeadd81 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 18 Jul 2019 11:46:47 +0100 Subject: [PATCH 69/80] Convert synapse.federation.transport.server to async (#5689) * Convert BaseFederationServlet._wrap to async Empirically, this fixes some lost stacktraces. It should be safe because the wrapped function is called from JsonResource._async_render, which is already async. * Convert the rest of synapse.federation.transport.server to async We may as well do the whole file while we're here. * changelog * flake8 --- changelog.d/5689.misc | 1 + synapse/federation/transport/server.py | 430 +++++++++++-------------- 2 files changed, 189 insertions(+), 242 deletions(-) create mode 100644 changelog.d/5689.misc diff --git a/changelog.d/5689.misc b/changelog.d/5689.misc new file mode 100644 index 000000000000..8aa3e3f6a220 --- /dev/null +++ b/changelog.d/5689.misc @@ -0,0 +1 @@ +Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. \ No newline at end of file diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index c45d458d946d..663264dec48d 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,8 +19,6 @@ import logging import re -from twisted.internet import defer - import synapse import synapse.logging.opentracing as opentracing from synapse.api.errors import Codes, FederationDeniedError, SynapseError @@ -103,8 +102,7 @@ def __init__(self, hs): self.federation_domain_whitelist = hs.config.federation_domain_whitelist # A method just so we can pass 'self' as the authenticator to the Servlets - @defer.inlineCallbacks - def authenticate_request(self, request, content): + async def authenticate_request(self, request, content): now = self._clock.time_msec() json_request = { "method": request.method.decode("ascii"), @@ -142,7 +140,7 @@ def authenticate_request(self, request, content): 401, "Missing Authorization headers", Codes.UNAUTHORIZED ) - yield self.keyring.verify_json_for_server( + await self.keyring.verify_json_for_server( origin, json_request, now, "Incoming request" ) @@ -151,17 +149,16 @@ def authenticate_request(self, request, content): # If we get a valid signed request from the other side, its probably # alive - retry_timings = yield self.store.get_destination_retry_timings(origin) + retry_timings = await self.store.get_destination_retry_timings(origin) if retry_timings and retry_timings["retry_last_ts"]: run_in_background(self._reset_retry_timings, origin) - defer.returnValue(origin) + return origin - @defer.inlineCallbacks - def _reset_retry_timings(self, origin): + async def _reset_retry_timings(self, origin): try: logger.info("Marking origin %r as up", origin) - yield self.store.set_destination_retry_timings(origin, 0, 0) + await self.store.set_destination_retry_timings(origin, 0, 0) except Exception: logger.exception("Error resetting retry timings on %s", origin) @@ -215,7 +212,8 @@ class BaseFederationServlet(object): match against the request path (excluding the /federation/v1 prefix). The servlet should also implement one or more of on_GET, on_POST, on_PUT, to match - the appropriate HTTP method. These methods have the signature: + the appropriate HTTP method. These methods must be *asynchronous* and have the + signature: on_(self, origin, content, query, **kwargs) @@ -235,7 +233,7 @@ class BaseFederationServlet(object): components as specified in the path match regexp. Returns: - Deferred[(int, object)|None]: either (response code, response object) to + Optional[Tuple[int, object]]: either (response code, response object) to return a JSON response, or None if the request has already been handled. Raises: @@ -258,10 +256,9 @@ def _wrap(self, func): authenticator = self.authenticator ratelimiter = self.ratelimiter - @defer.inlineCallbacks @functools.wraps(func) - def new_func(request, *args, **kwargs): - """ A callback which can be passed to HttpServer.RegisterPaths + async def new_func(request, *args, **kwargs): + """A callback which can be passed to HttpServer.RegisterPaths Args: request (twisted.web.http.Request): @@ -270,8 +267,8 @@ def new_func(request, *args, **kwargs): components as specified in the path match regexp. Returns: - Deferred[(int, object)|None]: (response code, response object) as returned - by the callback method. None if the request has already been handled. + Tuple[int, object]|None: (response code, response object) as returned by + the callback method. None if the request has already been handled. 
""" content = None if request.method in [b"PUT", b"POST"]: @@ -279,7 +276,7 @@ def new_func(request, *args, **kwargs): content = parse_json_object_from_request(request) try: - origin = yield authenticator.authenticate_request(request, content) + origin = await authenticator.authenticate_request(request, content) except NoAuthenticationError: origin = None if self.REQUIRE_AUTH: @@ -304,16 +301,16 @@ def new_func(request, *args, **kwargs): ): if origin: with ratelimiter.ratelimit(origin) as d: - yield d - response = yield func( + await d + response = await func( origin, content, request.args, *args, **kwargs ) else: - response = yield func( + response = await func( origin, content, request.args, *args, **kwargs ) - defer.returnValue(response) + return response # Extra logic that functools.wraps() doesn't finish new_func.__self__ = func.__self__ @@ -341,8 +338,7 @@ def __init__(self, handler, server_name, **kwargs): self.server_name = server_name # This is when someone is trying to send us a bunch of data. - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, transaction_id): + async def on_PUT(self, origin, content, query, transaction_id): """ Called on PUT /send// Args: @@ -351,7 +347,7 @@ def on_PUT(self, origin, content, query, transaction_id): request. This is *not* None. Returns: - Deferred: Results in a tuple of `(code, response)`, where + Tuple of `(code, response)`, where `response` is a python dict to be converted into JSON that is used as the response body. """ @@ -380,34 +376,33 @@ def on_PUT(self, origin, content, query, transaction_id): except Exception as e: logger.exception(e) - defer.returnValue((400, {"error": "Invalid transaction"})) - return + return 400, {"error": "Invalid transaction"} try: - code, response = yield self.handler.on_incoming_transaction( + code, response = await self.handler.on_incoming_transaction( origin, transaction_data ) except Exception: logger.exception("on_incoming_transaction failed") raise - defer.returnValue((code, response)) + return code, response class FederationEventServlet(BaseFederationServlet): PATH = "/event/(?P[^/]*)/?" # This is when someone asks for a data item for a given server data_id pair. - def on_GET(self, origin, content, query, event_id): - return self.handler.on_pdu_request(origin, event_id) + async def on_GET(self, origin, content, query, event_id): + return await self.handler.on_pdu_request(origin, event_id) class FederationStateServlet(BaseFederationServlet): PATH = "/state/(?P[^/]*)/?" # This is when someone asks for all data for a given context. - def on_GET(self, origin, content, query, context): - return self.handler.on_context_state_request( + async def on_GET(self, origin, content, query, context): + return await self.handler.on_context_state_request( origin, context, parse_string_from_args(query, "event_id", None, required=True), @@ -417,8 +412,8 @@ def on_GET(self, origin, content, query, context): class FederationStateIdsServlet(BaseFederationServlet): PATH = "/state_ids/(?P[^/]*)/?" - def on_GET(self, origin, content, query, room_id): - return self.handler.on_state_ids_request( + async def on_GET(self, origin, content, query, room_id): + return await self.handler.on_state_ids_request( origin, room_id, parse_string_from_args(query, "event_id", None, required=True), @@ -428,22 +423,22 @@ def on_GET(self, origin, content, query, room_id): class FederationBackfillServlet(BaseFederationServlet): PATH = "/backfill/(?P[^/]*)/?" 
- def on_GET(self, origin, content, query, context): + async def on_GET(self, origin, content, query, context): versions = [x.decode("ascii") for x in query[b"v"]] limit = parse_integer_from_args(query, "limit", None) if not limit: - return defer.succeed((400, {"error": "Did not include limit param"})) + return 400, {"error": "Did not include limit param"} - return self.handler.on_backfill_request(origin, context, versions, limit) + return await self.handler.on_backfill_request(origin, context, versions, limit) class FederationQueryServlet(BaseFederationServlet): PATH = "/query/(?P[^/]*)" # This is when we receive a server-server Query - def on_GET(self, origin, content, query, query_type): - return self.handler.on_query_request( + async def on_GET(self, origin, content, query, query_type): + return await self.handler.on_query_request( query_type, {k.decode("utf8"): v[0].decode("utf-8") for k, v in query.items()}, ) @@ -452,8 +447,7 @@ def on_GET(self, origin, content, query, query_type): class FederationMakeJoinServlet(BaseFederationServlet): PATH = "/make_join/(?P[^/]*)/(?P[^/]*)" - @defer.inlineCallbacks - def on_GET(self, origin, _content, query, context, user_id): + async def on_GET(self, origin, _content, query, context, user_id): """ Args: origin (unicode): The authenticated server_name of the calling server @@ -466,8 +460,7 @@ def on_GET(self, origin, _content, query, context, user_id): components as specified in the path match regexp. Returns: - Deferred[(int, object)|None]: either (response code, response object) to - return a JSON response, or None if the request has already been handled. + Tuple[int, object]: (response code, response object) """ versions = query.get(b"ver") if versions is not None: @@ -475,64 +468,60 @@ def on_GET(self, origin, _content, query, context, user_id): else: supported_versions = ["1"] - content = yield self.handler.on_make_join_request( + content = await self.handler.on_make_join_request( origin, context, user_id, supported_versions=supported_versions ) - defer.returnValue((200, content)) + return 200, content class FederationMakeLeaveServlet(BaseFederationServlet): PATH = "/make_leave/(?P[^/]*)/(?P[^/]*)" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, context, user_id): - content = yield self.handler.on_make_leave_request(origin, context, user_id) - defer.returnValue((200, content)) + async def on_GET(self, origin, content, query, context, user_id): + content = await self.handler.on_make_leave_request(origin, context, user_id) + return 200, content class FederationSendLeaveServlet(BaseFederationServlet): PATH = "/send_leave/(?P[^/]*)/(?P[^/]*)" - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, room_id, event_id): - content = yield self.handler.on_send_leave_request(origin, content, room_id) - defer.returnValue((200, content)) + async def on_PUT(self, origin, content, query, room_id, event_id): + content = await self.handler.on_send_leave_request(origin, content, room_id) + return 200, content class FederationEventAuthServlet(BaseFederationServlet): PATH = "/event_auth/(?P[^/]*)/(?P[^/]*)" - def on_GET(self, origin, content, query, context, event_id): - return self.handler.on_event_auth(origin, context, event_id) + async def on_GET(self, origin, content, query, context, event_id): + return await self.handler.on_event_auth(origin, context, event_id) class FederationSendJoinServlet(BaseFederationServlet): PATH = "/send_join/(?P[^/]*)/(?P[^/]*)" - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, 
context, event_id): + async def on_PUT(self, origin, content, query, context, event_id): # TODO(paul): assert that context/event_id parsed from path actually # match those given in content - content = yield self.handler.on_send_join_request(origin, content, context) - defer.returnValue((200, content)) + content = await self.handler.on_send_join_request(origin, content, context) + return 200, content class FederationV1InviteServlet(BaseFederationServlet): PATH = "/invite/(?P[^/]*)/(?P[^/]*)" - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, context, event_id): + async def on_PUT(self, origin, content, query, context, event_id): # We don't get a room version, so we have to assume its EITHER v1 or # v2. This is "fine" as the only difference between V1 and V2 is the # state resolution algorithm, and we don't use that for processing # invites - content = yield self.handler.on_invite_request( + content = await self.handler.on_invite_request( origin, content, room_version=RoomVersions.V1.identifier ) # V1 federation API is defined to return a content of `[200, {...}]` # due to a historical bug. - defer.returnValue((200, (200, content))) + return 200, (200, content) class FederationV2InviteServlet(BaseFederationServlet): @@ -540,8 +529,7 @@ class FederationV2InviteServlet(BaseFederationServlet): PREFIX = FEDERATION_V2_PREFIX - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, context, event_id): + async def on_PUT(self, origin, content, query, context, event_id): # TODO(paul): assert that context/event_id parsed from path actually # match those given in content @@ -554,69 +542,65 @@ def on_PUT(self, origin, content, query, context, event_id): event.setdefault("unsigned", {})["invite_room_state"] = invite_room_state - content = yield self.handler.on_invite_request( + content = await self.handler.on_invite_request( origin, event, room_version=room_version ) - defer.returnValue((200, content)) + return 200, content class FederationThirdPartyInviteExchangeServlet(BaseFederationServlet): PATH = "/exchange_third_party_invite/(?P[^/]*)" - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, room_id): - content = yield self.handler.on_exchange_third_party_invite_request( + async def on_PUT(self, origin, content, query, room_id): + content = await self.handler.on_exchange_third_party_invite_request( origin, room_id, content ) - defer.returnValue((200, content)) + return 200, content class FederationClientKeysQueryServlet(BaseFederationServlet): PATH = "/user/keys/query" - def on_POST(self, origin, content, query): - return self.handler.on_query_client_keys(origin, content) + async def on_POST(self, origin, content, query): + return await self.handler.on_query_client_keys(origin, content) class FederationUserDevicesQueryServlet(BaseFederationServlet): PATH = "/user/devices/(?P[^/]*)" - def on_GET(self, origin, content, query, user_id): - return self.handler.on_query_user_devices(origin, user_id) + async def on_GET(self, origin, content, query, user_id): + return await self.handler.on_query_user_devices(origin, user_id) class FederationClientKeysClaimServlet(BaseFederationServlet): PATH = "/user/keys/claim" - @defer.inlineCallbacks - def on_POST(self, origin, content, query): - response = yield self.handler.on_claim_client_keys(origin, content) - defer.returnValue((200, response)) + async def on_POST(self, origin, content, query): + response = await self.handler.on_claim_client_keys(origin, content) + return 200, response class 
FederationQueryAuthServlet(BaseFederationServlet): PATH = "/query_auth/(?P[^/]*)/(?P[^/]*)" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, context, event_id): - new_content = yield self.handler.on_query_auth_request( + async def on_POST(self, origin, content, query, context, event_id): + new_content = await self.handler.on_query_auth_request( origin, content, context, event_id ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGetMissingEventsServlet(BaseFederationServlet): # TODO(paul): Why does this path alone end with "/?" optional? PATH = "/get_missing_events/(?P[^/]*)/?" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, room_id): + async def on_POST(self, origin, content, query, room_id): limit = int(content.get("limit", 10)) earliest_events = content.get("earliest_events", []) latest_events = content.get("latest_events", []) - content = yield self.handler.on_get_missing_events( + content = await self.handler.on_get_missing_events( origin, room_id=room_id, earliest_events=earliest_events, @@ -624,7 +608,7 @@ def on_POST(self, origin, content, query, room_id): limit=limit, ) - defer.returnValue((200, content)) + return 200, content class On3pidBindServlet(BaseFederationServlet): @@ -632,8 +616,7 @@ class On3pidBindServlet(BaseFederationServlet): REQUIRE_AUTH = False - @defer.inlineCallbacks - def on_POST(self, origin, content, query): + async def on_POST(self, origin, content, query): if "invites" in content: last_exception = None for invite in content["invites"]: @@ -645,7 +628,7 @@ def on_POST(self, origin, content, query): ) logger.info(message) raise SynapseError(400, message) - yield self.handler.exchange_third_party_invite( + await self.handler.exchange_third_party_invite( invite["sender"], invite["mxid"], invite["room_id"], @@ -655,7 +638,7 @@ def on_POST(self, origin, content, query): last_exception = e if last_exception: raise last_exception - defer.returnValue((200, {})) + return 200, {} class OpenIdUserInfo(BaseFederationServlet): @@ -679,29 +662,26 @@ class OpenIdUserInfo(BaseFederationServlet): REQUIRE_AUTH = False - @defer.inlineCallbacks - def on_GET(self, origin, content, query): + async def on_GET(self, origin, content, query): token = query.get(b"access_token", [None])[0] if token is None: - defer.returnValue( - (401, {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"}) + return ( + 401, + {"errcode": "M_MISSING_TOKEN", "error": "Access Token required"}, ) - return - user_id = yield self.handler.on_openid_userinfo(token.decode("ascii")) + user_id = await self.handler.on_openid_userinfo(token.decode("ascii")) if user_id is None: - defer.returnValue( - ( - 401, - { - "errcode": "M_UNKNOWN_TOKEN", - "error": "Access Token unknown or expired", - }, - ) + return ( + 401, + { + "errcode": "M_UNKNOWN_TOKEN", + "error": "Access Token unknown or expired", + }, ) - defer.returnValue((200, {"sub": user_id})) + return 200, {"sub": user_id} class PublicRoomList(BaseFederationServlet): @@ -743,8 +723,7 @@ def __init__(self, handler, authenticator, ratelimiter, server_name, allow_acces ) self.allow_access = allow_access - @defer.inlineCallbacks - def on_GET(self, origin, content, query): + async def on_GET(self, origin, content, query): if not self.allow_access: raise FederationDeniedError(origin) @@ -764,10 +743,10 @@ def on_GET(self, origin, content, query): else: network_tuple = ThirdPartyInstanceID(None, None) - data = yield self.handler.get_local_public_room_list( + data = await 
self.handler.get_local_public_room_list( limit, since_token, network_tuple=network_tuple, from_federation=True ) - defer.returnValue((200, data)) + return 200, data class FederationVersionServlet(BaseFederationServlet): @@ -775,12 +754,10 @@ class FederationVersionServlet(BaseFederationServlet): REQUIRE_AUTH = False - def on_GET(self, origin, content, query): - return defer.succeed( - ( - 200, - {"server": {"name": "Synapse", "version": get_version_string(synapse)}}, - ) + async def on_GET(self, origin, content, query): + return ( + 200, + {"server": {"name": "Synapse", "version": get_version_string(synapse)}}, ) @@ -790,41 +767,38 @@ class FederationGroupsProfileServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/profile" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.get_group_profile(group_id, requester_user_id) + new_content = await self.handler.get_group_profile(group_id, requester_user_id) - defer.returnValue((200, new_content)) + return 200, new_content - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id): + async def on_POST(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.update_group_profile( + new_content = await self.handler.update_group_profile( group_id, requester_user_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsSummaryServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/summary" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.get_group_summary(group_id, requester_user_id) + new_content = await self.handler.get_group_summary(group_id, requester_user_id) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsRoomsServlet(BaseFederationServlet): @@ -833,15 +807,14 @@ class FederationGroupsRoomsServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/rooms" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.get_rooms_in_group(group_id, requester_user_id) + new_content = await self.handler.get_rooms_in_group(group_id, requester_user_id) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsAddRoomsServlet(BaseFederationServlet): @@ -850,29 +823,27 @@ class FederationGroupsAddRoomsServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/room/(?P[^/]*)" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, room_id): + async def on_POST(self, origin, content, 
query, group_id, room_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.add_room_to_group( + new_content = await self.handler.add_room_to_group( group_id, requester_user_id, room_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content - @defer.inlineCallbacks - def on_DELETE(self, origin, content, query, group_id, room_id): + async def on_DELETE(self, origin, content, query, group_id, room_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.remove_room_from_group( + new_content = await self.handler.remove_room_from_group( group_id, requester_user_id, room_id ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet): @@ -884,17 +855,16 @@ class FederationGroupsAddRoomsConfigServlet(BaseFederationServlet): "/config/(?P[^/]*)" ) - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, room_id, config_key): + async def on_POST(self, origin, content, query, group_id, room_id, config_key): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - result = yield self.groups_handler.update_room_in_group( + result = await self.groups_handler.update_room_in_group( group_id, requester_user_id, room_id, config_key, content ) - defer.returnValue((200, result)) + return 200, result class FederationGroupsUsersServlet(BaseFederationServlet): @@ -903,15 +873,14 @@ class FederationGroupsUsersServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/users" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.get_users_in_group(group_id, requester_user_id) + new_content = await self.handler.get_users_in_group(group_id, requester_user_id) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsInvitedUsersServlet(BaseFederationServlet): @@ -920,17 +889,16 @@ class FederationGroupsInvitedUsersServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/invited_users" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.get_invited_users_in_group( + new_content = await self.handler.get_invited_users_in_group( group_id, requester_user_id ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsInviteServlet(BaseFederationServlet): @@ -939,17 +907,16 @@ class FederationGroupsInviteServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/invite" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, 
user_id): + async def on_POST(self, origin, content, query, group_id, user_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.invite_to_group( + new_content = await self.handler.invite_to_group( group_id, user_id, requester_user_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsAcceptInviteServlet(BaseFederationServlet): @@ -958,14 +925,13 @@ class FederationGroupsAcceptInviteServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/accept_invite" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, user_id): + async def on_POST(self, origin, content, query, group_id, user_id): if get_domain_from_id(user_id) != origin: raise SynapseError(403, "user_id doesn't match origin") - new_content = yield self.handler.accept_invite(group_id, user_id, content) + new_content = await self.handler.accept_invite(group_id, user_id, content) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsJoinServlet(BaseFederationServlet): @@ -974,14 +940,13 @@ class FederationGroupsJoinServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/join" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, user_id): + async def on_POST(self, origin, content, query, group_id, user_id): if get_domain_from_id(user_id) != origin: raise SynapseError(403, "user_id doesn't match origin") - new_content = yield self.handler.join_group(group_id, user_id, content) + new_content = await self.handler.join_group(group_id, user_id, content) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsRemoveUserServlet(BaseFederationServlet): @@ -990,17 +955,16 @@ class FederationGroupsRemoveUserServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/users/(?P[^/]*)/remove" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, user_id): + async def on_POST(self, origin, content, query, group_id, user_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.remove_user_from_group( + new_content = await self.handler.remove_user_from_group( group_id, user_id, requester_user_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsLocalInviteServlet(BaseFederationServlet): @@ -1009,14 +973,13 @@ class FederationGroupsLocalInviteServlet(BaseFederationServlet): PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/invite" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, user_id): + async def on_POST(self, origin, content, query, group_id, user_id): if get_domain_from_id(group_id) != origin: raise SynapseError(403, "group_id doesn't match origin") - new_content = yield self.handler.on_invite(group_id, user_id, content) + new_content = await self.handler.on_invite(group_id, user_id, content) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet): @@ -1025,16 +988,15 @@ class FederationGroupsRemoveLocalUserServlet(BaseFederationServlet): PATH = "/groups/local/(?P[^/]*)/users/(?P[^/]*)/remove" - @defer.inlineCallbacks - def on_POST(self, 
origin, content, query, group_id, user_id): + async def on_POST(self, origin, content, query, group_id, user_id): if get_domain_from_id(group_id) != origin: raise SynapseError(403, "user_id doesn't match origin") - new_content = yield self.handler.user_removed_from_group( + new_content = await self.handler.user_removed_from_group( group_id, user_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): @@ -1043,15 +1005,14 @@ class FederationGroupsRenewAttestaionServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/renew_attestation/(?P[^/]*)" - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, user_id): + async def on_POST(self, origin, content, query, group_id, user_id): # We don't need to check auth here as we check the attestation signatures - new_content = yield self.handler.on_renew_attestation( + new_content = await self.handler.on_renew_attestation( group_id, user_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): @@ -1068,8 +1029,7 @@ class FederationGroupsSummaryRoomsServlet(BaseFederationServlet): "/rooms/(?P[^/]*)" ) - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, category_id, room_id): + async def on_POST(self, origin, content, query, group_id, category_id, room_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1077,7 +1037,7 @@ def on_POST(self, origin, content, query, group_id, category_id, room_id): if category_id == "": raise SynapseError(400, "category_id cannot be empty string") - resp = yield self.handler.update_group_summary_room( + resp = await self.handler.update_group_summary_room( group_id, requester_user_id, room_id=room_id, @@ -1085,10 +1045,9 @@ def on_POST(self, origin, content, query, group_id, category_id, room_id): content=content, ) - defer.returnValue((200, resp)) + return 200, resp - @defer.inlineCallbacks - def on_DELETE(self, origin, content, query, group_id, category_id, room_id): + async def on_DELETE(self, origin, content, query, group_id, category_id, room_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1096,11 +1055,11 @@ def on_DELETE(self, origin, content, query, group_id, category_id, room_id): if category_id == "": raise SynapseError(400, "category_id cannot be empty string") - resp = yield self.handler.delete_group_summary_room( + resp = await self.handler.delete_group_summary_room( group_id, requester_user_id, room_id=room_id, category_id=category_id ) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsCategoriesServlet(BaseFederationServlet): @@ -1109,15 +1068,14 @@ class FederationGroupsCategoriesServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/categories/?" 
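    # Each groups servlet method opens with the same three-line origin
    # guard; a hypothetical helper (not part of this patch) shows the
    # shared shape:
    #
    #     def _check_requester(query, origin):
    #         requester_user_id = parse_string_from_args(query, "requester_user_id")
    #         if get_domain_from_id(requester_user_id) != origin:
    #             raise SynapseError(403, "requester_user_id doesn't match origin")
    #         return requester_user_id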
- @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - resp = yield self.handler.get_group_categories(group_id, requester_user_id) + resp = await self.handler.get_group_categories(group_id, requester_user_id) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsCategoryServlet(BaseFederationServlet): @@ -1126,20 +1084,18 @@ class FederationGroupsCategoryServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/categories/(?P[^/]+)" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id, category_id): + async def on_GET(self, origin, content, query, group_id, category_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - resp = yield self.handler.get_group_category( + resp = await self.handler.get_group_category( group_id, requester_user_id, category_id ) - defer.returnValue((200, resp)) + return 200, resp - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, category_id): + async def on_POST(self, origin, content, query, group_id, category_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1147,14 +1103,13 @@ def on_POST(self, origin, content, query, group_id, category_id): if category_id == "": raise SynapseError(400, "category_id cannot be empty string") - resp = yield self.handler.upsert_group_category( + resp = await self.handler.upsert_group_category( group_id, requester_user_id, category_id, content ) - defer.returnValue((200, resp)) + return 200, resp - @defer.inlineCallbacks - def on_DELETE(self, origin, content, query, group_id, category_id): + async def on_DELETE(self, origin, content, query, group_id, category_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1162,11 +1117,11 @@ def on_DELETE(self, origin, content, query, group_id, category_id): if category_id == "": raise SynapseError(400, "category_id cannot be empty string") - resp = yield self.handler.delete_group_category( + resp = await self.handler.delete_group_category( group_id, requester_user_id, category_id ) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsRolesServlet(BaseFederationServlet): @@ -1175,15 +1130,14 @@ class FederationGroupsRolesServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/roles/?" 
- @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id): + async def on_GET(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - resp = yield self.handler.get_group_roles(group_id, requester_user_id) + resp = await self.handler.get_group_roles(group_id, requester_user_id) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsRoleServlet(BaseFederationServlet): @@ -1192,18 +1146,16 @@ class FederationGroupsRoleServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/roles/(?P[^/]+)" - @defer.inlineCallbacks - def on_GET(self, origin, content, query, group_id, role_id): + async def on_GET(self, origin, content, query, group_id, role_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - resp = yield self.handler.get_group_role(group_id, requester_user_id, role_id) + resp = await self.handler.get_group_role(group_id, requester_user_id, role_id) - defer.returnValue((200, resp)) + return 200, resp - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, role_id): + async def on_POST(self, origin, content, query, group_id, role_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1211,14 +1163,13 @@ def on_POST(self, origin, content, query, group_id, role_id): if role_id == "": raise SynapseError(400, "role_id cannot be empty string") - resp = yield self.handler.update_group_role( + resp = await self.handler.update_group_role( group_id, requester_user_id, role_id, content ) - defer.returnValue((200, resp)) + return 200, resp - @defer.inlineCallbacks - def on_DELETE(self, origin, content, query, group_id, role_id): + async def on_DELETE(self, origin, content, query, group_id, role_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1226,11 +1177,11 @@ def on_DELETE(self, origin, content, query, group_id, role_id): if role_id == "": raise SynapseError(400, "role_id cannot be empty string") - resp = yield self.handler.delete_group_role( + resp = await self.handler.delete_group_role( group_id, requester_user_id, role_id ) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsSummaryUsersServlet(BaseFederationServlet): @@ -1247,8 +1198,7 @@ class FederationGroupsSummaryUsersServlet(BaseFederationServlet): "/users/(?P[^/]*)" ) - @defer.inlineCallbacks - def on_POST(self, origin, content, query, group_id, role_id, user_id): + async def on_POST(self, origin, content, query, group_id, role_id, user_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1256,7 +1206,7 @@ def on_POST(self, origin, content, query, group_id, role_id, user_id): if role_id == "": raise SynapseError(400, "role_id cannot be empty string") - resp = yield self.handler.update_group_summary_user( + resp = await self.handler.update_group_summary_user( group_id, requester_user_id, user_id=user_id, @@ -1264,10 
+1214,9 @@ def on_POST(self, origin, content, query, group_id, role_id, user_id): content=content, ) - defer.returnValue((200, resp)) + return 200, resp - @defer.inlineCallbacks - def on_DELETE(self, origin, content, query, group_id, role_id, user_id): + async def on_DELETE(self, origin, content, query, group_id, role_id, user_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") @@ -1275,11 +1224,11 @@ def on_DELETE(self, origin, content, query, group_id, role_id, user_id): if role_id == "": raise SynapseError(400, "role_id cannot be empty string") - resp = yield self.handler.delete_group_summary_user( + resp = await self.handler.delete_group_summary_user( group_id, requester_user_id, user_id=user_id, role_id=role_id ) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): @@ -1288,13 +1237,12 @@ class FederationGroupsBulkPublicisedServlet(BaseFederationServlet): PATH = "/get_groups_publicised" - @defer.inlineCallbacks - def on_POST(self, origin, content, query): - resp = yield self.handler.bulk_get_publicised_groups( + async def on_POST(self, origin, content, query): + resp = await self.handler.bulk_get_publicised_groups( content["user_ids"], proxy=False ) - defer.returnValue((200, resp)) + return 200, resp class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): @@ -1303,17 +1251,16 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): PATH = "/groups/(?P[^/]*)/settings/m.join_policy" - @defer.inlineCallbacks - def on_PUT(self, origin, content, query, group_id): + async def on_PUT(self, origin, content, query, group_id): requester_user_id = parse_string_from_args(query, "requester_user_id") if get_domain_from_id(requester_user_id) != origin: raise SynapseError(403, "requester_user_id doesn't match origin") - new_content = yield self.handler.set_group_join_policy( + new_content = await self.handler.set_group_join_policy( group_id, requester_user_id, content ) - defer.returnValue((200, new_content)) + return 200, new_content class RoomComplexityServlet(BaseFederationServlet): @@ -1325,18 +1272,17 @@ class RoomComplexityServlet(BaseFederationServlet): PATH = "/rooms/(?P[^/]*)/complexity" PREFIX = FEDERATION_UNSTABLE_PREFIX - @defer.inlineCallbacks - def on_GET(self, origin, content, query, room_id): + async def on_GET(self, origin, content, query, room_id): store = self.handler.hs.get_datastore() - is_public = yield store.is_room_world_readable_or_publicly_joinable(room_id) + is_public = await store.is_room_world_readable_or_publicly_joinable(room_id) if not is_public: raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM) - complexity = yield store.get_room_complexity(room_id) - defer.returnValue((200, complexity)) + complexity = await store.get_room_complexity(room_id) + return 200, complexity FEDERATION_SERVLET_CLASSES = ( From b2a382efdb0cf7b68e194070b616f10732fa0f36 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 18 Jul 2019 14:41:42 +0100 Subject: [PATCH 70/80] Remove the ability to query relations when the original event was redacted. (#5629) Fixes #5594 Forbid viewing relations on an event once it has been redacted. 
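The user-visible effect, per the tests added below: once the target event is
redacted, the relations and aggregations endpoints still return 200 but with
an empty chunk. A sketch of the response shape (not captured output):

    GET /_matrix/client/unstable/rooms/{room_id}/aggregations/{event_id}/m.annotation/m.reaction
    {"chunk": []}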
--- changelog.d/5629.bugfix | 1 + synapse/events/__init__.py | 11 ++ synapse/events/utils.py | 16 ++- synapse/rest/client/v2_alpha/relations.py | 75 +++++++----- tests/rest/client/v2_alpha/test_relations.py | 116 ++++++++++++++++++- 5 files changed, 180 insertions(+), 39 deletions(-) create mode 100644 changelog.d/5629.bugfix diff --git a/changelog.d/5629.bugfix b/changelog.d/5629.bugfix new file mode 100644 index 000000000000..672eabad4085 --- /dev/null +++ b/changelog.d/5629.bugfix @@ -0,0 +1 @@ +Forbid viewing relations on an event once it has been redacted. diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index d3de70e671c9..88ed6d764f34 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -104,6 +104,17 @@ def should_proactively_send(self): """ return getattr(self, "proactively_send", True) + def is_redacted(self): + """Whether the event has been redacted. + + This is used for efficiently checking whether an event has been + marked as redacted without needing to make another database call. + + Returns: + bool + """ + return getattr(self, "redacted", False) + def _event_dict_property(key): # We want to be able to use hasattr with the event dict properties. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 987de5cab7a3..9487a886f580 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -52,10 +52,15 @@ def prune_event(event): from . import event_type_from_format_version - return event_type_from_format_version(event.format_version)( + pruned_event = event_type_from_format_version(event.format_version)( pruned_event_dict, event.internal_metadata.get_dict() ) + # Mark the event as redacted + pruned_event.internal_metadata.redacted = True + + return pruned_event + def prune_event_dict(event_dict): """Redacts the event_dict in the same way as `prune_event`, except it @@ -360,9 +365,12 @@ def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs): event_id = event.event_id serialized_event = serialize_event(event, time_now, **kwargs) - # If MSC1849 is enabled then we need to look if thre are any relations - # we need to bundle in with the event - if self.experimental_msc1849_support_enabled and bundle_aggregations: + # If MSC1849 is enabled then we need to look if there are any relations + # we need to bundle in with the event. 
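        # The flag consulted below is set by prune_event() above and read
        # back via a getattr() default, so (per the is_redacted docstring)
        # the check needs no extra database call.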
+ # Do not bundle relations if the event has been redacted + if not event.internal_metadata.is_redacted() and ( + self.experimental_msc1849_support_enabled and bundle_aggregations + ): annotations = yield self.store.get_aggregation_groups_for_event(event_id) references = yield self.store.get_relations_for_event( event_id, RelationTypes.REFERENCE, direction="f" diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 7ce485b47124..6e52f6d284f7 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -34,6 +34,7 @@ from synapse.rest.client.transactions import HttpTransactionCache from synapse.storage.relations import ( AggregationPaginationToken, + PaginationChunk, RelationPaginationToken, ) @@ -153,23 +154,28 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non from_token = parse_string(request, "from") to_token = parse_string(request, "to") - if from_token: - from_token = RelationPaginationToken.from_string(from_token) - - if to_token: - to_token = RelationPaginationToken.from_string(to_token) - - result = yield self.store.get_relations_for_event( - event_id=parent_id, - relation_type=relation_type, - event_type=event_type, - limit=limit, - from_token=from_token, - to_token=to_token, - ) + if event.internal_metadata.is_redacted(): + # If the event is redacted, return an empty list of relations + pagination_chunk = PaginationChunk(chunk=[]) + else: + # Return the relations + if from_token: + from_token = RelationPaginationToken.from_string(from_token) + + if to_token: + to_token = RelationPaginationToken.from_string(to_token) + + pagination_chunk = yield self.store.get_relations_for_event( + event_id=parent_id, + relation_type=relation_type, + event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) events = yield self.store.get_events_as_list( - [c["event_id"] for c in result.chunk] + [c["event_id"] for c in pagination_chunk.chunk] ) now = self.clock.time_msec() @@ -186,7 +192,7 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non events, now, bundle_aggregations=False ) - return_value = result.to_dict() + return_value = pagination_chunk.to_dict() return_value["chunk"] = events return_value["original_event"] = original_event @@ -234,7 +240,7 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non # This checks that a) the event exists and b) the user is allowed to # view it. 
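        # The return value is now captured (it previously was not) so that
        # the redaction gate can inspect event.internal_metadata.is_redacted().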
- yield self.event_handler.get_event(requester.user, room_id, parent_id) + event = yield self.event_handler.get_event(requester.user, room_id, parent_id) if relation_type not in (RelationTypes.ANNOTATION, None): raise SynapseError(400, "Relation type must be 'annotation'") @@ -243,21 +249,26 @@ def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=Non from_token = parse_string(request, "from") to_token = parse_string(request, "to") - if from_token: - from_token = AggregationPaginationToken.from_string(from_token) - - if to_token: - to_token = AggregationPaginationToken.from_string(to_token) - - res = yield self.store.get_aggregation_groups_for_event( - event_id=parent_id, - event_type=event_type, - limit=limit, - from_token=from_token, - to_token=to_token, - ) - - defer.returnValue((200, res.to_dict())) + if event.internal_metadata.is_redacted(): + # If the event is redacted, return an empty list of relations + pagination_chunk = PaginationChunk(chunk=[]) + else: + # Return the relations + if from_token: + from_token = AggregationPaginationToken.from_string(from_token) + + if to_token: + to_token = AggregationPaginationToken.from_string(to_token) + + pagination_chunk = yield self.store.get_aggregation_groups_for_event( + event_id=parent_id, + event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) + + defer.returnValue((200, pagination_chunk.to_dict())) class RelationAggregationGroupPaginationServlet(RestServlet): diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 58c69518520d..c7e585997083 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -93,7 +93,7 @@ def test_deny_membership(self): def test_deny_double_react(self): """Test that we deny relations on membership events """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") self.assertEquals(200, channel.code, channel.json_body) channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") @@ -540,14 +540,122 @@ def test_multi_edit(self): {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict ) + def test_relations_redaction_redacts_edits(self): + """Test that edits of an event are redacted when the original event + is redacted. 
+ """ + # Send a new event + res = self.helper.send(self.room, body="Heyo!", tok=self.user_token) + original_event_id = res["event_id"] + + # Add a relation + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + parent_id=original_event_id, + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "First edit"}, + }, + ) + self.assertEquals(200, channel.code, channel.json_body) + + # Check the relation is returned + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message" + % (self.room, original_event_id), + access_token=self.user_token, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertIn("chunk", channel.json_body) + self.assertEquals(len(channel.json_body["chunk"]), 1) + + # Redact the original event + request, channel = self.make_request( + "PUT", + "/rooms/%s/redact/%s/%s" + % (self.room, original_event_id, "test_relations_redaction_redacts_edits"), + access_token=self.user_token, + content="{}", + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + # Try to check for remaining m.replace relations + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/relations/%s/m.replace/m.room.message" + % (self.room, original_event_id), + access_token=self.user_token, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + # Check that no relations are returned + self.assertIn("chunk", channel.json_body) + self.assertEquals(channel.json_body["chunk"], []) + + def test_aggregations_redaction_prevents_access_to_aggregations(self): + """Test that annotations of an event are redacted when the original event + is redacted. + """ + # Send a new event + res = self.helper.send(self.room, body="Hello!", tok=self.user_token) + original_event_id = res["event_id"] + + # Add a relation + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", key="👍", parent_id=original_event_id + ) + self.assertEquals(200, channel.code, channel.json_body) + + # Redact the original + request, channel = self.make_request( + "PUT", + "/rooms/%s/redact/%s/%s" + % ( + self.room, + original_event_id, + "test_aggregations_redaction_prevents_access_to_aggregations", + ), + access_token=self.user_token, + content="{}", + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + # Check that aggregations returns zero + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.annotation/m.reaction" + % (self.room, original_event_id), + access_token=self.user_token, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertIn("chunk", channel.json_body) + self.assertEquals(channel.json_body["chunk"], []) + def _send_relation( - self, relation_type, event_type, key=None, content={}, access_token=None + self, + relation_type, + event_type, + key=None, + content={}, + access_token=None, + parent_id=None, ): """Helper function to send a relation pointing at `self.parent_id` Args: relation_type (str): One of `RelationTypes` event_type (str): The type of the event to create + parent_id (str): The event_id this relation relates to. If None, then self.parent_id key (str|None): The aggregation key used for m.annotation relation type. content(dict|None): The content of the created event. 
@@ -564,10 +672,12 @@ def _send_relation( if key: query = "?key=" + six.moves.urllib.parse.quote_plus(key.encode("utf-8")) + original_id = parent_id if parent_id else self.parent_id + request, channel = self.make_request( "POST", "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s" - % (self.room, self.parent_id, relation_type, event_type, query), + % (self.room, original_id, relation_type, event_type, query), json.dumps(content).encode("utf-8"), access_token=access_token, ) From 7ad1d763566eb34bd32234811aa9901d8f3668aa Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 18 Jul 2019 23:57:15 +1000 Subject: [PATCH 71/80] Support Prometheus_client 0.4.0+ (#5636) --- UPGRADE.rst | 7 + changelog.d/5636.misc | 1 + docs/metrics-howto.rst | 102 +++++++++++ synapse/app/_base.py | 3 +- synapse/app/appservice.py | 3 +- synapse/app/client_reader.py | 3 +- synapse/app/event_creator.py | 3 +- synapse/app/federation_reader.py | 3 +- synapse/app/federation_sender.py | 3 +- synapse/app/frontend_proxy.py | 3 +- synapse/app/homeserver.py | 3 +- synapse/app/media_repository.py | 3 +- synapse/app/pusher.py | 3 +- synapse/app/synchrotron.py | 3 +- synapse/app/user_dir.py | 3 +- synapse/metrics/__init__.py | 17 ++ synapse/metrics/_exposition.py | 258 ++++++++++++++++++++++++++++ synapse/metrics/resource.py | 20 --- synapse/python_dependencies.py | 4 +- tests/storage/test_event_metrics.py | 4 +- 20 files changed, 399 insertions(+), 50 deletions(-) create mode 100644 changelog.d/5636.misc create mode 100644 synapse/metrics/_exposition.py delete mode 100644 synapse/metrics/resource.py diff --git a/UPGRADE.rst b/UPGRADE.rst index 72064accf3e2..cf228c7c529b 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -49,6 +49,13 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" +Upgrading to v1.2.0 +=================== + +Some counter metrics have been renamed, with the old names deprecated. See +`the metrics documentation `_ +for details. + Upgrading to v1.1.0 =================== diff --git a/changelog.d/5636.misc b/changelog.d/5636.misc new file mode 100644 index 000000000000..3add9902832f --- /dev/null +++ b/changelog.d/5636.misc @@ -0,0 +1 @@ +Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. \ No newline at end of file diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 32b064e2da22..973641f3dca6 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -59,6 +59,108 @@ How to monitor Synapse metrics using Prometheus Restart Prometheus. +Renaming of metrics & deprecation of old names in 1.2 +----------------------------------------------------- + +Synapse 1.2 updates the Prometheus metrics to match the naming convention of the +upstream ``prometheus_client``. The old names are considered deprecated and will +be removed in a future version of Synapse. 
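The rename follows from ``prometheus_client`` 0.4.0+ appending a ``_total``
suffix to counter samples itself. A quick way to see the convention
(``synapse_demo_events`` is a made-up metric name)::

    from prometheus_client import Counter, generate_latest

    events = Counter("synapse_demo_events", "A demonstration counter")
    events.inc()
    print(generate_latest().decode("ascii"))
    # the sample is exposed as "synapse_demo_events_total 1.0"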
+ ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| New Name | Old Name | ++=============================================================================+=======================================================================+ +| python_gc_objects_collected_total | python_gc_objects_collected | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| python_gc_objects_uncollectable_total | python_gc_objects_uncollectable | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| python_gc_collections_total | python_gc_collections | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| process_cpu_seconds_total | process_cpu_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_client_sent_transactions_total | synapse_federation_client_sent_transactions | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_client_events_processed_total | synapse_federation_client_events_processed | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_event_processing_loop_count_total | synapse_event_processing_loop_count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_event_processing_loop_room_count_total | synapse_event_processing_loop_room_count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_count_total | synapse_util_metrics_block_count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_time_seconds_total | synapse_util_metrics_block_time_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_ru_utime_seconds_total | synapse_util_metrics_block_ru_utime_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_ru_stime_seconds_total | synapse_util_metrics_block_ru_stime_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_db_txn_count_total | synapse_util_metrics_block_db_txn_count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_db_txn_duration_seconds_total | synapse_util_metrics_block_db_txn_duration_seconds | 
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_util_metrics_block_db_sched_duration_seconds_total | synapse_util_metrics_block_db_sched_duration_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_background_process_start_count_total | synapse_background_process_start_count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_background_process_ru_utime_seconds_total | synapse_background_process_ru_utime_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_background_process_ru_stime_seconds_total | synapse_background_process_ru_stime_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_background_process_db_txn_count_total | synapse_background_process_db_txn_count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_background_process_db_txn_duration_seconds_total | synapse_background_process_db_txn_duration_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_background_process_db_sched_duration_seconds_total | synapse_background_process_db_sched_duration_seconds | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_storage_events_persisted_events_total | synapse_storage_events_persisted_events | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_storage_events_persisted_events_sep_total | synapse_storage_events_persisted_events_sep | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_storage_events_state_delta_total | synapse_storage_events_state_delta | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_storage_events_state_delta_single_event_total | synapse_storage_events_state_delta_single_event | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_storage_events_state_delta_reuse_delta_total | synapse_storage_events_state_delta_reuse_delta | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_server_received_pdus_total | synapse_federation_server_received_pdus | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_server_received_edus_total | 
synapse_federation_server_received_edus | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handler_presence_notified_presence_total | synapse_handler_presence_notified_presence | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handler_presence_federation_presence_out_total | synapse_handler_presence_federation_presence_out | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handler_presence_presence_updates_total | synapse_handler_presence_presence_updates | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handler_presence_timers_fired_total | synapse_handler_presence_timers_fired | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handler_presence_federation_presence_total | synapse_handler_presence_federation_presence | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handler_presence_bump_active_time_total | synapse_handler_presence_bump_active_time | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_client_sent_edus_total | synapse_federation_client_sent_edus | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_client_sent_pdu_destinations_count_total | synapse_federation_client_sent_pdu_destinations:count | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_federation_client_sent_pdu_destinations_total | synapse_federation_client_sent_pdu_destinations:total | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_handlers_appservice_events_processed_total | synapse_handlers_appservice_events_processed | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_notifier_notified_events_total | synapse_notifier_notified_events | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_invalidation_counter | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter_total | synapse_push_bulk_push_rule_evaluator_push_rules_state_size_counter | 
++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_http_httppusher_http_pushes_processed_total | synapse_http_httppusher_http_pushes_processed | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_http_httppusher_http_pushes_failed_total | synapse_http_httppusher_http_pushes_failed | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_http_httppusher_badge_updates_processed_total | synapse_http_httppusher_badge_updates_processed | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ +| synapse_http_httppusher_badge_updates_failed_total | synapse_http_httppusher_badge_updates_failed | ++-----------------------------------------------------------------------------+-----------------------------------------------------------------------+ + + Removal of deprecated metrics & time based counters becoming histograms in 0.31.0 --------------------------------------------------------------------------------- diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 807f320b46e1..540dbd92369c 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -149,8 +149,7 @@ def listen_metrics(bind_addresses, port): """ Start Prometheus metrics server. """ - from synapse.metrics import RegistryProxy - from prometheus_client import start_http_server + from synapse.metrics import RegistryProxy, start_http_server for host in bind_addresses: logger.info("Starting metrics listener on %s:%d", host, port) diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py index be44249ed6f1..e01f3e5f3b04 100644 --- a/synapse/app/appservice.py +++ b/synapse/app/appservice.py @@ -27,8 +27,7 @@ from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index ff11beca82a7..29bddc4823f1 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -28,8 +28,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py index cacad25eacd5..042cfd04afcf 100644 --- a/synapse/app/event_creator.py +++ 
b/synapse/app/event_creator.py @@ -28,8 +28,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py index 11e80dbae02c..76a97f8f3271 100644 --- a/synapse/app/federation_reader.py +++ b/synapse/app/federation_reader.py @@ -29,8 +29,7 @@ from synapse.federation.transport.server import TransportLayerServer from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 97da7bdcbf4a..fec49d509299 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -28,9 +28,8 @@ from synapse.federation import send_queue from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import RegistryProxy +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.events import SlavedEventStore diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 417a10bbd23c..1f1f1df78e5e 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -30,8 +30,7 @@ from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 639b1429c05a..0c075cb3f1eb 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -55,9 +55,8 @@ from synapse.http.server import RootRedirect from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext -from synapse.metrics import RegistryProxy +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from 
synapse.metrics.background_process_metrics import run_as_background_process -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource from synapse.module_api import ModuleApi from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py index f23b9b6eda89..d70780e9d54e 100644 --- a/synapse/app/media_repository.py +++ b/synapse/app/media_repository.py @@ -28,8 +28,7 @@ from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py index 4f929edf8660..070de7d0b015 100644 --- a/synapse/app/pusher.py +++ b/synapse/app/pusher.py @@ -27,8 +27,7 @@ from synapse.config.logger import setup_logging from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import __func__ from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.events import SlavedEventStore diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py index de4797fddcd8..315c03069477 100644 --- a/synapse/app/synchrotron.py +++ b/synapse/app/synchrotron.py @@ -32,8 +32,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore, __func__ from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py index 1177ddd72e92..03ef21bd01d8 100644 --- a/synapse/app/user_dir.py +++ b/synapse/app/user_dir.py @@ -29,8 +29,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext, run_in_background -from synapse.metrics import RegistryProxy -from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore from synapse.replication.slave.storage.client_ips import SlavedClientIpStore diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index eaf0aaa86e89..488280b4a6ac 100644 --- a/synapse/metrics/__init__.py +++ 
b/synapse/metrics/__init__.py @@ -29,8 +29,16 @@ from twisted.internet import reactor +from synapse.metrics._exposition import ( + MetricsResource, + generate_latest, + start_http_server, +) + logger = logging.getLogger(__name__) +METRICS_PREFIX = "/_synapse/metrics" + running_on_pypy = platform.python_implementation() == "PyPy" all_metrics = [] all_collectors = [] @@ -470,3 +478,12 @@ def f(*args, **kwargs): gc.disable() except AttributeError: pass + +__all__ = [ + "MetricsResource", + "generate_latest", + "start_http_server", + "LaterGauge", + "InFlightGauge", + "BucketCollector", +] diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py new file mode 100644 index 000000000000..1933ecd3e365 --- /dev/null +++ b/synapse/metrics/_exposition.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- +# Copyright 2015-2019 Prometheus Python Client Developers +# Copyright 2019 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +This code is based off `prometheus_client/exposition.py` from version 0.7.1. + +Due to the renaming of metrics in prometheus_client 0.4.0, this customised +vendoring of the code will emit both the old versions that Synapse dashboards +expect, and the newer "best practice" version of the up-to-date official client. +""" + +import math +import threading +from collections import namedtuple +from http.server import BaseHTTPRequestHandler, HTTPServer +from socketserver import ThreadingMixIn +from urllib.parse import parse_qs, urlparse + +from prometheus_client import REGISTRY + +from twisted.web.resource import Resource + +try: + from prometheus_client.samples import Sample +except ImportError: + Sample = namedtuple("Sample", ["name", "labels", "value", "timestamp", "exemplar"]) + + +CONTENT_TYPE_LATEST = str("text/plain; version=0.0.4; charset=utf-8") + + +INF = float("inf") +MINUS_INF = float("-inf") + + +def floatToGoString(d): + d = float(d) + if d == INF: + return "+Inf" + elif d == MINUS_INF: + return "-Inf" + elif math.isnan(d): + return "NaN" + else: + s = repr(d) + dot = s.find(".") + # Go switches to exponents sooner than Python. + # We only need to care about positive values for le/quantile. + if d > 0 and dot > 6: + mantissa = "{0}.{1}{2}".format(s[0], s[1:dot], s[dot + 1 :]).rstrip("0.") + return "{0}e+0{1}".format(mantissa, dot - 1) + return s + + +def sample_line(line, name): + if line.labels: + labelstr = "{{{0}}}".format( + ",".join( + [ + '{0}="{1}"'.format( + k, + v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""), + ) + for k, v in sorted(line.labels.items()) + ] + ) + ) + else: + labelstr = "" + timestamp = "" + if line.timestamp is not None: + # Convert to milliseconds. + timestamp = " {0:d}".format(int(float(line.timestamp) * 1000)) + return "{0}{1} {2}{3}\n".format( + name, labelstr, floatToGoString(line.value), timestamp + ) + + +def nameify_sample(sample): + """ + If we get a prometheus_client<0.4.0 sample as a tuple, transform it into a + namedtuple which has the names we expect. 
+ """ + if not isinstance(sample, Sample): + sample = Sample(*sample, None, None) + + return sample + + +def generate_latest(registry, emit_help=False): + output = [] + + for metric in registry.collect(): + + if metric.name.startswith("__unused"): + continue + + if not metric.samples: + # No samples, don't bother. + continue + + mname = metric.name + mnewname = metric.name + mtype = metric.type + + # OpenMetrics -> Prometheus + if mtype == "counter": + mnewname = mnewname + "_total" + elif mtype == "info": + mtype = "gauge" + mnewname = mnewname + "_info" + elif mtype == "stateset": + mtype = "gauge" + elif mtype == "gaugehistogram": + mtype = "histogram" + elif mtype == "unknown": + mtype = "untyped" + + # Output in the old format for compatibility. + if emit_help: + output.append( + "# HELP {0} {1}\n".format( + mname, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append("# TYPE {0} {1}\n".format(mname, mtype)) + for sample in map(nameify_sample, metric.samples): + # Get rid of the OpenMetrics specific samples + for suffix in ["_created", "_gsum", "_gcount"]: + if sample.name.endswith(suffix): + break + else: + newname = sample.name.replace(mnewname, mname) + if ":" in newname and newname.endswith("_total"): + newname = newname[: -len("_total")] + output.append(sample_line(sample, newname)) + + # Get rid of the weird colon things while we're at it + if mtype == "counter": + mnewname = mnewname.replace(":total", "") + mnewname = mnewname.replace(":", "_") + + if mname == mnewname: + continue + + # Also output in the new format, if it's different. + if emit_help: + output.append( + "# HELP {0} {1}\n".format( + mnewname, + metric.documentation.replace("\\", r"\\").replace("\n", r"\n"), + ) + ) + output.append("# TYPE {0} {1}\n".format(mnewname, mtype)) + for sample in map(nameify_sample, metric.samples): + # Get rid of the OpenMetrics specific samples + for suffix in ["_created", "_gsum", "_gcount"]: + if sample.name.endswith(suffix): + break + else: + output.append( + sample_line( + sample, sample.name.replace(":total", "").replace(":", "_") + ) + ) + + return "".join(output).encode("utf-8") + + +class MetricsHandler(BaseHTTPRequestHandler): + """HTTP handler that gives metrics from ``REGISTRY``.""" + + registry = REGISTRY + + def do_GET(self): + registry = self.registry + params = parse_qs(urlparse(self.path).query) + + if "help" in params: + emit_help = True + else: + emit_help = False + + try: + output = generate_latest(registry, emit_help=emit_help) + except Exception: + self.send_error(500, "error generating metric output") + raise + self.send_response(200) + self.send_header("Content-Type", CONTENT_TYPE_LATEST) + self.end_headers() + self.wfile.write(output) + + def log_message(self, format, *args): + """Log nothing.""" + + @classmethod + def factory(cls, registry): + """Returns a dynamic MetricsHandler class tied + to the passed registry. + """ + # This implementation relies on MetricsHandler.registry + # (defined above and defaulted to REGISTRY). + + # As we have unicode_literals, we need to create a str() + # object for type(). + cls_name = str(cls.__name__) + MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry}) + return MyMetricsHandler + + +class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer): + """Thread per request HTTP server.""" + + # Make worker threads "fire and forget". 
Beginning with Python 3.7 this + # prevents a memory leak because ``ThreadingMixIn`` starts to gather all + # non-daemon threads in a list in order to join on them at server close. + # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the + # same as Python 3.7's ``ThreadingHTTPServer``. + daemon_threads = True + + +def start_http_server(port, addr="", registry=REGISTRY): + """Starts an HTTP server for prometheus metrics as a daemon thread""" + CustomMetricsHandler = MetricsHandler.factory(registry) + httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler) + t = threading.Thread(target=httpd.serve_forever) + t.daemon = True + t.start() + + +class MetricsResource(Resource): + """ + Twisted ``Resource`` that serves prometheus metrics. + """ + + isLeaf = True + + def __init__(self, registry=REGISTRY): + self.registry = registry + + def render_GET(self, request): + request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii")) + return generate_latest(self.registry) diff --git a/synapse/metrics/resource.py b/synapse/metrics/resource.py deleted file mode 100644 index 9789359077cb..000000000000 --- a/synapse/metrics/resource.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from prometheus_client.twisted import MetricsResource - -METRICS_PREFIX = "/_synapse/metrics" - -__all__ = ["MetricsResource", "METRICS_PREFIX"] diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index e7618057be6d..c6465c0386df 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -65,9 +65,7 @@ "msgpack>=0.5.2", "phonenumbers>=8.2.0", "six>=1.10", - # prometheus_client 0.4.0 changed the format of counter metrics - # (cf https://github.com/matrix-org/synapse/issues/4001) - "prometheus_client>=0.0.18,<0.4.0", + "prometheus_client>=0.0.18,<0.8.0", # we use attr.s(slots), which arrived in 16.0.0 # Twisted 18.7.0 requires attrs>=17.4.0 "attrs>=17.4.0", diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index d44359ff9333..f26ff57a186c 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -13,9 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from prometheus_client.exposition import generate_latest - -from synapse.metrics import REGISTRY +from synapse.metrics import REGISTRY, generate_latest from synapse.types import Requester, UserID from tests.unittest import HomeserverTestCase From 82345bc09a9980de58c13a18b489403237acf4bd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 18 Jul 2019 15:06:54 +0100 Subject: [PATCH 72/80] Clean up opentracing configuration options (#5712) Clean up config settings and dead code. This is mostly about cleaning up the config format, to bring it into line with our conventions. 
In particular:

 * There should be a blank line after `## Section ##` headings
 * There should be a blank line between each config setting
 * There should be a `#`-only line between a comment and the setting it
   describes
 * We don't really do the `# #` style commenting-out of whole sections if we
   can help it
 * rename `tracer_enabled` to `enabled`

While we're here, do more config parsing upfront, which makes it easier to use
later on.

Also removes redundant code from LogContextScopeManager.

Also changes the changelog fragment to a `feature` - it's exciting!
---
 changelog.d/5544.feature               |  2 +
 changelog.d/5544.misc                  |  1 -
 changelog.d/5712.feature               |  2 +
 docs/sample_config.yaml                | 45 ++++++++++++------
 synapse/config/tracer.py               | 63 +++++++++++++++++---------
 synapse/logging/opentracing.py         | 42 ++++++++---------
 synapse/logging/scopecontextmanager.py |  4 +-
 7 files changed, 96 insertions(+), 63 deletions(-)
 create mode 100644 changelog.d/5544.feature
 delete mode 100644 changelog.d/5544.misc
 create mode 100644 changelog.d/5712.feature

diff --git a/changelog.d/5544.feature b/changelog.d/5544.feature
new file mode 100644
index 000000000000..7d3459129d14
--- /dev/null
+++ b/changelog.d/5544.feature
@@ -0,0 +1,2 @@
+Add support for opentracing.
+
diff --git a/changelog.d/5544.misc b/changelog.d/5544.misc
deleted file mode 100644
index 81d6f74c31c8..000000000000
--- a/changelog.d/5544.misc
+++ /dev/null
@@ -1 +0,0 @@
-Added opentracing and configuration options.
diff --git a/changelog.d/5712.feature b/changelog.d/5712.feature
new file mode 100644
index 000000000000..7d3459129d14
--- /dev/null
+++ b/changelog.d/5712.feature
@@ -0,0 +1,2 @@
+Add support for opentracing.
+
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 663ff31622f4..5b804d16a463 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1409,17 +1409,34 @@ password_config:
 
 ## Opentracing ##
 
-# These settings enable opentracing which implements distributed tracing
-# This allows you to observe the causal chain of events across servers
-# including requests, key lookups etc. across any server running
-# synapse or any other other services which supports opentracing.
-# (specifically those implemented with jaeger)
-
-#opentracing:
-#  # Enable / disable tracer
-#  tracer_enabled: false
-#  # The list of homeservers we wish to expose our current traces to.
-#  # The list is a list of regexes which are matched against the
-#  # servername of the homeserver
-#  homeserver_whitelist:
-#    - ".*"
+
+# These settings enable opentracing, which implements distributed tracing.
+# This allows you to observe the causal chains of events across servers
+# including requests, key lookups etc., across any server running
+# synapse or any other services which support opentracing
+# (specifically those implemented with Jaeger).
+#
+opentracing:
+    # tracing is disabled by default. Uncomment the following line to enable it.
+    #
+    #enabled: true
+
+    # The list of homeservers to which we wish to send and receive span contexts and span baggage.
+    #
+    # Though it's mostly safe to send and receive span contexts to and from
+    # untrusted users since span contexts are usually opaque ids, it can lead to
+    # two problems, namely:
+    # - If the span context is marked as sampled by the sending homeserver the receiver will
+    #   sample it. Therefore two homeservers with wildly disparate sampling policies
+    #   could incur higher sampling counts than intended.
+    # - Span baggage can be arbitrary data. For safety this has been disabled in synapse
+    #   but that doesn't prevent another server sending you baggage which will be logged
+    #   to opentracing logs.
+    #
+    # This a list of regexes which are matched against the server_name of the
+    # homeserver.
+    #
+    # By default, it is empty, so no servers are matched.
+    #
+    #homeserver_whitelist:
+    #  - ".*"
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index 63a637984aa8..a2ce9ab3f66f 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -18,33 +18,52 @@ class TracerConfig(Config):
     def read_config(self, config, **kwargs):
-        self.tracer_config = config.get("opentracing")
+        opentracing_config = config.get("opentracing")
+        if opentracing_config is None:
+            opentracing_config = {}
 
-        self.tracer_config = config.get("opentracing", {"tracer_enabled": False})
+        self.opentracer_enabled = opentracing_config.get("enabled", False)
+        if not self.opentracer_enabled:
+            return
 
-        if self.tracer_config.get("tracer_enabled", False):
-            # The tracer is enabled so sanitize the config
-            # If no whitelists are given
-            self.tracer_config.setdefault("homeserver_whitelist", [])
+        # The tracer is enabled so sanitize the config
 
-            if not isinstance(self.tracer_config.get("homeserver_whitelist"), list):
-                raise ConfigError("Tracer homesererver_whitelist config is malformed")
+        self.opentracer_whitelist = opentracing_config.get("homeserver_whitelist", [])
+        if not isinstance(self.opentracer_whitelist, list):
+            raise ConfigError("Tracer homeserver_whitelist config is malformed")
 
     def generate_config_section(cls, **kwargs):
         return """\
         ## Opentracing ##
 
-        # These settings enable opentracing which implements distributed tracing
-        # This allows you to observe the causal chain of events across servers
-        # including requests, key lookups etc. across any server running
-        # synapse or any other other services which supports opentracing.
-        # (specifically those implemented with jaeger)
-
-        #opentracing:
-        #  # Enable / disable tracer
-        #  tracer_enabled: false
-        #  # The list of homeservers we wish to expose our current traces to.
-        #  # The list is a list of regexes which are matched against the
-        #  # servername of the homeserver
-        #  homeserver_whitelist:
-        #    - ".*"
+
+        # These settings enable opentracing, which implements distributed tracing.
+        # This allows you to observe the causal chains of events across servers
+        # including requests, key lookups etc., across any server running
+        # synapse or any other services which support opentracing
+        # (specifically those implemented with Jaeger).
+        #
+        opentracing:
+            # tracing is disabled by default. Uncomment the following line to enable it.
+            #
+            #enabled: true
+
+            # The list of homeservers to which we wish to send and receive span contexts and span baggage.
+            #
+            # Though it's mostly safe to send and receive span contexts to and from
+            # untrusted users since span contexts are usually opaque ids, it can lead to
+            # two problems, namely:
+            # - If the span context is marked as sampled by the sending homeserver the receiver will
+            #   sample it. Therefore two homeservers with wildly disparate sampling policies
+            #   could incur higher sampling counts than intended.
+            # - Span baggage can be arbitrary data. For safety this has been disabled in synapse
+            #   but that doesn't prevent another server sending you baggage which will be logged
+            #   to opentracing logs.
+            #
+            # This a list of regexes which are matched against the server_name of the
+            # homeserver.
+            #
+            # By default, it is empty, so no servers are matched.
+ # + #homeserver_whitelist: + # - ".*" """ diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index f0ceea2a642a..415040f5ee00 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2019 The Matrix.org Foundation C.I.C.d +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,6 +24,15 @@ # this move the methods have work very similarly to opentracing's and it should only # be a matter of few regexes to move over to opentracing's access patterns proper. +import contextlib +import logging +import re +from functools import wraps + +from twisted.internet import defer + +from synapse.config import ConfigError + try: import opentracing except ImportError: @@ -35,12 +44,6 @@ JaegerConfig = None LogContextScopeManager = None -import contextlib -import logging -import re -from functools import wraps - -from twisted.internet import defer logger = logging.getLogger(__name__) @@ -91,7 +94,8 @@ def _only_if_tracing_inner(*args, **kwargs): return _only_if_tracing_inner -# Block everything by default +# A regex which matches the server_names to expose traces for. +# None means 'block everything'. _homeserver_whitelist = None tags = _DumTagNames @@ -101,31 +105,24 @@ def init_tracer(config): """Set the whitelists and initialise the JaegerClient tracer Args: - config (Config) - The config used by the homeserver. Here it's used to set the service - name to the homeserver's. + config (HomeserverConfig): The config used by the homeserver """ global opentracing - if not config.tracer_config.get("tracer_enabled", False): + if not config.opentracer_enabled: # We don't have a tracer opentracing = None return - if not opentracing: - logger.error( - "The server has been configure to use opentracing but opentracing is not installed." - ) - raise ModuleNotFoundError("opentracing") - - if not JaegerConfig: - logger.error( - "The server has been configure to use opentracing but opentracing is not installed." + if not opentracing or not JaegerConfig: + raise ConfigError( + "The server has been configured to use opentracing but opentracing is not " + "installed." 
) # Include the worker name name = config.worker_name if config.worker_name else "master" - set_homeserver_whitelist(config.tracer_config["homeserver_whitelist"]) + set_homeserver_whitelist(config.opentracer_whitelist) jaeger_config = JaegerConfig( config={"sampler": {"type": "const", "param": 1}, "logging": True}, service_name="{} {}".format(config.server_name, name), @@ -232,7 +229,6 @@ def whitelisted_homeserver(destination): """Checks if a destination matches the whitelist Args: destination (String)""" - global _homeserver_whitelist if _homeserver_whitelist: return _homeserver_whitelist.match(destination) return False diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index 91e14462f308..8c661302c9f0 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -34,9 +34,7 @@ class LogContextScopeManager(ScopeManager): """ def __init__(self, config): - # Set the whitelists - logger.info(config.tracer_config) - self._homeserver_whitelist = config.tracer_config["homeserver_whitelist"] + pass @property def active(self): From cfc00068bd7f7517ae5fd1d3ecd147518a0eb787 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 18 Jul 2019 15:56:59 +0100 Subject: [PATCH 73/80] enable aggregations support by default --- synapse/config/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index 080d0630bd96..00170f139369 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -136,7 +136,7 @@ def read_config(self, config, **kwargs): # Whether to enable experimental MSC1849 (aka relations) support self.experimental_msc1849_support_enabled = config.get( - "experimental_msc1849_support_enabled", False + "experimental_msc1849_support_enabled", True ) # Options to control access by tracking MAU From a3e40bd5b426c3cf852c389bbcc9fc392289c3ed Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 18 Jul 2019 16:02:09 +0100 Subject: [PATCH 74/80] towncrier --- changelog.d/5714.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5714.feature diff --git a/changelog.d/5714.feature b/changelog.d/5714.feature new file mode 100644 index 000000000000..2fd32e5e3858 --- /dev/null +++ b/changelog.d/5714.feature @@ -0,0 +1 @@ +Enable aggregations support by default From 826e6ec3bdfca92a5815f032c3246fab9a2aef88 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Mon, 22 Jul 2019 11:15:21 +0100 Subject: [PATCH 75/80] Opentracing Documentation (#5703) * Opentracing survival guide * Update decorator names in doc * Doc cleanup These are all alterations as a result of comments in #5703, it includes mostly typos and clarifications. The most interesting changes are: - Split developer and user docs into two sections - Add a high level description of OpenTracing * newsfile * Move contributer specific info to docstring. * Sample config. * Trailing whitespace. * Update 5703.misc * Apply suggestions from code review Mostly just rewording parts of the docs for clarity. 
Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
---
 changelog.d/5703.misc          |   1 +
 docs/opentracing.rst           | 100 ++++++++++++++++++++++
 docs/sample_config.yaml        |  14 +---
 synapse/config/tracer.py       |  14 +---
 synapse/logging/opentracing.py | 125 +++++++++++++++++++++++++++
 5 files changed, 230 insertions(+), 24 deletions(-)
 create mode 100644 changelog.d/5703.misc
 create mode 100644 docs/opentracing.rst

diff --git a/changelog.d/5703.misc b/changelog.d/5703.misc
new file mode 100644
index 000000000000..6e9b2d734e03
--- /dev/null
+++ b/changelog.d/5703.misc
@@ -0,0 +1 @@
+Documentation for opentracing.
diff --git a/docs/opentracing.rst b/docs/opentracing.rst
new file mode 100644
index 000000000000..b91a2208a8fb
--- /dev/null
+++ b/docs/opentracing.rst
@@ -0,0 +1,100 @@
+===========
+OpenTracing
+===========
+
+Background
+----------
+
+OpenTracing is a semi-standard being adopted by a number of distributed tracing
+platforms. It is a common API for facilitating vendor-agnostic tracing
+instrumentation. That is, we can use the OpenTracing API and select one of a
+number of tracer implementations to do the heavy lifting in the background.
+Our current selected implementation is Jaeger.
+
+OpenTracing is a tool which gives an insight into the causal relationship of
+work done in and between servers. The servers each track events and report them
+to a centralised server - in Synapse's case: Jaeger. The basic unit used to
+represent events is the span. The span roughly represents a single piece of work
+that was done and the time at which it occurred. A span can have child spans,
+meaning that the work of the child had to be completed for the parent span to
+complete, or it can have follow-on spans which represent work that is undertaken
+as a result of the parent but is not depended on by the parent in order to
+finish.
+
+Since this is undertaken in a distributed environment, a request to another
+server, such as an RPC or a simple GET, can be considered a span (a unit of
+work) for the local server. This causal link is what OpenTracing aims to
+capture and visualise. In order to do this, metadata about the local server's
+span, i.e. the 'span context', needs to be included with the request to the
+remote.
+
+It is up to the remote server to decide what it does with the spans
+it creates. This is called the sampling policy and it can be configured
+through Jaeger's settings.
+
+For OpenTracing concepts see
+https://opentracing.io/docs/overview/what-is-tracing/.
+
+For more information about Jaeger's implementation see
+https://www.jaegertracing.io/docs/
+
+======================
+Setting up OpenTracing
+======================
+
+To receive OpenTracing spans, start up a Jaeger server. This can be done
+using docker like so:
+
+.. code-block:: bash
+
+   docker run -d --name jaeger \
+     -p 6831:6831/udp \
+     -p 6832:6832/udp \
+     -p 5778:5778 \
+     -p 16686:16686 \
+     -p 14268:14268 \
+     jaegertracing/all-in-one:1.13
+
+Latest documentation is probably at
+https://www.jaegertracing.io/docs/1.13/getting-started/
+
+
+Enable OpenTracing in Synapse
+-----------------------------
+
+OpenTracing is not enabled by default. It must be enabled in the homeserver
+config by uncommenting the config options under ``opentracing`` as shown in
+the `sample config <./sample_config.yaml>`_. For example:
+
+.. code-block:: yaml
+
+    opentracing:
+      enabled: true
+      homeserver_whitelist:
+        - "mytrustedhomeserver.org"
+        - "*.myotherhomeservers.com"
+
+Homeserver whitelisting
+-----------------------
+
+The homeserver whitelist is configured using regular expressions. A list of regular
+expressions can be given and their union will be compared against the server name
+when propagating any span contexts to another homeserver.
+
+Though it's mostly safe to send and receive span contexts to and from
+untrusted users since span contexts are usually opaque ids, it can lead to
+two problems, namely:
+
+- If the span context is marked as sampled by the sending homeserver the receiver will
+  sample it. Therefore two homeservers with wildly different sampling policies
+  could incur higher sampling counts than intended.
+- Sending servers can attach arbitrary data to spans, known as 'baggage'. For safety this has been disabled in Synapse
+  but that doesn't prevent another server sending you baggage which will be logged
+  to OpenTracing's logs.
+
+==================
+Configuring Jaeger
+==================
+
+Sampling strategies can be set as in this document:
+https://www.jaegertracing.io/docs/1.13/sampling/
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 5b804d16a463..0a96197ca65e 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1422,18 +1422,8 @@ opentracing:
     #enabled: true
 
     # The list of homeservers to which we wish to send and receive span contexts and span baggage.
-    #
-    # Though it's mostly safe to send and receive span contexts to and from
-    # untrusted users since span contexts are usually opaque ids, it can lead to
-    # two problems, namely:
-    # - If the span context is marked as sampled by the sending homeserver the receiver will
-    #   sample it. Therefore two homeservers with wildly disparate sampling policies
-    #   could incur higher sampling counts than intended.
-    # - Span baggage can be arbitrary data. For safety this has been disabled in synapse
-    #   but that doesn't prevent another server sending you baggage which will be logged
-    #   to opentracing logs.
-    #
-    # This a list of regexes which are matched against the server_name of the
+    # See docs/opentracing.rst
+    # This is a list of regexes which are matched against the server_name of the
     # homeserver.
     #
     # By default, it is empty, so no servers are matched.
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index a2ce9ab3f66f..4479454415d4 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -48,18 +48,8 @@ def generate_config_section(cls, **kwargs):
             #enabled: true
 
             # The list of homeservers to which we wish to send and receive span contexts and span baggage.
-            #
-            # Though it's mostly safe to send and receive span contexts to and from
-            # untrusted users since span contexts are usually opaque ids, it can lead to
-            # two problems, namely:
-            # - If the span context is marked as sampled by the sending homeserver the receiver will
-            #   sample it. Therefore two homeservers with wildly disparate sampling policies
-            #   could incur higher sampling counts than intended.
-            # - Span baggage can be arbitrary data. For safety this has been disabled in synapse
-            #   but that doesn't prevent another server sending you baggage which will be logged
-            #   to opentracing logs.
-            #
-            # This a list of regexes which are matched against the server_name of the
+            # See docs/opentracing.rst
+            # This is a list of regexes which are matched against the server_name of the
             # homeserver.
             #
             # By default, it is empty, so no servers are matched.
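
For illustration, the whitelist semantics described above (and implemented by
``set_homeserver_whitelist`` / ``whitelisted_homeserver``) amount to compiling
the union of the configured regexes and matching it against a destination's
server name. A rough sketch of that behaviour, not the exact implementation
(the server names are the illustrative ones from the docs):

.. code-block:: python

   import re

   whitelist = [r"mytrustedhomeserver\.org", r".*\.myotherhomeservers\.com"]

   # Combine the regexes into a single pattern; re.match anchors at the
   # start of the destination's server_name.
   pattern = re.compile("|".join("(?:%s)" % regex for regex in whitelist))

   def is_whitelisted(destination):
       return bool(pattern.match(destination))

   assert is_whitelisted("mytrustedhomeserver.org")
   assert is_whitelisted("eu.myotherhomeservers.com")
   assert not is_whitelisted("untrusted.example.com")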
diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py
index 415040f5ee00..3da33d7826fe 100644
--- a/synapse/logging/opentracing.py
+++ b/synapse/logging/opentracing.py
@@ -24,6 +24,131 @@
 # this move the methods have work very similarly to opentracing's and it should only
 # be a matter of few regexes to move over to opentracing's access patterns proper.
 
+"""
+============================
+Using OpenTracing in Synapse
+============================
+
+Python-specific tracing concepts are at https://opentracing.io/guides/python/.
+Note that Synapse wraps OpenTracing in a small module (this one) in order to make the
+OpenTracing dependency optional. That means that the access patterns are
+different to those demonstrated in the OpenTracing guides. However, it is
+still useful to know, especially if OpenTracing is included as a full dependency
+in the future or if you are modifying this module.
+
+
+OpenTracing is encapsulated so that
+no span objects from OpenTracing are exposed in Synapse's code. This allows
+OpenTracing to be easily disabled in Synapse and thereby have OpenTracing as
+an optional dependency. This does however limit the number of modifiable spans
+at any point in the code to one. From here on, references to `opentracing`
+in the code snippets refer to Synapse's module.
+
+Tracing
+-------
+
+In Synapse it is not possible to start a non-active span. Spans can be started
+using the ``start_active_span`` method. This returns a scope (see
+OpenTracing docs) which is a context manager that needs to be entered and
+exited. This is usually done by using ``with``.
+
+.. code-block:: python
+
+   from synapse.logging.opentracing import start_active_span
+
+   with start_active_span("operation name"):
+       # Do something we want to trace
+
+Forgetting to enter or exit a scope will result in some mysterious and grievous log
+context errors.
+
+At any time where there is an active span, ``opentracing.set_tag`` can be used to
+set a tag on the current active span.
+
+Tracing functions
+-----------------
+
+Functions can be easily traced using decorators. There is a decorator for
+'normal' functions and for functions which are actually deferreds. The name of
+the function becomes the operation name for the span.
+
+.. code-block:: python
+
+   from synapse.logging.opentracing import trace, trace_deferred
+
+   # Start a span using 'normal_function' as the operation name
+   @trace
+   def normal_function(*args, **kwargs):
+       # Does all kinds of cool and expected things
+       return something_usual_and_useful
+
+   # Start a span using 'deferred_function' as the operation name
+   @trace_deferred
+   @defer.inlineCallbacks
+   def deferred_function(*args, **kwargs):
+       # We start
+       yield we_wait
+       # we finish
+       defer.returnValue(something_usual_and_useful)
+
+Operation names can be explicitly set for functions by using
+``trace_using_operation_name`` and
+``trace_deferred_using_operation_name``.
+
+.. code-block:: python
+
+   from synapse.logging.opentracing import (
+       trace_using_operation_name,
+       trace_deferred_using_operation_name
+   )
+
+   @trace_using_operation_name("A *much* better operation name")
+   def normal_function(*args, **kwargs):
+       # Does all kinds of cool and expected things
+       return something_usual_and_useful
+
+   @trace_deferred_using_operation_name("Another exciting operation name!")
+   @defer.inlineCallbacks
+   def deferred_function(*args, **kwargs):
+       # We start
+       yield we_wait
+       # we finish
+       defer.returnValue(something_usual_and_useful)
+
+Contexts and carriers
+---------------------
+
+There are a selection of wrappers for injecting and extracting contexts from
+carriers provided. Unfortunately OpenTracing's three context injection
+techniques are not adequate for our injection of OpenTracing span-contexts into
+Twisted's http headers, EDU contents and our database tables. Also note that
+the binary encoding format mandated by OpenTracing is not actually implemented
+by jaeger_client v4.0.0 - it will silently noop.
+Please refer to the end of ``logging/opentracing.py`` for the available
+injection and extraction methods.
+
+Homeserver whitelisting
+-----------------------
+
+Most of the whitelist checks are encapsulated in the module's injection
+and extraction methods, but be aware that using custom carriers or crossing
+uncharted waters will require the enforcement of the whitelist.
+``logging/opentracing.py`` has a ``whitelisted_homeserver`` method which takes
+in a destination and compares it to the whitelist.
+
+=======
+Gotchas
+=======
+
+- Checking whitelists on span propagation
+- Inserting PII
+- Forgetting to enter or exit a scope
+- Span source: make sure that the span you expect to be active across a
+  function call really will be that one. Does the current function have more
+  than one caller? Will all of those calling functions be in a context
+  with an active span?
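+
+To avoid the scope-related gotchas above, prefer entering scopes with ``with``.
+A short recap sketch of the access pattern described in this docstring (the
+operation and tag names are illustrative):
+
+.. code-block:: python
+
+   from synapse.logging import opentracing
+
+   with opentracing.start_active_span("do_something"):
+       # sets a tag on the span opened above; no-ops if tracing is disabled
+       opentracing.set_tag("result_count", 42)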
+""" + import contextlib import logging import re From 54437c48ca0dcc745f11a362ce7dc7267b568896 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 22 Jul 2019 12:59:04 +0100 Subject: [PATCH 76/80] 1.2.0rc1 --- CHANGES.md | 81 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5397.doc | 1 - changelog.d/5544.feature | 2 - changelog.d/5589.feature | 1 - changelog.d/5597.feature | 1 - changelog.d/5606.misc | 1 - changelog.d/5609.bugfix | 1 - changelog.d/5611.misc | 1 - changelog.d/5613.feature | 1 - changelog.d/5616.misc | 1 - changelog.d/5617.misc | 1 - changelog.d/5619.docker | 1 - changelog.d/5620.docker | 1 - changelog.d/5621.bugfix | 1 - changelog.d/5622.misc | 1 - changelog.d/5623.feature | 1 - changelog.d/5625.removal | 1 - changelog.d/5626.feature | 1 - changelog.d/5627.misc | 1 - changelog.d/5628.misc | 1 - changelog.d/5629.bugfix | 1 - changelog.d/5630.misc | 1 - changelog.d/5636.misc | 1 - changelog.d/5637.misc | 1 - changelog.d/5638.bugfix | 1 - changelog.d/5639.misc | 1 - changelog.d/5640.misc | 1 - changelog.d/5641.misc | 1 - changelog.d/5642.misc | 1 - changelog.d/5643.misc | 1 - changelog.d/5644.bugfix | 1 - changelog.d/5645.misc | 1 - changelog.d/5651.doc | 1 - changelog.d/5654.bugfix | 1 - changelog.d/5655.misc | 1 - changelog.d/5656.misc | 1 - changelog.d/5657.misc | 1 - changelog.d/5658.bugfix | 1 - changelog.d/5659.misc | 1 - changelog.d/5660.feature | 1 - changelog.d/5661.doc | 1 - changelog.d/5664.misc | 1 - changelog.d/5673.misc | 1 - changelog.d/5674.feature | 1 - changelog.d/5675.doc | 1 - changelog.d/5689.misc | 1 - changelog.d/5699.bugfix | 1 - changelog.d/5700.bugfix | 2 - changelog.d/5701.bugfix | 1 - changelog.d/5703.misc | 1 - changelog.d/5707.bugfix | 1 - changelog.d/5712.feature | 2 - changelog.d/5714.feature | 1 - synapse/__init__.py | 2 +- 54 files changed, 82 insertions(+), 56 deletions(-) delete mode 100644 changelog.d/5397.doc delete mode 100644 changelog.d/5544.feature delete mode 100644 changelog.d/5589.feature delete mode 100644 changelog.d/5597.feature delete mode 100644 changelog.d/5606.misc delete mode 100644 changelog.d/5609.bugfix delete mode 100644 changelog.d/5611.misc delete mode 100644 changelog.d/5613.feature delete mode 100644 changelog.d/5616.misc delete mode 100644 changelog.d/5617.misc delete mode 100644 changelog.d/5619.docker delete mode 100644 changelog.d/5620.docker delete mode 100644 changelog.d/5621.bugfix delete mode 100644 changelog.d/5622.misc delete mode 100644 changelog.d/5623.feature delete mode 100644 changelog.d/5625.removal delete mode 100644 changelog.d/5626.feature delete mode 100644 changelog.d/5627.misc delete mode 100644 changelog.d/5628.misc delete mode 100644 changelog.d/5629.bugfix delete mode 100644 changelog.d/5630.misc delete mode 100644 changelog.d/5636.misc delete mode 100644 changelog.d/5637.misc delete mode 100644 changelog.d/5638.bugfix delete mode 100644 changelog.d/5639.misc delete mode 100644 changelog.d/5640.misc delete mode 100644 changelog.d/5641.misc delete mode 100644 changelog.d/5642.misc delete mode 100644 changelog.d/5643.misc delete mode 100644 changelog.d/5644.bugfix delete mode 100644 changelog.d/5645.misc delete mode 100644 changelog.d/5651.doc delete mode 100644 changelog.d/5654.bugfix delete mode 100644 changelog.d/5655.misc delete mode 100644 changelog.d/5656.misc delete mode 100644 changelog.d/5657.misc delete mode 100644 changelog.d/5658.bugfix delete mode 100644 changelog.d/5659.misc delete mode 100644 changelog.d/5660.feature delete mode 100644 changelog.d/5661.doc delete mode 
100644 changelog.d/5664.misc delete mode 100644 changelog.d/5673.misc delete mode 100644 changelog.d/5674.feature delete mode 100644 changelog.d/5675.doc delete mode 100644 changelog.d/5689.misc delete mode 100644 changelog.d/5699.bugfix delete mode 100644 changelog.d/5700.bugfix delete mode 100644 changelog.d/5701.bugfix delete mode 100644 changelog.d/5703.misc delete mode 100644 changelog.d/5707.bugfix delete mode 100644 changelog.d/5712.feature delete mode 100644 changelog.d/5714.feature diff --git a/CHANGES.md b/CHANGES.md index dc8c74fe5802..e42fe1ba4d07 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,84 @@ +Synapse 1.2.0rc1 (2019-07-22) +============================= + +Features +-------- + +- Add support for opentracing. ([\#5544](https://github.com/matrix-org/synapse/issues/5544), [\#5712](https://github.com/matrix-org/synapse/issues/5712)) +- Add ability to pull all locally stored events out of synapse that a particular user can see. ([\#5589](https://github.com/matrix-org/synapse/issues/5589)) +- Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. ([\#5597](https://github.com/matrix-org/synapse/issues/5597)) +- Add `sender` and `origin_server_ts` fields to `m.replace`. ([\#5613](https://github.com/matrix-org/synapse/issues/5613)) +- Add default push rule to ignore reactions. ([\#5623](https://github.com/matrix-org/synapse/issues/5623)) +- Include the original event when asking for its relations. ([\#5626](https://github.com/matrix-org/synapse/issues/5626)) +- Implement `session_lifetime` configuration option, after which access tokens will expire. ([\#5660](https://github.com/matrix-org/synapse/issues/5660)) +- Return "This account has been deactivated" when a deactivated user tries to login. ([\#5674](https://github.com/matrix-org/synapse/issues/5674)) +- Enable aggregations support by default ([\#5714](https://github.com/matrix-org/synapse/issues/5714)) + + +Bugfixes +-------- + +- Fix 'utime went backwards' errors on daemonization. ([\#5609](https://github.com/matrix-org/synapse/issues/5609)) +- Various minor fixes to the federation request rate limiter. ([\#5621](https://github.com/matrix-org/synapse/issues/5621)) +- Forbid viewing relations on an event once it has been redacted. ([\#5629](https://github.com/matrix-org/synapse/issues/5629)) +- Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. ([\#5638](https://github.com/matrix-org/synapse/issues/5638)) +- Fix newly-registered users not being able to lookup their own profile without joining a room. ([\#5644](https://github.com/matrix-org/synapse/issues/5644)) +- Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. ([\#5654](https://github.com/matrix-org/synapse/issues/5654)) +- Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. ([\#5658](https://github.com/matrix-org/synapse/issues/5658)) +- Fix some problems with authenticating redactions in recent room versions. ([\#5699](https://github.com/matrix-org/synapse/issues/5699), [\#5700](https://github.com/matrix-org/synapse/issues/5700), [\#5707](https://github.com/matrix-org/synapse/issues/5707)) +- Ignore redactions of m.room.create events. 
([\#5701](https://github.com/matrix-org/synapse/issues/5701)) + + +Updates to the Docker image +--------------------------- + +- Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). ([\#5619](https://github.com/matrix-org/synapse/issues/5619)) +- Add missing space in default logging file format generated by the Docker image. ([\#5620](https://github.com/matrix-org/synapse/issues/5620)) + + +Improved Documentation +---------------------- + +- Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! ([\#5397](https://github.com/matrix-org/synapse/issues/5397)) +- --no-pep517 should be --no-use-pep517 in the documentation to set up the development environment. ([\#5651](https://github.com/matrix-org/synapse/issues/5651)) +- Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! ([\#5661](https://github.com/matrix-org/synapse/issues/5661)) +- Minor tweaks to postgres documentation. ([\#5675](https://github.com/matrix-org/synapse/issues/5675)) + + +Deprecations and Removals +------------------------- + +- Remove support for the `invite_3pid_guest` configuration setting. ([\#5625](https://github.com/matrix-org/synapse/issues/5625)) + + +Internal Changes +---------------- + +- Move logging code out of `synapse.util` and into `synapse.logging`. ([\#5606](https://github.com/matrix-org/synapse/issues/5606), [\#5617](https://github.com/matrix-org/synapse/issues/5617)) +- Add a blacklist file to the repo to blacklist certain sytests from failing CI. ([\#5611](https://github.com/matrix-org/synapse/issues/5611)) +- Make runtime errors surrounding password reset emails much clearer. ([\#5616](https://github.com/matrix-org/synapse/issues/5616)) +- Remove dead code for persisting outgoing federation transactions. ([\#5622](https://github.com/matrix-org/synapse/issues/5622)) +- Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. ([\#5627](https://github.com/matrix-org/synapse/issues/5627)) +- Move RegistrationHandler.get_or_create_user to test code. ([\#5628](https://github.com/matrix-org/synapse/issues/5628)) +- Add some more common python virtual-environment paths to the black exclusion list. ([\#5630](https://github.com/matrix-org/synapse/issues/5630)) +- Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. ([\#5636](https://github.com/matrix-org/synapse/issues/5636)) +- Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637)) +- Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639)) +- Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640)) +- Remove access-token support from RegistrationHandler.register, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641)) +- Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642)) +- Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643)) +- Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure.
([\#5645](https://github.com/matrix-org/synapse/issues/5645)) +- Fix a small typo in a code comment. ([\#5655](https://github.com/matrix-org/synapse/issues/5655)) +- Clean up exception handling around client access tokens. ([\#5656](https://github.com/matrix-org/synapse/issues/5656)) +- Add a mechanism for per-test homeserver configuration in the unit tests. ([\#5657](https://github.com/matrix-org/synapse/issues/5657)) +- Inline issue_access_token. ([\#5659](https://github.com/matrix-org/synapse/issues/5659)) +- Update the sytest BuildKite configuration to checkout Synapse in `/src`. ([\#5664](https://github.com/matrix-org/synapse/issues/5664)) +- Add a `docker` type to the towncrier configuration. ([\#5673](https://github.com/matrix-org/synapse/issues/5673)) +- Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. ([\#5689](https://github.com/matrix-org/synapse/issues/5689)) +- Documentation for opentracing. ([\#5703](https://github.com/matrix-org/synapse/issues/5703)) + + Synapse 1.1.0 (2019-07-04) ========================== diff --git a/changelog.d/5397.doc b/changelog.d/5397.doc deleted file mode 100644 index c2b500b482d8..000000000000 --- a/changelog.d/5397.doc +++ /dev/null @@ -1 +0,0 @@ -Add information about nginx normalisation to reverse_proxy.rst. Contributed by @skalarproduktraum - thanks! diff --git a/changelog.d/5544.feature b/changelog.d/5544.feature deleted file mode 100644 index 7d3459129d14..000000000000 --- a/changelog.d/5544.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add support for opentracing. - diff --git a/changelog.d/5589.feature b/changelog.d/5589.feature deleted file mode 100644 index a87e669dd4ea..000000000000 --- a/changelog.d/5589.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to pull all locally stored events out of synapse that a particular user can see. diff --git a/changelog.d/5597.feature b/changelog.d/5597.feature deleted file mode 100644 index 6f92748885b4..000000000000 --- a/changelog.d/5597.feature +++ /dev/null @@ -1 +0,0 @@ -Add a basic admin command app to allow server operators to run Synapse admin commands separately from the main production instance. diff --git a/changelog.d/5606.misc b/changelog.d/5606.misc deleted file mode 100644 index bb3c02816789..000000000000 --- a/changelog.d/5606.misc +++ /dev/null @@ -1 +0,0 @@ -Move logging code out of `synapse.util` and into `synapse.logging`. diff --git a/changelog.d/5609.bugfix b/changelog.d/5609.bugfix deleted file mode 100644 index 534ee22a1b3f..000000000000 --- a/changelog.d/5609.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 'utime went backwards' errors on daemonization. diff --git a/changelog.d/5611.misc b/changelog.d/5611.misc deleted file mode 100644 index a2d169513912..000000000000 --- a/changelog.d/5611.misc +++ /dev/null @@ -1 +0,0 @@ -Add a blacklist file to the repo to blacklist certain sytests from failing CI. diff --git a/changelog.d/5613.feature b/changelog.d/5613.feature deleted file mode 100644 index 4b7bb2745c19..000000000000 --- a/changelog.d/5613.feature +++ /dev/null @@ -1 +0,0 @@ -Add `sender` and `origin_server_ts` fields to `m.replace`. diff --git a/changelog.d/5616.misc b/changelog.d/5616.misc deleted file mode 100644 index 9f94be6778c6..000000000000 --- a/changelog.d/5616.misc +++ /dev/null @@ -1 +0,0 @@ -Make runtime errors surrounding password reset emails much clearer. 
diff --git a/changelog.d/5617.misc b/changelog.d/5617.misc deleted file mode 100644 index bb3c02816789..000000000000 --- a/changelog.d/5617.misc +++ /dev/null @@ -1 +0,0 @@ -Move logging code out of `synapse.util` and into `synapse.logging`. diff --git a/changelog.d/5619.docker b/changelog.d/5619.docker deleted file mode 100644 index b69e5cc57cb5..000000000000 --- a/changelog.d/5619.docker +++ /dev/null @@ -1 +0,0 @@ -Base Docker image on a newer Alpine Linux version (3.8 -> 3.10). diff --git a/changelog.d/5620.docker b/changelog.d/5620.docker deleted file mode 100644 index cbb5a75d6ad5..000000000000 --- a/changelog.d/5620.docker +++ /dev/null @@ -1 +0,0 @@ -Add missing space in default logging file format generated by the Docker image. diff --git a/changelog.d/5621.bugfix b/changelog.d/5621.bugfix deleted file mode 100644 index f1a2851f45ef..000000000000 --- a/changelog.d/5621.bugfix +++ /dev/null @@ -1 +0,0 @@ -Various minor fixes to the federation request rate limiter. diff --git a/changelog.d/5622.misc b/changelog.d/5622.misc deleted file mode 100644 index 9f0a87311c27..000000000000 --- a/changelog.d/5622.misc +++ /dev/null @@ -1 +0,0 @@ -Remove dead code for persiting outgoing federation transactions. diff --git a/changelog.d/5623.feature b/changelog.d/5623.feature deleted file mode 100644 index b73080e88d29..000000000000 --- a/changelog.d/5623.feature +++ /dev/null @@ -1 +0,0 @@ -Add default push rule to ignore reactions. diff --git a/changelog.d/5625.removal b/changelog.d/5625.removal deleted file mode 100644 index d33a778d695d..000000000000 --- a/changelog.d/5625.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for the `invite_3pid_guest` configuration setting. diff --git a/changelog.d/5626.feature b/changelog.d/5626.feature deleted file mode 100644 index 5ef793b943a0..000000000000 --- a/changelog.d/5626.feature +++ /dev/null @@ -1 +0,0 @@ -Include the original event when asking for its relations. diff --git a/changelog.d/5627.misc b/changelog.d/5627.misc deleted file mode 100644 index 730721b5ef19..000000000000 --- a/changelog.d/5627.misc +++ /dev/null @@ -1 +0,0 @@ -Add `lint.sh` to the scripts-dev folder which will run all linting steps required by CI. diff --git a/changelog.d/5628.misc b/changelog.d/5628.misc deleted file mode 100644 index fec8446793a0..000000000000 --- a/changelog.d/5628.misc +++ /dev/null @@ -1 +0,0 @@ -Move RegistrationHandler.get_or_create_user to test code. diff --git a/changelog.d/5629.bugfix b/changelog.d/5629.bugfix deleted file mode 100644 index 672eabad4085..000000000000 --- a/changelog.d/5629.bugfix +++ /dev/null @@ -1 +0,0 @@ -Forbid viewing relations on an event once it has been redacted. diff --git a/changelog.d/5630.misc b/changelog.d/5630.misc deleted file mode 100644 index f112d873eb80..000000000000 --- a/changelog.d/5630.misc +++ /dev/null @@ -1 +0,0 @@ -Add some more common python virtual-environment paths to the black exclusion list. diff --git a/changelog.d/5636.misc b/changelog.d/5636.misc deleted file mode 100644 index 3add9902832f..000000000000 --- a/changelog.d/5636.misc +++ /dev/null @@ -1 +0,0 @@ -Some counter metrics exposed over Prometheus have been renamed, with the old names preserved for backwards compatibility and deprecated. See `docs/metrics-howto.rst` for details. \ No newline at end of file diff --git a/changelog.d/5637.misc b/changelog.d/5637.misc deleted file mode 100644 index f18d6197e51a..000000000000 --- a/changelog.d/5637.misc +++ /dev/null @@ -1 +0,0 @@ -Unblacklist some user_directory sytests. 
diff --git a/changelog.d/5638.bugfix b/changelog.d/5638.bugfix deleted file mode 100644 index 66781ad9e687..000000000000 --- a/changelog.d/5638.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix requests to the `/store_invite` endpoint of identity servers being sent in the wrong format. diff --git a/changelog.d/5639.misc b/changelog.d/5639.misc deleted file mode 100644 index 413b13128c6f..000000000000 --- a/changelog.d/5639.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out some redundant code in the login implementation. diff --git a/changelog.d/5640.misc b/changelog.d/5640.misc deleted file mode 100644 index 7d69a1b3b640..000000000000 --- a/changelog.d/5640.misc +++ /dev/null @@ -1 +0,0 @@ -Update ModuleApi to avoid register(generate_token=True). diff --git a/changelog.d/5641.misc b/changelog.d/5641.misc deleted file mode 100644 index 1899bc963d4e..000000000000 --- a/changelog.d/5641.misc +++ /dev/null @@ -1 +0,0 @@ -Remove access-token support from RegistrationHandler.register, and rename it. diff --git a/changelog.d/5642.misc b/changelog.d/5642.misc deleted file mode 100644 index e7f8e214a4e6..000000000000 --- a/changelog.d/5642.misc +++ /dev/null @@ -1 +0,0 @@ -Remove access-token support from `RegistrationStore.register`, and rename it. diff --git a/changelog.d/5643.misc b/changelog.d/5643.misc deleted file mode 100644 index 2b2316469e03..000000000000 --- a/changelog.d/5643.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging for auto-join when a new user is created. diff --git a/changelog.d/5644.bugfix b/changelog.d/5644.bugfix deleted file mode 100644 index f6302fd08dd3..000000000000 --- a/changelog.d/5644.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix newly-registered users not being able to lookup their own profile without joining a room. diff --git a/changelog.d/5645.misc b/changelog.d/5645.misc deleted file mode 100644 index 4fa9699e4fae..000000000000 --- a/changelog.d/5645.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. \ No newline at end of file diff --git a/changelog.d/5651.doc b/changelog.d/5651.doc deleted file mode 100644 index e2d5a8dc8ad7..000000000000 --- a/changelog.d/5651.doc +++ /dev/null @@ -1 +0,0 @@ ---no-pep517 should be --no-use-pep517 in the documentation to setup the development environment. diff --git a/changelog.d/5654.bugfix b/changelog.d/5654.bugfix deleted file mode 100644 index 5f76b041cd1c..000000000000 --- a/changelog.d/5654.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug in #5626 that prevented the original_event field from actually having the contents of the original event in a call to `/relations`. \ No newline at end of file diff --git a/changelog.d/5655.misc b/changelog.d/5655.misc deleted file mode 100644 index acab6aee92bf..000000000000 --- a/changelog.d/5655.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a small typo in a code comment. \ No newline at end of file diff --git a/changelog.d/5656.misc b/changelog.d/5656.misc deleted file mode 100644 index a8de20a7d01e..000000000000 --- a/changelog.d/5656.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up exception handling around client access tokens. diff --git a/changelog.d/5657.misc b/changelog.d/5657.misc deleted file mode 100644 index bdec9ae4c0b6..000000000000 --- a/changelog.d/5657.misc +++ /dev/null @@ -1 +0,0 @@ -Add a mechanism for per-test homeserver configuration in the unit tests. 
diff --git a/changelog.d/5658.bugfix b/changelog.d/5658.bugfix deleted file mode 100644 index f6ae906a9ad6..000000000000 --- a/changelog.d/5658.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 3PID bind requests being sent to identity servers as `application/x-form-www-urlencoded` data, which is deprecated. diff --git a/changelog.d/5659.misc b/changelog.d/5659.misc deleted file mode 100644 index 686001295c33..000000000000 --- a/changelog.d/5659.misc +++ /dev/null @@ -1 +0,0 @@ -Inline issue_access_token. diff --git a/changelog.d/5660.feature b/changelog.d/5660.feature deleted file mode 100644 index 82889fdaf16b..000000000000 --- a/changelog.d/5660.feature +++ /dev/null @@ -1 +0,0 @@ -Implement `session_lifetime` configuration option, after which access tokens will expire. diff --git a/changelog.d/5661.doc b/changelog.d/5661.doc deleted file mode 100644 index c70e62014e40..000000000000 --- a/changelog.d/5661.doc +++ /dev/null @@ -1 +0,0 @@ -Improvements to Postgres setup instructions. Contributed by @Lrizika - thanks! diff --git a/changelog.d/5664.misc b/changelog.d/5664.misc deleted file mode 100644 index 0ca7a0fbd0c7..000000000000 --- a/changelog.d/5664.misc +++ /dev/null @@ -1 +0,0 @@ -Update the sytest BuildKite configuration to checkout Synapse in `/src`. diff --git a/changelog.d/5673.misc b/changelog.d/5673.misc deleted file mode 100644 index 194225635834..000000000000 --- a/changelog.d/5673.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `docker` type to the towncrier configuration. diff --git a/changelog.d/5674.feature b/changelog.d/5674.feature deleted file mode 100644 index 04bdfa4ad5eb..000000000000 --- a/changelog.d/5674.feature +++ /dev/null @@ -1 +0,0 @@ -Return "This account has been deactivated" when a deactivated user tries to login. diff --git a/changelog.d/5675.doc b/changelog.d/5675.doc deleted file mode 100644 index 4cd4d0be1a5a..000000000000 --- a/changelog.d/5675.doc +++ /dev/null @@ -1 +0,0 @@ -Minor tweaks to postgres documentation. diff --git a/changelog.d/5689.misc b/changelog.d/5689.misc deleted file mode 100644 index 8aa3e3f6a220..000000000000 --- a/changelog.d/5689.misc +++ /dev/null @@ -1 +0,0 @@ -Convert `synapse.federation.transport.server` to `async`. Might improve some stack traces. \ No newline at end of file diff --git a/changelog.d/5699.bugfix b/changelog.d/5699.bugfix deleted file mode 100644 index 30d5e67f6774..000000000000 --- a/changelog.d/5699.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some problems with authenticating redactions in recent room versions. \ No newline at end of file diff --git a/changelog.d/5700.bugfix b/changelog.d/5700.bugfix deleted file mode 100644 index 51bce8d441e1..000000000000 --- a/changelog.d/5700.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix some problems with authenticating redactions in recent room versions. - diff --git a/changelog.d/5701.bugfix b/changelog.d/5701.bugfix deleted file mode 100644 index fd2866e16abe..000000000000 --- a/changelog.d/5701.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ignore redactions of m.room.create events. diff --git a/changelog.d/5703.misc b/changelog.d/5703.misc deleted file mode 100644 index 6e9b2d734e03..000000000000 --- a/changelog.d/5703.misc +++ /dev/null @@ -1 +0,0 @@ -Documentation for opentracing. diff --git a/changelog.d/5707.bugfix b/changelog.d/5707.bugfix deleted file mode 100644 index aa3046c5e112..000000000000 --- a/changelog.d/5707.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix some problems with authenticating redactions in recent room versions. 
diff --git a/changelog.d/5712.feature b/changelog.d/5712.feature deleted file mode 100644 index 7d3459129d14..000000000000 --- a/changelog.d/5712.feature +++ /dev/null @@ -1,2 +0,0 @@ -Add support for opentracing. - diff --git a/changelog.d/5714.feature b/changelog.d/5714.feature deleted file mode 100644 index 2fd32e5e3858..000000000000 --- a/changelog.d/5714.feature +++ /dev/null @@ -1 +0,0 @@ -Enable aggregations support by default diff --git a/synapse/__init__.py b/synapse/__init__.py index cf22fabd6118..f26e49da3623 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ except ImportError: pass -__version__ = "1.1.0" +__version__ = "1.2.0rc1" From 8b0d5b171e1af3eb9a8a380af1be0f9c71d22d17 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Mon, 22 Jul 2019 13:15:35 +0100 Subject: [PATCH 77/80] Make changelog slightly more readable --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index e42fe1ba4d07..bb6bcb75ed94 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -65,7 +65,7 @@ Internal Changes - Unblacklist some user_directory sytests. ([\#5637](https://github.com/matrix-org/synapse/issues/5637)) - Factor out some redundant code in the login implementation. ([\#5639](https://github.com/matrix-org/synapse/issues/5639)) - Update ModuleApi to avoid register(generate_token=True). ([\#5640](https://github.com/matrix-org/synapse/issues/5640)) -- Remove access-token support from RegistrationHandler.register, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641)) +- Remove access-token support from `RegistrationHandler.register`, and rename it. ([\#5641](https://github.com/matrix-org/synapse/issues/5641)) - Remove access-token support from `RegistrationStore.register`, and rename it. ([\#5642](https://github.com/matrix-org/synapse/issues/5642)) - Improve logging for auto-join when a new user is created. ([\#5643](https://github.com/matrix-org/synapse/issues/5643)) - Remove unused and unnecessary check for FederationDeniedError in _exception_to_failure. ([\#5645](https://github.com/matrix-org/synapse/issues/5645)) From cf2972c818344214244961e6175f559a6b59123b Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Wed, 24 Jul 2019 13:07:35 +0100 Subject: [PATCH 78/80] Fix servlet metric names (#5734) * Fix servlet metric names Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> * Remove redundant check * Cover all return paths --- changelog.d/5734.bugfix | 1 + synapse/federation/transport/server.py | 4 +- synapse/http/server.py | 41 ++++++++++++++------- synapse/http/servlet.py | 4 +- synapse/replication/http/_base.py | 2 +- synapse/rest/admin/server_notice_servlet.py | 9 ++++- synapse/rest/client/v1/room.py | 37 +++++++++++++++---- synapse/rest/client/v2_alpha/relations.py | 2 + tests/test_server.py | 21 ++++++++--- tests/utils.py | 2 +- 10 files changed, 92 insertions(+), 31 deletions(-) create mode 100644 changelog.d/5734.bugfix diff --git a/changelog.d/5734.bugfix b/changelog.d/5734.bugfix new file mode 100644 index 000000000000..33aea5a94cb7 --- /dev/null +++ b/changelog.d/5734.bugfix @@ -0,0 +1 @@ +Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. 
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 663264dec48d..ea4e1b6d0f28 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -325,7 +325,9 @@ def register(self, server): if code is None: continue - server.register_paths(method, (pattern,), self._wrap(code)) + server.register_paths( + method, (pattern,), self._wrap(code), self.__class__.__name__ + ) class FederationSendServlet(BaseFederationServlet): diff --git a/synapse/http/server.py b/synapse/http/server.py index 72a3d67eb69d..e6f351ba3b2c 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -245,7 +245,9 @@ class JsonResource(HttpServer, resource.Resource): isLeaf = True - _PathEntry = collections.namedtuple("_PathEntry", ["pattern", "callback"]) + _PathEntry = collections.namedtuple( + "_PathEntry", ["pattern", "callback", "servlet_classname"] + ) def __init__(self, hs, canonical_json=True): resource.Resource.__init__(self) @@ -255,12 +257,28 @@ def __init__(self, hs, canonical_json=True): self.path_regexs = {} self.hs = hs - def register_paths(self, method, path_patterns, callback): + def register_paths(self, method, path_patterns, callback, servlet_classname): + """ + Registers a request handler against a regular expression. Later request URLs are + checked against these regular expressions in order to identify an appropriate + handler for that request. + + Args: + method (str): GET, POST etc + + path_patterns (Iterable[str]): A list of regular expressions to which + the request URLs are compared. + + callback (function): The handler for the request. Usually a Servlet + + servlet_classname (str): The name of the handler to be used in prometheus + and opentracing logs. + """ method = method.encode("utf-8") # method is bytes on py3 for path_pattern in path_patterns: logger.debug("Registering for %s %s", method, path_pattern.pattern) self.path_regexs.setdefault(method, []).append( - self._PathEntry(path_pattern, callback) + self._PathEntry(path_pattern, callback, servlet_classname) ) def render(self, request): @@ -275,13 +293,9 @@ async def _async_render(self, request): This checks if anyone has registered a callback for that method and path. """ - callback, group_dict = self._get_handler_for_request(request) + callback, servlet_classname, group_dict = self._get_handler_for_request(request) - servlet_instance = getattr(callback, "__self__", None) - if servlet_instance is not None: - servlet_classname = servlet_instance.__class__.__name__ - else: - servlet_classname = "%r" % callback + # Make sure we have a name for this handler in prometheus. request.request_metrics.name = servlet_classname # Now trigger the callback. If it returns a response, we send it @@ -311,7 +325,8 @@ def _get_handler_for_request(self, request): request (twisted.web.http.Request): Returns: - Tuple[Callable, dict[unicode, unicode]]: callback method, and the + Tuple[Callable, str, dict[unicode, unicode]]: callback method, the + label to use for that method in prometheus metrics, and the dict mapping keys to path components as specified in the handler's path match regexp. @@ -320,7 +335,7 @@ def _get_handler_for_request(self, request): None, or a tuple of (http code, response body). 
""" if request.method == b"OPTIONS": - return _options_handler, {} + return _options_handler, "options_request_handler", {} # Loop through all the registered callbacks to check if the method # and path regex match @@ -328,10 +343,10 @@ def _get_handler_for_request(self, request): m = path_entry.pattern.match(request.path.decode("ascii")) if m: # We found a match! - return path_entry.callback, m.groupdict() + return path_entry.callback, path_entry.servlet_classname, m.groupdict() # Huh. No one wanted to handle that? Fiiiiiine. Send 400. - return _unrecognised_request_handler, {} + return _unrecognised_request_handler, "unrecognised_request_handler", {} def _send_response( self, request, code, response_json_object, response_code_message=None diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 889038ff2585..f0ca7d9aba44 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -290,11 +290,13 @@ def register(self, http_server): for method in ("GET", "PUT", "POST", "OPTIONS", "DELETE"): if hasattr(self, "on_%s" % (method,)): + servlet_classname = self.__class__.__name__ method_handler = getattr(self, "on_%s" % (method,)) http_server.register_paths( method, patterns, - trace_servlet(self.__class__.__name__, method_handler), + trace_servlet(servlet_classname, method_handler), + servlet_classname, ) else: diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index fe482e279fd1..43c89e36ddc2 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -205,7 +205,7 @@ def register(self, http_server): args = "/".join("(?P<%s>[^/]+)" % (arg,) for arg in url_args) pattern = re.compile("^/_synapse/replication/%s/%s$" % (self.NAME, args)) - http_server.register_paths(method, [pattern], handler) + http_server.register_paths(method, [pattern], handler, self.__class__.__name__) def _cached_handler(self, request, txn_id, **kwargs): """Called on new incoming requests when caching is enabled. 
Checks diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index ee66838a0d34..d9c71261f211 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -59,9 +59,14 @@ def __init__(self, hs): def register(self, json_resource): PATTERN = "^/_synapse/admin/v1/send_server_notice" - json_resource.register_paths("POST", (re.compile(PATTERN + "$"),), self.on_POST) json_resource.register_paths( - "PUT", (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"),), self.on_PUT + "POST", (re.compile(PATTERN + "$"),), self.on_POST, self.__class__.__name__ + ) + json_resource.register_paths( + "PUT", + (re.compile(PATTERN + "/(?P<txn_id>[^/]*)$"),), + self.on_PUT, + self.__class__.__name__, ) @defer.inlineCallbacks diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 7709c2d705c2..6276e97f8923 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -67,11 +67,17 @@ def register(self, http_server): register_txn_path(self, PATTERNS, http_server) # define CORS for all of /rooms in RoomCreateRestServlet for simplicity http_server.register_paths( - "OPTIONS", client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS + "OPTIONS", + client_patterns("/rooms(?:/.*)?$", v1=True), + self.on_OPTIONS, + self.__class__.__name__, ) # define CORS for /createRoom[/txnid] http_server.register_paths( - "OPTIONS", client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS + "OPTIONS", + client_patterns("/createRoom(?:/.*)?$", v1=True), + self.on_OPTIONS, + self.__class__.__name__, ) def on_PUT(self, request, txn_id): @@ -116,16 +122,28 @@ def register(self, http_server): ) http_server.register_paths( - "GET", client_patterns(state_key, v1=True), self.on_GET + "GET", + client_patterns(state_key, v1=True), + self.on_GET, + self.__class__.__name__, ) http_server.register_paths( - "PUT", client_patterns(state_key, v1=True), self.on_PUT + "PUT", + client_patterns(state_key, v1=True), + self.on_PUT, + self.__class__.__name__, ) http_server.register_paths( - "GET", client_patterns(no_state_key, v1=True), self.on_GET_no_state_key + "GET", + client_patterns(no_state_key, v1=True), + self.on_GET_no_state_key, + self.__class__.__name__, ) http_server.register_paths( - "PUT", client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key + "PUT", + client_patterns(no_state_key, v1=True), + self.on_PUT_no_state_key, + self.__class__.__name__, ) def on_GET_no_state_key(self, request, room_id, event_type): @@ -845,18 +863,23 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False): with_get: True to also register respective GET paths for the PUTs.
""" http_server.register_paths( - "POST", client_patterns(regex_string + "$", v1=True), servlet.on_POST + "POST", + client_patterns(regex_string + "$", v1=True), + servlet.on_POST, + servlet.__class__.__name__, ) http_server.register_paths( "PUT", client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_PUT, + servlet.__class__.__name__, ) if with_get: http_server.register_paths( "GET", client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_GET, + servlet.__class__.__name__, ) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 6e52f6d284f7..9e9a639055fd 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -72,11 +72,13 @@ def register(self, http_server): "POST", client_patterns(self.PATTERN + "$", releases=()), self.on_PUT_or_POST, + self.__class__.__name__, ) http_server.register_paths( "PUT", client_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), self.on_PUT, + self.__class__.__name__, ) def on_PUT(self, request, *args, **kwargs): diff --git a/tests/test_server.py b/tests/test_server.py index ba08483a4b69..2a7d407c98fc 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -61,7 +61,10 @@ def _callback(request, **kwargs): res = JsonResource(self.homeserver) res.register_paths( - "GET", [re.compile("^/_matrix/foo/(?P[^/]*)$")], _callback + "GET", + [re.compile("^/_matrix/foo/(?P[^/]*)$")], + _callback, + "test_servlet", ) request, channel = make_request( @@ -82,7 +85,9 @@ def _callback(request, **kwargs): raise Exception("boo") res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -105,7 +110,9 @@ def _callback(request, **kwargs): return make_deferred_yieldable(d) res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -122,7 +129,9 @@ def _callback(request, **kwargs): raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN) res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo") render(request, res, self.reactor) @@ -143,7 +152,9 @@ def _callback(request, **kwargs): self.fail("shouldn't ever get here") res = JsonResource(self.homeserver) - res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback) + res.register_paths( + "GET", [re.compile("^/_matrix/foo$")], _callback, "test_servlet" + ) request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar") render(request, res, self.reactor) diff --git a/tests/utils.py b/tests/utils.py index 8a94ce0b475d..99a3deae215c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -471,7 +471,7 @@ def trigger( raise KeyError("No event can handle %s" % path) - def register_paths(self, method, path_patterns, callback): + def register_paths(self, method, path_patterns, callback, servlet_name): for path_pattern in path_patterns: self.callbacks.append((method, path_pattern, callback)) 
From 2d573e2e2b7127882ab2c221fd554371e0274c76 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 24 Jul 2019 13:38:33 +0100 Subject: [PATCH 79/80] 1.2.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/5734.bugfix | 1 - synapse/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/5734.bugfix diff --git a/CHANGES.md b/CHANGES.md index bb6bcb75ed94..be3dc37137c4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.2.0rc2 (2019-07-24) +============================= + +Bugfixes +-------- + +- Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. ([\#5734](https://github.com/matrix-org/synapse/issues/5734)) + + Synapse 1.2.0rc1 (2019-07-22) ============================= diff --git a/changelog.d/5734.bugfix b/changelog.d/5734.bugfix deleted file mode 100644 index 33aea5a94cb7..000000000000 --- a/changelog.d/5734.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression introduced in v1.2.0rc1 which led to incorrect labels on some prometheus metrics. diff --git a/synapse/__init__.py b/synapse/__init__.py index f26e49da3623..ca14545e4de2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ except ImportError: pass -__version__ = "1.2.0rc1" +__version__ = "1.2.0rc2" From c0a1301ccd91cdfbb079f675b3c3dc305f876be7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 25 Jul 2019 14:10:32 +0100 Subject: [PATCH 80/80] 1.2.0 --- CHANGES.md | 6 ++++++ debian/changelog | 7 +++++-- synapse/__init__.py | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index be3dc37137c4..d655723326f7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.2.0 (2019-07-25) +========================== + +No significant changes. + + Synapse 1.2.0rc2 (2019-07-24) ============================= diff --git a/debian/changelog b/debian/changelog index 8aba444f1d67..aafdd1cde205 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,4 @@ -matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.2.0) stable; urgency=medium [ Amber Brown ] * Update logging config defaults to match API changes in Synapse. @@ -6,7 +6,10 @@ matrix-synapse-py3 (1.1.0-1) UNRELEASED; urgency=medium [ Richard van der Hoff ] * Add Recommends and Depends for some libraries which you probably want. - -- Erik Johnston Thu, 04 Jul 2019 13:59:02 +0100 + [ Synapse Packaging team ] + * New synapse release 1.2.0. + + -- Synapse Packaging team Thu, 25 Jul 2019 14:10:07 +0100 matrix-synapse-py3 (1.1.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index ca14545e4de2..3435de4e2f12 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -35,4 +35,4 @@ except ImportError: pass -__version__ = "1.2.0rc2" +__version__ = "1.2.0"
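A quick post-release smoke check for packagers; a minimal sketch, assuming the 1.2.0 package is installed in the active Python environment:

    import synapse

    # __version__ comes from synapse/__init__.py, updated in the final hunk above.
    assert synapse.__version__ == "1.2.0", synapse.__version__
    print("Running Synapse", synapse.__version__)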