From dd2d32dcdb3238735aeeeaff18e5c754b1d50be9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Apr 2021 11:07:47 +0100 Subject: [PATCH 01/40] Add type hints to presence handler (#9885) --- changelog.d/9885.misc | 1 + synapse/handlers/presence.py | 159 ++++++++++++++++++++--------------- 2 files changed, 90 insertions(+), 70 deletions(-) create mode 100644 changelog.d/9885.misc diff --git a/changelog.d/9885.misc b/changelog.d/9885.misc new file mode 100644 index 000000000..492fccea4 --- /dev/null +++ b/changelog.d/9885.misc @@ -0,0 +1 @@ +Add type hints to presence handler. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 969c73c1e..e9f618bb5 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -28,6 +28,7 @@ from contextlib import contextmanager from typing import ( TYPE_CHECKING, + Callable, Collection, Dict, FrozenSet, @@ -232,23 +233,23 @@ async def bump_presence_active_time(self, user: UserID): """ async def update_external_syncs_row( - self, process_id, user_id, is_syncing, sync_time_msec - ): + self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + ) -> None: """Update the syncing users for an external process as a delta. This is a no-op when presence is handled by a different worker. Args: - process_id (str): An identifier for the process the users are + process_id: An identifier for the process the users are syncing against. This allows synapse to process updates as user start and stop syncing against a given process. - user_id (str): The user who has started or stopped syncing - is_syncing (bool): Whether or not the user is now syncing - sync_time_msec(int): Time in ms when the user was last syncing + user_id: The user who has started or stopped syncing + is_syncing: Whether or not the user is now syncing + sync_time_msec: Time in ms when the user was last syncing """ pass - async def update_external_syncs_clear(self, process_id): + async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process as offline. @@ -304,7 +305,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class WorkerPresenceHandler(BasePresenceHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs @@ -327,7 +328,7 @@ def __init__(self, hs): # user_id -> last_sync_ms. Lists the users that have stopped syncing but # we haven't notified the presence writer of that yet - self.users_going_offline = {} + self.users_going_offline = {} # type: Dict[str, int] self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) @@ -346,24 +347,21 @@ def __init__(self, hs): self._on_shutdown, ) - def _on_shutdown(self): + def _on_shutdown(self) -> None: if self._presence_enabled: self.hs.get_tcp_replication().send_command( ClearUserSyncsCommand(self.instance_id) ) - def send_user_sync(self, user_id, is_syncing, last_sync_ms): + def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: if self._presence_enabled: self.hs.get_tcp_replication().send_user_sync( self.instance_id, user_id, is_syncing, last_sync_ms ) - def mark_as_coming_online(self, user_id): + def mark_as_coming_online(self, user_id: str) -> None: """A user has started syncing. Send a UserSync to the presence writer, unless they had recently stopped syncing. 
- - Args: - user_id (str) """ going_offline = self.users_going_offline.pop(user_id, None) if not going_offline: @@ -371,18 +369,15 @@ def mark_as_coming_online(self, user_id): # were offline self.send_user_sync(user_id, True, self.clock.time_msec()) - def mark_as_going_offline(self, user_id): + def mark_as_going_offline(self, user_id: str) -> None: """A user has stopped syncing. We wait before notifying the presence writer as its likely they'll come back soon. This allows us to avoid sending a stopped syncing immediately followed by a started syncing notification to the presence writer - - Args: - user_id (str) """ self.users_going_offline[user_id] = self.clock.time_msec() - def send_stop_syncing(self): + def send_stop_syncing(self) -> None: """Check if there are any users who have stopped syncing a while ago and haven't come back yet. If there are poke the presence writer about them. """ @@ -430,7 +425,9 @@ def _user_syncing(): return _user_syncing() - async def notify_from_replication(self, states, stream_id): + async def notify_from_replication( + self, states: List[UserPresenceState], stream_id: int + ) -> None: parties = await get_interested_parties(self.store, self.presence_router, states) room_ids_to_states, users_to_states = parties @@ -478,7 +475,12 @@ def get_currently_syncing_users_for_replication(self) -> Iterable[str]: if count > 0 ] - async def set_state(self, target_user, state, ignore_status_msg=False): + async def set_state( + self, + target_user: UserID, + state: JsonDict, + ignore_status_msg: bool = False, + ) -> None: """Set the presence state of the user.""" presence = state["presence"] @@ -508,7 +510,7 @@ async def set_state(self, target_user, state, ignore_status_msg=False): ignore_status_msg=ignore_status_msg, ) - async def bump_presence_active_time(self, user): + async def bump_presence_active_time(self, user: UserID) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -592,8 +594,8 @@ def __init__(self, hs: "HomeServer"): # we assume that all the sync requests on that process have stopped. # Stored as a dict from process_id to set of user_id, and a dict of # process_id to millisecond timestamp last updated. - self.external_process_to_current_syncs = {} # type: Dict[int, Set[str]] - self.external_process_last_updated_ms = {} # type: Dict[int, int] + self.external_process_to_current_syncs = {} # type: Dict[str, Set[str]] + self.external_process_last_updated_ms = {} # type: Dict[str, int] self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") @@ -633,7 +635,7 @@ def run_persister(): self._event_pos = self.store.get_current_events_token() self._event_processing = False - async def _on_shutdown(self): + async def _on_shutdown(self) -> None: """Gets called when shutting down. This lets us persist any updates that we haven't yet persisted, e.g. updates that only changes some internal timers. This allows changes to persist across startup without having to @@ -662,7 +664,7 @@ async def _on_shutdown(self): ) logger.info("Finished _on_shutdown") - async def _persist_unpersisted_changes(self): + async def _persist_unpersisted_changes(self) -> None: """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. 
""" @@ -762,7 +764,7 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: states, destinations ) - async def _handle_timeouts(self): + async def _handle_timeouts(self) -> None: """Checks the presence of users that have timed out and updates as appropriate. """ @@ -814,7 +816,7 @@ async def _handle_timeouts(self): return await self._update_states(changes) - async def bump_presence_active_time(self, user): + async def bump_presence_active_time(self, user: UserID) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -911,17 +913,17 @@ def get_currently_syncing_users_for_replication(self) -> Iterable[str]: return [] async def update_external_syncs_row( - self, process_id, user_id, is_syncing, sync_time_msec - ): + self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + ) -> None: """Update the syncing users for an external process as a delta. Args: - process_id (str): An identifier for the process the users are + process_id: An identifier for the process the users are syncing against. This allows synapse to process updates as user start and stop syncing against a given process. - user_id (str): The user who has started or stopped syncing - is_syncing (bool): Whether or not the user is now syncing - sync_time_msec(int): Time in ms when the user was last syncing + user_id: The user who has started or stopped syncing + is_syncing: Whether or not the user is now syncing + sync_time_msec: Time in ms when the user was last syncing """ with (await self.external_sync_linearizer.queue(process_id)): prev_state = await self.current_state_for_user(user_id) @@ -958,7 +960,7 @@ async def update_external_syncs_row( self.external_process_last_updated_ms[process_id] = self.clock.time_msec() - async def update_external_syncs_clear(self, process_id): + async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process as offline. @@ -979,12 +981,12 @@ async def update_external_syncs_clear(self, process_id): ) self.external_process_last_updated_ms.pop(process_id, None) - async def current_state_for_user(self, user_id): + async def current_state_for_user(self, user_id: str) -> UserPresenceState: """Get the current presence state for a user.""" res = await self.current_state_for_users([user_id]) return res[user_id] - async def _persist_and_notify(self, states): + async def _persist_and_notify(self, states: List[UserPresenceState]) -> None: """Persist states in the database, poke the notifier and send to interested remote servers """ @@ -1005,7 +1007,7 @@ async def _persist_and_notify(self, states): # stream (which is updated by `store.update_presence`). 
await self.maybe_send_presence_to_interested_destinations(states) - async def incoming_presence(self, origin, content): + async def incoming_presence(self, origin: str, content: JsonDict) -> None: """Called when we receive a `m.presence` EDU from a remote server.""" if not self._presence_enabled: return @@ -1055,7 +1057,9 @@ async def incoming_presence(self, origin, content): federation_presence_counter.inc(len(updates)) await self._update_states(updates) - async def set_state(self, target_user, state, ignore_status_msg=False): + async def set_state( + self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False + ) -> None: """Set the presence state of the user.""" status_msg = state.get("status_msg", None) presence = state["presence"] @@ -1089,7 +1093,7 @@ async def set_state(self, target_user, state, ignore_status_msg=False): await self._update_states([prev_state.copy_and_replace(**new_fields)]) - async def is_visible(self, observed_user, observer_user): + async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool: """Returns whether a user can see another user's presence.""" observer_room_ids = await self.store.get_rooms_for_user( observer_user.to_string() @@ -1144,7 +1148,7 @@ async def get_all_presence_updates( ) return rows - def notify_new_event(self): + def notify_new_event(self) -> None: """Called when new events have happened. Handles users and servers joining rooms and require being sent presence. """ @@ -1163,7 +1167,7 @@ async def _process_presence(): run_as_background_process("presence.notify_new_event", _process_presence) - async def _unsafe_process(self): + async def _unsafe_process(self) -> None: # Loop round handling deltas until we're up to date while True: with Measure(self.clock, "presence_delta"): @@ -1188,7 +1192,7 @@ async def _unsafe_process(self): max_pos ) - async def _handle_state_delta(self, deltas): + async def _handle_state_delta(self, deltas: List[JsonDict]) -> None: """Process current state deltas to find new joins that need to be handled. """ @@ -1311,7 +1315,7 @@ async def _on_user_joined_room( return [remote_host], states -def should_notify(old_state, new_state): +def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool: """Decides if a presence state change should be sent to interested parties.""" if old_state == new_state: return False @@ -1347,7 +1351,9 @@ def should_notify(old_state, new_state): return False -def format_user_presence_state(state, now, include_user_id=True): +def format_user_presence_state( + state: UserPresenceState, now: int, include_user_id: bool = True +) -> JsonDict: """Convert UserPresenceState to a format that can be sent down to clients and to other servers. 
@@ -1385,11 +1391,11 @@ def __init__(self, hs: "HomeServer"): @log_function async def get_new_events( self, - user, - from_key, - room_ids=None, - include_offline=True, - explicit_room_id=None, + user: UserID, + from_key: Optional[int], + room_ids: Optional[List[str]] = None, + include_offline: bool = True, + explicit_room_id: Optional[str] = None, **kwargs, ) -> Tuple[List[UserPresenceState], int]: # The process for getting presence events are: @@ -1594,7 +1600,7 @@ def _filter_offline_presence_state( if update.state != PresenceState.OFFLINE ] - def get_current_key(self): + def get_current_key(self) -> int: return self.store.get_current_presence_token() @cached(num_args=2, cache_context=True) @@ -1654,15 +1660,20 @@ async def _get_interested_in( return users_interested_in -def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): +def handle_timeouts( + user_states: List[UserPresenceState], + is_mine_fn: Callable[[str], bool], + syncing_user_ids: Set[str], + now: int, +) -> List[UserPresenceState]: """Checks the presence of users that have timed out and updates as appropriate. Args: - user_states(list): List of UserPresenceState's to check. - is_mine_fn (fn): Function that returns if a user_id is ours - syncing_user_ids (set): Set of user_ids with active syncs. - now (int): Current time in ms. + user_states: List of UserPresenceState's to check. + is_mine_fn: Function that returns if a user_id is ours + syncing_user_ids: Set of user_ids with active syncs. + now: Current time in ms. Returns: List of UserPresenceState updates @@ -1679,14 +1690,16 @@ def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): return list(changes.values()) -def handle_timeout(state, is_mine, syncing_user_ids, now): +def handle_timeout( + state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int +) -> Optional[UserPresenceState]: """Checks the presence of the user to see if any of the timers have elapsed Args: - state (UserPresenceState) - is_mine (bool): Whether the user is ours - syncing_user_ids (set): Set of user_ids with active syncs. - now (int): Current time in ms. + state + is_mine: Whether the user is ours + syncing_user_ids: Set of user_ids with active syncs. + now: Current time in ms. Returns: A UserPresenceState update or None if no update. @@ -1738,23 +1751,29 @@ def handle_timeout(state, is_mine, syncing_user_ids, now): return state if changed else None -def handle_update(prev_state, new_state, is_mine, wheel_timer, now): +def handle_update( + prev_state: UserPresenceState, + new_state: UserPresenceState, + is_mine: bool, + wheel_timer: WheelTimer, + now: int, +) -> Tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. 2. Check if we should notify anyone. 
Args: - prev_state (UserPresenceState) - new_state (UserPresenceState) - is_mine (bool): Whether the user is ours - wheel_timer (WheelTimer) - now (int): Time now in ms + prev_state + new_state + is_mine: Whether the user is ours + wheel_timer + now: Time now in ms Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: - new_state: is the state to actually persist - - persist_and_notify (bool): whether to persist and notify people - - federation_ping (bool): whether we should send a ping over federation + - persist_and_notify: whether to persist and notify people + - federation_ping: whether we should send a ping over federation """ user_id = new_state.user_id From 391bfe9a7b7b22c3dbee9f9e02071fd5c1730ab5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Apr 2021 11:59:28 +0100 Subject: [PATCH 02/40] Reduce memory footprint of caches (#9886) --- changelog.d/9886.misc | 1 + synapse/util/caches/lrucache.py | 77 +++++++++++++++++++++++++-------- 2 files changed, 60 insertions(+), 18 deletions(-) create mode 100644 changelog.d/9886.misc diff --git a/changelog.d/9886.misc b/changelog.d/9886.misc new file mode 100644 index 000000000..8ff869e65 --- /dev/null +++ b/changelog.d/9886.misc @@ -0,0 +1 @@ +Reduce memory usage of the LRU caches. diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index a21d34fcb..10b0ec6b7 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -17,8 +17,10 @@ from typing import ( Any, Callable, + Collection, Generic, Iterable, + List, Optional, Type, TypeVar, @@ -57,13 +59,56 @@ class _Node: __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] def __init__( - self, prev_node, next_node, key, value, callbacks: Optional[set] = None + self, + prev_node, + next_node, + key, + value, + callbacks: Collection[Callable[[], None]] = (), ): self.prev_node = prev_node self.next_node = next_node self.key = key self.value = value - self.callbacks = callbacks or set() + + # Set of callbacks to run when the node gets deleted. We store as a list + # rather than a set to keep memory usage down (and since we expect few + # entries per node, the performance of checking for duplication in a + # list vs using a set is negligible). + # + # Note that we store this as an optional list to keep the memory + # footprint down. Storing `None` is free as its a singleton, while empty + # lists are 56 bytes (and empty sets are 216 bytes, if we did the naive + # thing and used sets). + self.callbacks = None # type: Optional[List[Callable[[], None]]] + + self.add_callbacks(callbacks) + + def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None: + """Add to stored list of callbacks, removing duplicates.""" + + if not callbacks: + return + + if not self.callbacks: + self.callbacks = [] + + for callback in callbacks: + if callback not in self.callbacks: + self.callbacks.append(callback) + + def run_and_clear_callbacks(self) -> None: + """Run all callbacks and clear the stored list of callbacks. Used when + the node is being deleted. 
+ """ + + if not self.callbacks: + return + + for callback in self.callbacks: + callback() + + self.callbacks = None class LruCache(Generic[KT, VT]): @@ -177,10 +222,10 @@ def cache_len(): self.len = synchronized(cache_len) - def add_node(key, value, callbacks: Optional[set] = None): + def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): prev_node = list_root next_node = prev_node.next_node - node = _Node(prev_node, next_node, key, value, callbacks or set()) + node = _Node(prev_node, next_node, key, value, callbacks) prev_node.next_node = node next_node.prev_node = node cache[key] = node @@ -211,16 +256,15 @@ def delete_node(node): deleted_len = size_callback(node.value) cached_cache_len[0] -= deleted_len - for cb in node.callbacks: - cb() - node.callbacks.clear() + node.run_and_clear_callbacks() + return deleted_len @overload def cache_get( key: KT, default: Literal[None] = None, - callbacks: Iterable[Callable[[], None]] = ..., + callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., ) -> Optional[VT]: ... @@ -229,7 +273,7 @@ def cache_get( def cache_get( key: KT, default: T, - callbacks: Iterable[Callable[[], None]] = ..., + callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., ) -> Union[T, VT]: ... @@ -238,13 +282,13 @@ def cache_get( def cache_get( key: KT, default: Optional[T] = None, - callbacks: Iterable[Callable[[], None]] = (), + callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, ): node = cache.get(key, None) if node is not None: move_node_to_front(node) - node.callbacks.update(callbacks) + node.add_callbacks(callbacks) if update_metrics and metrics: metrics.inc_hits() return node.value @@ -260,10 +304,8 @@ def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): # We sometimes store large objects, e.g. dicts, which cause # the inequality check to take a long time. So let's only do # the check if we have some callbacks to call. - if node.callbacks and value != node.value: - for cb in node.callbacks: - cb() - node.callbacks.clear() + if value != node.value: + node.run_and_clear_callbacks() # We don't bother to protect this by value != node.value as # generally size_callback will be cheap compared with equality @@ -273,7 +315,7 @@ def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): cached_cache_len[0] -= size_callback(node.value) cached_cache_len[0] += size_callback(value) - node.callbacks.update(callbacks) + node.add_callbacks(callbacks) move_node_to_front(node) node.value = value @@ -326,8 +368,7 @@ def cache_clear() -> None: list_root.next_node = list_root list_root.prev_node = list_root for node in cache.values(): - for cb in node.callbacks: - cb() + node.run_and_clear_callbacks() cache.clear() if size_callback: cached_cache_len[0] = 0 From 10a08ab88ad423bfca86983808c47f34a601ec9c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 28 Apr 2021 07:44:52 -0400 Subject: [PATCH 03/40] Use the parent's logging context name for runWithConnection. (#9895) This fixes a regression where the logging context for runWithConnection was reported as runWithConnection instead of the connection name, e.g. "POST-XYZ". 
--- changelog.d/9895.bugfix | 1 + synapse/storage/database.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9895.bugfix diff --git a/changelog.d/9895.bugfix b/changelog.d/9895.bugfix new file mode 100644 index 000000000..1053f975b --- /dev/null +++ b/changelog.d/9895.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index bd39c095a..a761ad603 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -715,7 +715,9 @@ def inner_func(conn, *args, **kwargs): # pool). assert not self.engine.in_transaction(conn) - with LoggingContext("runWithConnection", parent_context) as context: + with LoggingContext( + str(curr_context), parent_context=parent_context + ) as context: sched_duration_sec = monotonic_time() - start_time sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) From 0085dc5abc614579f3adbd9e6d2cbdd41facef00 Mon Sep 17 00:00:00 2001 From: ThibF Date: Thu, 29 Apr 2021 09:31:45 +0000 Subject: [PATCH 04/40] Delete room endpoint (#9889) Support deleting a room through a DELETE request and mark the previous endpoint as deprecated in the documentation. Signed-off-by: Thibault Ferrante --- changelog.d/9889.feature | 1 + changelog.d/9889.removal | 1 + docs/admin_api/rooms.md | 11 ++- synapse/rest/admin/rooms.py | 134 +++++++++++++++++++++++----------- tests/rest/admin/test_room.py | 45 +++++++----- 5 files changed, 128 insertions(+), 64 deletions(-) create mode 100644 changelog.d/9889.feature create mode 100644 changelog.d/9889.removal diff --git a/changelog.d/9889.feature b/changelog.d/9889.feature new file mode 100644 index 000000000..74d46f222 --- /dev/null +++ b/changelog.d/9889.feature @@ -0,0 +1 @@ +Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`. \ No newline at end of file diff --git a/changelog.d/9889.removal b/changelog.d/9889.removal new file mode 100644 index 000000000..398b9e129 --- /dev/null +++ b/changelog.d/9889.removal @@ -0,0 +1 @@ +Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. \ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index bc737b30f..01d388242 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -427,7 +427,7 @@ the new room. Users on other servers will be unaffected. The API is: ``` -POST /_synapse/admin/v1/rooms/<room_id>/delete +DELETE /_synapse/admin/v1/rooms/<room_id> ``` with a body of: @@ -528,6 +528,15 @@ You will have to manually handle, if you so choose, the following: * Users that would have been booted from the room (and will have been force-joined to the Content Violation room). * Removal of the Content Violation room if desired. +## Deprecated endpoint + +The previous deprecated API will be removed in a future release, it was: + +``` +POST /_synapse/admin/v1/rooms/<room_id>/delete +``` + +It behaves the same way as the current endpoint, except for the path and the HTTP method.
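For illustration, a minimal sketch of driving the new endpoint from a script, assuming a homeserver reachable at localhost:8008, a valid admin access token, and the third-party `requests` library; the host, token and room ID below are placeholders rather than values taken from the patch:

```python
# Hedged sketch: host, token and room ID are placeholders, not from the patch.
import requests

response = requests.delete(
    "http://localhost:8008/_synapse/admin/v1/rooms/%21someroom%3Aexample.org",
    headers={"Authorization": "Bearer <admin_access_token>"},
    # The request body takes the same optional parameters as the old
    # POST .../delete endpoint, e.g. block, purge and force_purge.
    json={"block": True, "purge": True, "force_purge": False},
)
response.raise_for_status()
print(response.json())
```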
# Make Room Admin API diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index d0cf12174..f289ffe3d 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -37,9 +37,11 @@ from synapse.util import json_decoder if TYPE_CHECKING: + from synapse.api.auth import Auth + from synapse.handlers.pagination import PaginationHandler + from synapse.handlers.room import RoomShutdownHandler from synapse.server import HomeServer - logger = logging.getLogger(__name__) @@ -146,50 +148,14 @@ def __init__(self, hs: "HomeServer"): async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) - - content = parse_json_object_from_request(request) - - block = content.get("block", False) - if not isinstance(block, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'block' must be a boolean, if given", - Codes.BAD_JSON, - ) - - purge = content.get("purge", True) - if not isinstance(purge, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'purge' must be a boolean, if given", - Codes.BAD_JSON, - ) - - force_purge = content.get("force_purge", False) - if not isinstance(force_purge, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'force_purge' must be a boolean, if given", - Codes.BAD_JSON, - ) - - ret = await self.room_shutdown_handler.shutdown_room( - room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, + return await _delete_room( + request, + room_id, + self.auth, + self.room_shutdown_handler, + self.pagination_handler, ) - # Purge room - if purge: - await self.pagination_handler.purge_room(room_id, force=force_purge) - - return (200, ret) - class ListRoomRestServlet(RestServlet): """ @@ -282,7 +248,22 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: class RoomRestServlet(RestServlet): - """Get room details. + """Manage a room. + + On GET : Get details of a room. + + On DELETE : Delete a room from server. + + It is a combination and improvement of shutdown and purge room. + + Shuts down a room by removing all local users from the room. + Blocking all future invites and joins to the room is optional. + + If desired any local aliases will be repointed to a new room + created by `new_room_user_id` and kicked users will be auto- + joined to the new room. + + If 'purge' is true, it will remove all traces of a room from the database. 
TODO: Add on_POST to allow room creation without joining the room """ @@ -293,6 +274,8 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() + self.room_shutdown_handler = hs.get_room_shutdown_handler() + self.pagination_handler = hs.get_pagination_handler() async def on_GET( self, request: SynapseRequest, room_id: str @@ -308,6 +291,17 @@ async def on_GET( return (200, ret) + async def on_DELETE( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + return await _delete_room( + request, + room_id, + self.auth, + self.room_shutdown_handler, + self.pagination_handler, + ) + class RoomMembersRestServlet(RestServlet): """ @@ -694,3 +688,55 @@ async def on_GET( ) return 200, results + + +async def _delete_room( + request: SynapseRequest, + room_id: str, + auth: "Auth", + room_shutdown_handler: "RoomShutdownHandler", + pagination_handler: "PaginationHandler", +) -> Tuple[int, JsonDict]: + requester = await auth.get_user_by_req(request) + await assert_user_is_admin(auth, requester.user) + + content = parse_json_object_from_request(request) + + block = content.get("block", False) + if not isinstance(block, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'block' must be a boolean, if given", + Codes.BAD_JSON, + ) + + purge = content.get("purge", True) + if not isinstance(purge, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'purge' must be a boolean, if given", + Codes.BAD_JSON, + ) + + force_purge = content.get("force_purge", False) + if not isinstance(force_purge, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'force_purge' must be a boolean, if given", + Codes.BAD_JSON, + ) + + ret = await room_shutdown_handler.shutdown_room( + room_id=room_id, + new_room_user_id=content.get("new_room_user_id"), + new_room_name=content.get("room_name"), + message=content.get("message"), + requester_user_id=requester.user.to_string(), + block=block, + ) + + # Purge room + if purge: + await pagination_handler.purge_room(room_id, force=force_purge) + + return (200, ret) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6b8418812..ee071c247 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -17,6 +17,8 @@ from typing import List, Optional from unittest.mock import Mock +from parameterized import parameterized_class + import synapse.rest.admin from synapse.api.constants import EventTypes, Membership from synapse.api.errors import Codes @@ -144,6 +146,13 @@ def _assert_peek(self, room_id, expect_code): ) +@parameterized_class( + ("method", "url_template"), + [ + ("POST", "/_synapse/admin/v1/rooms/%s/delete"), + ("DELETE", "/_synapse/admin/v1/rooms/%s"), + ], +) class DeleteRoomTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -175,7 +184,7 @@ def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as( self.other_user, tok=self.other_user_tok ) - self.url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id + self.url = self.url_template % self.room_id def test_requester_is_no_admin(self): """ @@ -183,7 +192,7 @@ def test_requester_is_no_admin(self): """ channel = self.make_request( - "POST", + self.method, self.url, json.dumps({}), access_token=self.other_user_tok, @@ -196,10 +205,10 @@ def test_room_does_not_exist(self): """ Check that unknown rooms/server return error 404. 
""" - url = "/_synapse/admin/v1/rooms/!unknown:test/delete" + url = self.url_template % "!unknown:test" channel = self.make_request( - "POST", + self.method, url, json.dumps({}), access_token=self.admin_user_tok, @@ -212,10 +221,10 @@ def test_room_is_not_valid(self): """ Check that invalid room names, return an error 400. """ - url = "/_synapse/admin/v1/rooms/invalidroom/delete" + url = self.url_template % "invalidroom" channel = self.make_request( - "POST", + self.method, url, json.dumps({}), access_token=self.admin_user_tok, @@ -234,7 +243,7 @@ def test_new_room_user_does_not_exist(self): body = json.dumps({"new_room_user_id": "@unknown:test"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -253,7 +262,7 @@ def test_new_room_user_is_not_local(self): body = json.dumps({"new_room_user_id": "@not:exist.bla"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -272,7 +281,7 @@ def test_block_is_not_bool(self): body = json.dumps({"block": "NotBool"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -288,7 +297,7 @@ def test_purge_is_not_bool(self): body = json.dumps({"purge": "NotBool"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -314,7 +323,7 @@ def test_purge_room_and_block(self): body = json.dumps({"block": True, "purge": True}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -347,7 +356,7 @@ def test_purge_room_and_not_block(self): body = json.dumps({"block": False, "purge": True}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -381,7 +390,7 @@ def test_block_room_and_not_purge(self): body = json.dumps({"block": False, "purge": False}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -426,10 +435,9 @@ def test_shutdown_room_consent(self): self._is_member(room_id=self.room_id, user_id=self.other_user) # Test that the admin can still send shutdown - url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id channel = self.make_request( - "POST", - url.encode("ascii"), + self.method, + self.url, json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) @@ -473,10 +481,9 @@ def test_shutdown_room_block_peek(self): self._is_member(room_id=self.room_id, user_id=self.other_user) # Test that the admin can still send shutdown - url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id channel = self.make_request( - "POST", - url.encode("ascii"), + self.method, + self.url, json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) From bb4b11846f3bdd539a1671eb8f1db8ee1a0bf57a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 29 Apr 2021 07:17:28 -0400 Subject: [PATCH 05/40] Add missing type hints to handlers and fix a Spam Checker type hint. (#9896) The user_may_create_room_alias method on spam checkers declared the room_alias parameter as a str when in reality it is passed a RoomAlias object. 
--- changelog.d/9896.bugfix | 1 + changelog.d/9896.misc | 1 + synapse/events/spamcheck.py | 5 ++- synapse/handlers/directory.py | 59 ++++++++++++++++------------ synapse/handlers/identity.py | 9 +++-- synapse/handlers/message.py | 24 +++++++---- synapse/handlers/room_member.py | 2 +- synapse/handlers/ui_auth/checkers.py | 35 +++++++++-------- 8 files changed, 82 insertions(+), 54 deletions(-) create mode 100644 changelog.d/9896.bugfix create mode 100644 changelog.d/9896.misc diff --git a/changelog.d/9896.bugfix b/changelog.d/9896.bugfix new file mode 100644 index 000000000..07a8e87f9 --- /dev/null +++ b/changelog.d/9896.bugfix @@ -0,0 +1 @@ +Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. diff --git a/changelog.d/9896.misc b/changelog.d/9896.misc new file mode 100644 index 000000000..e41c7d1f0 --- /dev/null +++ b/changelog.d/9896.misc @@ -0,0 +1 @@ +Add type hints to the `synapse.handlers` module. diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 7118d5f52..d5fa19509 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -20,6 +20,7 @@ from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour +from synapse.types import RoomAlias from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -113,7 +114,9 @@ async def user_may_create_room(self, userid: str) -> bool: return True - async def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool: + async def user_may_create_room_alias( + self, userid: str, room_alias: RoomAlias + ) -> bool: """Checks if a given user may create a room alias If this method returns false, the association request will be rejected. 
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 90932316f..de1b14cde 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -14,7 +14,7 @@ import logging import string -from typing import Iterable, List, Optional +from typing import TYPE_CHECKING, Iterable, List, Optional from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( @@ -27,15 +27,19 @@ SynapseError, ) from synapse.appservice import ApplicationService -from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id +from synapse.storage.databases.main.directory import RoomAliasMapping +from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.state = hs.get_state_handler() @@ -60,7 +64,7 @@ async def _create_association( room_id: str, servers: Optional[Iterable[str]] = None, creator: Optional[str] = None, - ): + ) -> None: # general association creation for both human users and app services for wchar in string.whitespace: @@ -104,8 +108,9 @@ async def create_association( """ user_id = requester.user.to_string() + room_alias_str = room_alias.to_string() - if len(room_alias.to_string()) > MAX_ALIAS_LENGTH: + if len(room_alias_str) > MAX_ALIAS_LENGTH: raise SynapseError( 400, "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH, @@ -114,7 +119,7 @@ async def create_association( service = requester.app_service if service: - if not service.is_interested_in_alias(room_alias.to_string()): + if not service.is_interested_in_alias(room_alias_str): raise SynapseError( 400, "This application service has not reserved this kind of alias.", @@ -138,7 +143,7 @@ async def create_association( raise AuthError(403, "This user is not permitted to create this alias") if not self.config.is_alias_creation_allowed( - user_id, room_id, room_alias.to_string() + user_id, room_id, room_alias_str ): # Lets just return a generic message, as there may be all sorts of # reasons why we said no. 
TODO: Allow configurable error messages @@ -211,7 +216,7 @@ async def delete_association( async def delete_appservice_association( self, service: ApplicationService, room_alias: RoomAlias - ): + ) -> None: if not service.is_interested_in_alias(room_alias.to_string()): raise SynapseError( 400, @@ -220,7 +225,7 @@ async def delete_appservice_association( ) await self._delete_association(room_alias) - async def _delete_association(self, room_alias: RoomAlias): + async def _delete_association(self, room_alias: RoomAlias) -> str: if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") @@ -228,17 +233,19 @@ async def _delete_association(self, room_alias: RoomAlias): return room_id - async def get_association(self, room_alias: RoomAlias): + async def get_association(self, room_alias: RoomAlias) -> JsonDict: room_id = None if self.hs.is_mine(room_alias): - result = await self.get_association_from_room_alias(room_alias) + result = await self.get_association_from_room_alias( + room_alias + ) # type: Optional[RoomAliasMapping] if result: room_id = result.room_id servers = result.servers else: try: - result = await self.federation.make_query( + fed_result = await self.federation.make_query( destination=room_alias.domain, query_type="directory", args={"room_alias": room_alias.to_string()}, @@ -248,13 +255,13 @@ async def get_association(self, room_alias: RoomAlias): except CodeMessageException as e: logging.warning("Error retrieving alias") if e.code == 404: - result = None + fed_result = None else: raise - if result and "room_id" in result and "servers" in result: - room_id = result["room_id"] - servers = result["servers"] + if fed_result and "room_id" in fed_result and "servers" in fed_result: + room_id = fed_result["room_id"] + servers = fed_result["servers"] if not room_id: raise SynapseError( @@ -275,7 +282,7 @@ async def get_association(self, room_alias: RoomAlias): return {"room_id": room_id, "servers": servers} - async def on_directory_query(self, args): + async def on_directory_query(self, args: JsonDict) -> JsonDict: room_alias = RoomAlias.from_string(args["room_alias"]) if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room Alias is not hosted on this homeserver") @@ -293,7 +300,7 @@ async def on_directory_query(self, args): async def _update_canonical_alias( self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias - ): + ) -> None: """ Send an updated canonical alias event if the removed alias was set as the canonical alias or listed in the alt_aliases field. @@ -344,7 +351,9 @@ async def _update_canonical_alias( ratelimit=False, ) - async def get_association_from_room_alias(self, room_alias: RoomAlias): + async def get_association_from_room_alias( + self, room_alias: RoomAlias + ) -> Optional[RoomAliasMapping]: result = await self.store.get_association_from_room_alias(room_alias) if not result: # Query AS to see if it exists @@ -372,7 +381,7 @@ def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> b # either no interested services, or no service with an exclusive lock return True - async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): + async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool: """Determine whether a user can delete an alias. 
One of the following must be true: @@ -394,14 +403,13 @@ async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): if not room_id: return False - res = await self.auth.check_can_change_room_list( + return await self.auth.check_can_change_room_list( room_id, UserID.from_string(user_id) ) - return res async def edit_published_room_list( self, requester: Requester, room_id: str, visibility: str - ): + ) -> None: """Edit the entry of the room in the published room list. requester @@ -469,7 +477,7 @@ async def edit_published_room_list( async def edit_published_appservice_room_list( self, appservice_id: str, network_id: str, room_id: str, visibility: str - ): + ) -> None: """Add or remove a room from the appservice/network specific public room list. @@ -499,5 +507,4 @@ async def get_aliases_for_room( room_id, requester.user.to_string() ) - aliases = await self.store.get_aliases_for_room(room_id) - return aliases + return await self.store.get_aliases_for_room(room_id) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 0b3b1fadb..33d16fbf9 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -17,7 +17,7 @@ """Utilities for interacting with Identity Servers""" import logging import urllib.parse -from typing import Awaitable, Callable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple from synapse.api.errors import ( CodeMessageException, @@ -41,13 +41,16 @@ from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) id_server_scheme = "https://" class IdentityHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) # An HTTP client for contacting trusted URLs. @@ -80,7 +83,7 @@ async def ratelimit_request_token_requests( request: SynapseRequest, medium: str, address: str, - ): + ) -> None: """Used to ratelimit requests to `/requestToken` by IP and address. Args: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ec8eb2167..49f8aa25e 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -15,7 +15,7 @@ # limitations under the License. import logging import random -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple from canonicaljson import encode_canonical_json @@ -66,7 +66,7 @@ class MessageHandler: """Contains some read only APIs to get state about a room""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -91,7 +91,7 @@ async def get_room_data( room_id: str, event_type: str, state_key: str, - ) -> dict: + ) -> Optional[EventBase]: """Get data from a room. Args: @@ -115,6 +115,10 @@ async def get_room_data( data = await self.state.get_current_state(room_id, event_type, state_key) elif membership == Membership.LEAVE: key = (event_type, state_key) + # If the membership is not JOIN, then the event ID should exist. 
+ assert ( + membership_event_id is not None + ), "check_user_in_room_or_world_readable returned invalid data" room_state = await self.state_store.get_state_for_events( [membership_event_id], StateFilter.from_types([key]) ) @@ -186,10 +190,12 @@ async def get_state_events( event = last_events[0] if visible_events: - room_state = await self.state_store.get_state_for_events( + room_state_events = await self.state_store.get_state_for_events( [event.event_id], state_filter=state_filter ) - room_state = room_state[event.event_id] + room_state = room_state_events[ + event.event_id + ] # type: Mapping[Any, EventBase] else: raise AuthError( 403, @@ -210,10 +216,14 @@ async def get_state_events( ) room_state = await self.store.get_events(state_ids.values()) elif membership == Membership.LEAVE: - room_state = await self.state_store.get_state_for_events( + # If the membership is not JOIN, then the event ID should exist. + assert ( + membership_event_id is not None + ), "check_user_in_room_or_world_readable returned invalid data" + room_state_events = await self.state_store.get_state_for_events( [membership_event_id], state_filter=state_filter ) - room_state = room_state[membership_event_id] + room_state = room_state_events[membership_event_id] now = self.clock.time_msec() events = await self._event_serializer.serialize_events( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2c5bada1d..20700fc5a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1044,7 +1044,7 @@ async def _is_server_notice_room(self, room_id: str) -> bool: class RoomMemberMasterHandler(RoomMemberHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.distributor = hs.get_distributor() diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 0eeb7c03f..5414ce77d 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging -from typing import Any +from typing import TYPE_CHECKING, Any from twisted.web.client import PartialDownloadError @@ -22,13 +22,16 @@ from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class UserInteractiveAuthChecker: """Abstract base class for an interactive auth checker""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): pass def is_enabled(self) -> bool: @@ -57,10 +60,10 @@ async def check_auth(self, authdict: dict, clientip: str) -> Any: class DummyAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.DUMMY - def is_enabled(self): + def is_enabled(self) -> bool: return True - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return True @@ -70,24 +73,24 @@ class TermsAuthChecker(UserInteractiveAuthChecker): def is_enabled(self): return True - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return True class RecaptchaAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.RECAPTCHA - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self._enabled = bool(hs.config.recaptcha_private_key) self._http_client = hs.get_proxied_http_client() self._url = hs.config.recaptcha_siteverify_api self._secret = hs.config.recaptcha_private_key - def is_enabled(self): + def is_enabled(self) -> bool: return self._enabled - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: try: user_response = authdict["response"] except KeyError: @@ -132,11 +135,11 @@ async def check_auth(self, authdict, clientip): class _BaseThreepidAuthChecker: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.hs = hs self.store = hs.get_datastore() - async def _check_threepid(self, medium, authdict): + async def _check_threepid(self, medium: str, authdict: dict) -> dict: if "threepid_creds" not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) @@ -206,31 +209,31 @@ async def _check_threepid(self, medium, authdict): class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): AUTH_TYPE = LoginType.EMAIL_IDENTITY - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) - def is_enabled(self): + def is_enabled(self) -> bool: return self.hs.config.threepid_behaviour_email in ( ThreepidBehaviour.REMOTE, ThreepidBehaviour.LOCAL, ) - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): AUTH_TYPE = LoginType.MSISDN - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) - def is_enabled(self): + def is_enabled(self) -> bool: return bool(self.hs.config.account_threepid_delegate_msisdn) - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("msisdn", authdict) From b85821aca2c3cfc1c732dfcc0c1d6758a263487a Mon Sep 17 00:00:00 2001 From: Andrew Morgan 
<1342360+anoadragon453@users.noreply.github.com> Date: Tue, 4 May 2021 13:28:59 +0100 Subject: [PATCH 06/40] Add port parameter to the sample config for psycopg2 args (#9911) Adds the `port` option with the default value to the sample config file. --- changelog.d/9911.doc | 1 + docs/sample_config.yaml | 1 + synapse/config/database.py | 1 + 3 files changed, 3 insertions(+) create mode 100644 changelog.d/9911.doc diff --git a/changelog.d/9911.doc b/changelog.d/9911.doc new file mode 100644 index 000000000..f7fd9f1ba --- /dev/null +++ b/changelog.d/9911.doc @@ -0,0 +1 @@ +Add `port` argument to the Postgres database sample config section. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index e0350279a..d013725cd 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -810,6 +810,7 @@ caches: # password: secretpassword # database: synapse # host: localhost +# port: 5432 # cp_min: 5 # cp_max: 10 # diff --git a/synapse/config/database.py b/synapse/config/database.py index 79a02706b..c76ef1e1d 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -58,6 +58,7 @@ # password: secretpassword # database: synapse # host: localhost +# port: 5432 # cp_min: 5 # cp_max: 10 # From e3bc4617fcd5c858ff02cf2d443b898db87ae8a5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 May 2021 15:14:22 +0100 Subject: [PATCH 07/40] Time external cache response time (#9904) --- changelog.d/9904.misc | 1 + synapse/replication/tcp/external_cache.py | 36 ++++++++++++++++------- 2 files changed, 27 insertions(+), 10 deletions(-) create mode 100644 changelog.d/9904.misc diff --git a/changelog.d/9904.misc b/changelog.d/9904.misc new file mode 100644 index 000000000..3db1e625a --- /dev/null +++ b/changelog.d/9904.misc @@ -0,0 +1 @@ +Time response time for external cache requests. 
diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index 1a3b051e3..b402f8281 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -15,7 +15,7 @@ import logging from typing import TYPE_CHECKING, Any, Optional -from prometheus_client import Counter +from prometheus_client import Counter, Histogram from synapse.logging.context import make_deferred_yieldable from synapse.util import json_decoder, json_encoder @@ -35,6 +35,20 @@ labelnames=["cache_name", "hit"], ) +response_timer = Histogram( + "synapse_external_cache_response_time_seconds", + "Time taken to get a response from Redis for a cache get/set request", + labelnames=["method"], + buckets=( + 0.001, + 0.002, + 0.005, + 0.01, + 0.02, + 0.05, + ), +) + logger = logging.getLogger(__name__) @@ -72,13 +86,14 @@ async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> No logger.debug("Caching %s %s: %r", cache_name, key, encoded_value) - return await make_deferred_yieldable( - self._redis_connection.set( - self._get_redis_key(cache_name, key), - encoded_value, - pexpire=expiry_ms, + with response_timer.labels("set").time(): + return await make_deferred_yieldable( + self._redis_connection.set( + self._get_redis_key(cache_name, key), + encoded_value, + pexpire=expiry_ms, + ) ) - ) async def get(self, cache_name: str, key: str) -> Optional[Any]: """Look up a key/value in the named cache.""" @@ -86,9 +101,10 @@ async def get(self, cache_name: str, key: str) -> Optional[Any]: if self._redis_connection is None: return None - result = await make_deferred_yieldable( - self._redis_connection.get(self._get_redis_key(cache_name, key)) - ) + with response_timer.labels("get").time(): + result = await make_deferred_yieldable( + self._redis_connection.get(self._get_redis_key(cache_name, key)) + ) logger.debug("Got cache result %s %s: %r", cache_name, key, result) From e9eb3549d32a6f93d07de8dbd5e1ebe54c8d8278 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Wed, 5 May 2021 13:37:56 +0000 Subject: [PATCH 08/40] Leave out optional keys from /sync (#9919) This leaves out all optional keys from /sync. This should be fine for all clients tested against conduit already, but it may break some clients, as such we should check, that at least most of them don't break horribly and maybe back out some of the individual changes. (We can probably always leave out groups for example, while the others may cause more issues.) Signed-off-by: Nicolas Werner --- changelog.d/9919.feature | 1 + synapse/rest/client/v2_alpha/sync.py | 62 +++++++++++++------ tests/rest/client/v2_alpha/test_sync.py | 30 +-------- .../test_resource_limits_server_notices.py | 8 ++- 4 files changed, 51 insertions(+), 50 deletions(-) create mode 100644 changelog.d/9919.feature diff --git a/changelog.d/9919.feature b/changelog.d/9919.feature new file mode 100644 index 000000000..07747505d --- /dev/null +++ b/changelog.d/9919.feature @@ -0,0 +1 @@ +Omit empty fields from the `/sync` response. Contributed by @deepbluev7. 
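As a rough sketch of what this means for consumers (the batch token below is a placeholder), a `/sync` response in which nothing changed can now shrink to little more than `next_batch`, so the optional sections should be read defensively:

```python
# Illustrative only: a minimal /sync body once empty optional sections such as
# rooms, presence, account_data, to_device, device_lists and groups are omitted.
minimal_sync = {"next_batch": "s72595_4483_1934"}  # placeholder batch token

# Clients should fall back to empty containers instead of assuming the keys exist.
joined_rooms = minimal_sync.get("rooms", {}).get("join", {})
changed_devices = minimal_sync.get("device_lists", {}).get("changed", [])
assert joined_rooms == {} and changed_devices == []
```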
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 95ee3f1b8..5f8565333 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -14,6 +14,7 @@ import itertools import logging +from collections import defaultdict from typing import TYPE_CHECKING, Tuple from synapse.api.constants import PresenceState @@ -229,24 +230,49 @@ async def encode_response(self, time_now, sync_result, access_token_id, filter): ) logger.debug("building sync response dict") - return { - "account_data": {"events": sync_result.account_data}, - "to_device": {"events": sync_result.to_device}, - "device_lists": { - "changed": list(sync_result.device_lists.changed), - "left": list(sync_result.device_lists.left), - }, - "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now), - "rooms": {"join": joined, "invite": invited, "leave": archived}, - "groups": { - "join": sync_result.groups.join, - "invite": sync_result.groups.invite, - "leave": sync_result.groups.leave, - }, - "device_one_time_keys_count": sync_result.device_one_time_keys_count, - "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, - "next_batch": await sync_result.next_batch.to_string(self.store), - } + + response: dict = defaultdict(dict) + response["next_batch"] = await sync_result.next_batch.to_string(self.store) + + if sync_result.account_data: + response["account_data"] = {"events": sync_result.account_data} + if sync_result.presence: + response["presence"] = SyncRestServlet.encode_presence( + sync_result.presence, time_now + ) + + if sync_result.to_device: + response["to_device"] = {"events": sync_result.to_device} + + if sync_result.device_lists.changed: + response["device_lists"]["changed"] = list(sync_result.device_lists.changed) + if sync_result.device_lists.left: + response["device_lists"]["left"] = list(sync_result.device_lists.left) + + if sync_result.device_one_time_keys_count: + response[ + "device_one_time_keys_count" + ] = sync_result.device_one_time_keys_count + if sync_result.device_unused_fallback_key_types: + response[ + "org.matrix.msc2732.device_unused_fallback_key_types" + ] = sync_result.device_unused_fallback_key_types + + if joined: + response["rooms"]["join"] = joined + if invited: + response["rooms"]["invite"] = invited + if archived: + response["rooms"]["leave"] = archived + + if sync_result.groups.join: + response["groups"]["join"] = sync_result.groups.join + if sync_result.groups.invite: + response["groups"]["invite"] = sync_result.groups.invite + if sync_result.groups.leave: + response["groups"]["leave"] = sync_result.groups.leave + + return response @staticmethod def encode_presence(events, time_now): diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index dbcbdf159..74be5176d 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -37,35 +37,7 @@ def test_sync_argless(self): channel = self.make_request("GET", "/sync") self.assertEqual(channel.code, 200) - self.assertTrue( - { - "next_batch", - "rooms", - "presence", - "account_data", - "to_device", - "device_lists", - }.issubset(set(channel.json_body.keys())) - ) - - def test_sync_presence_disabled(self): - """ - When presence is disabled, the key does not appear in /sync. 
- """ - self.hs.config.use_presence = False - - channel = self.make_request("GET", "/sync") - - self.assertEqual(channel.code, 200) - self.assertTrue( - { - "next_batch", - "rooms", - "account_data", - "to_device", - "device_lists", - }.issubset(set(channel.json_body.keys())) - ) + self.assertIn("next_batch", channel.json_body) class SyncFilterTestCase(unittest.HomeserverTestCase): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index d46521ccd..3245aa91c 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -306,8 +306,9 @@ def test_no_invite_without_notice(self): channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) - invites = channel.json_body["rooms"]["invite"] - self.assertEqual(len(invites), 0, invites) + self.assertNotIn( + "rooms", channel.json_body, "Got invites without server notice" + ) def test_invite_with_notice(self): """Tests that, if the MAU limit is hit, the server notices user invites each user @@ -364,7 +365,8 @@ def _trigger_notice_and_join(self): # We could also pick another user and sync with it, which would return an # invite to a system notices room, but it doesn't matter which user we're # using so we use the last one because it saves us an extra sync. - invites = channel.json_body["rooms"]["invite"] + if "rooms" in channel.json_body: + invites = channel.json_body["rooms"]["invite"] # Make sure we have an invite to process. self.assertEqual(len(invites), 1, invites) From d5305000f1c5799ffb6fcd64ad27e7bfd8ba2113 Mon Sep 17 00:00:00 2001 From: Christopher May-Townsend Date: Wed, 5 May 2021 16:33:04 +0100 Subject: [PATCH 09/40] Docker healthcheck timings - add startup delay and changed interval (#9913) * Add healthcheck startup delay by 5secs and reduced interval check to 15s to reduce waiting time for docker aware edge routers bringing an instance online --- changelog.d/9913.docker | 1 + docker/Dockerfile | 2 +- docker/README.md | 17 ++++++++++++++--- 3 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 changelog.d/9913.docker diff --git a/changelog.d/9913.docker b/changelog.d/9913.docker new file mode 100644 index 000000000..93835e14c --- /dev/null +++ b/changelog.d/9913.docker @@ -0,0 +1 @@ +Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. diff --git a/docker/Dockerfile b/docker/Dockerfile index 4f5cd06d7..2bdc607e6 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -88,5 +88,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp ENTRYPOINT ["/start.py"] -HEALTHCHECK --interval=1m --timeout=5s \ +HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ CMD curl -fSs http://localhost:8008/health || exit 1 diff --git a/docker/README.md b/docker/README.md index a7d1e670f..c8d3c4b3d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -191,6 +191,16 @@ whilst running the above `docker run` commands. ``` --no-healthcheck ``` + +## Disabling the healthcheck in docker-compose file + +If you wish to disable the healthcheck via docker-compose, append the following to your service configuration. 
+ +``` + healthcheck: + disable: true +``` + ## Setting custom healthcheck on docker run If you wish to point the healthcheck at a different port with docker command, add the following @@ -202,14 +212,15 @@ If you wish to point the healthcheck at a different port with docker command, ad ## Setting the healthcheck in docker-compose file You can add the following to set a custom healthcheck in a docker compose file. -You will need version >2.1 for this to work. +You will need docker-compose version >2.1 for this to work. ``` healthcheck: test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"] - interval: 1m - timeout: 10s + interval: 15s + timeout: 5s retries: 3 + start_period: 5s ``` ## Using jemalloc From d0aee697ac0587c005bc1048f5036979331f1101 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:49:34 +0100 Subject: [PATCH 10/40] Use get_current_users_in_room from store and not StateHandler (#9910) --- changelog.d/9910.bugfix | 1 + changelog.d/9910.feature | 1 + synapse/handlers/directory.py | 4 ++-- synapse/handlers/events.py | 2 +- synapse/handlers/message.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 6 +++--- synapse/state/__init__.py | 10 +++++++--- synapse/storage/_base.py | 1 + synapse/storage/databases/main/roommember.py | 8 ++++++-- synapse/storage/databases/main/user_directory.py | 4 +--- 12 files changed, 26 insertions(+), 17 deletions(-) create mode 100644 changelog.d/9910.bugfix create mode 100644 changelog.d/9910.feature diff --git a/changelog.d/9910.bugfix b/changelog.d/9910.bugfix new file mode 100644 index 000000000..06d523fd4 --- /dev/null +++ b/changelog.d/9910.bugfix @@ -0,0 +1 @@ +Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. diff --git a/changelog.d/9910.feature b/changelog.d/9910.feature new file mode 100644 index 000000000..54165cce1 --- /dev/null +++ b/changelog.d/9910.feature @@ -0,0 +1 @@ +Improve performance after joining a large room when presence is enabled. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index de1b14cde..4064a2b85 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -78,7 +78,7 @@ async def _create_association( # TODO(erikj): Add transactions. # TODO(erikj): Check if there is a current association. if not servers: - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) servers = {get_domain_from_id(u) for u in users} if not servers: @@ -270,7 +270,7 @@ async def get_association(self, room_alias: RoomAlias) -> JsonDict: Codes.NOT_FOUND, ) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) extra_servers = {get_domain_from_id(u) for u in users} servers = set(extra_servers) | set(servers) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index d82144d7f..f134f1e23 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -103,7 +103,7 @@ async def get_stream( # Send down presence. if event.state_key == auth_user_id: # Send down presence for everyone in the room. 
- users = await self.state.get_current_users_in_room( + users = await self.store.get_users_in_room( event.room_id ) # type: Iterable[str] else: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 49f8aa25e..393f17c3a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -258,7 +258,7 @@ async def get_joined_members(self, requester: Requester, room_id: str) -> dict: "Getting joined members after leaving is not implemented" ) - users_with_profile = await self.state.get_current_users_in_room(room_id) + users_with_profile = await self.store.get_users_in_room_with_profiles(room_id) # If this is an AS, double check that they are allowed to see the members. # This can either be because the AS user is in the room or because there diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ebbc23433..8e085dfbe 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1293,7 +1293,7 @@ async def _on_user_joined_room( remote_host = get_domain_from_id(user_id) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) user_ids = list(filter(self.is_mine_id, users)) states_d = await self.current_state_for_users(user_ids) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 5a888b794..fb4823a5c 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1327,7 +1327,7 @@ async def shutdown_room( new_room_id = None logger.info("Shutting down room %r", room_id) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) kicked_users = [] failed_to_kick_users = [] for user_id in users: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a9a3ee05c..0fcc1532d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1190,7 +1190,7 @@ async def _generate_sync_entry_for_device_list( # Step 1b, check for newly joined rooms for room_id in newly_joined_rooms: - joined_users = await self.state.get_current_users_in_room(room_id) + joined_users = await self.store.get_users_in_room(room_id) newly_joined_or_invited_users.update(joined_users) # TODO: Check that these users are actually new, i.e. either they @@ -1206,7 +1206,7 @@ async def _generate_sync_entry_for_device_list( # Now find users that we no longer track for room_id in newly_left_rooms: - left_users = await self.state.get_current_users_in_room(room_id) + left_users = await self.store.get_users_in_room(room_id) newly_left_users.update(left_users) # Remove any users that we still share a room with. @@ -1361,7 +1361,7 @@ async def _generate_sync_entry_for_presence( extra_users_ids = set(newly_joined_or_invited_users) for room_id in newly_joined_rooms: - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) extra_users_ids.update(users) extra_users_ids.discard(user.to_string()) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b3bd92d37..a1770f620 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -213,19 +213,23 @@ async def get_current_state_ids( return ret.state async def get_current_users_in_room( - self, room_id: str, latest_event_ids: Optional[List[str]] = None + self, room_id: str, latest_event_ids: List[str] ) -> Dict[str, ProfileInfo]: """ Get the users who are currently in a room. 
+ Note: This is much slower than using the equivalent method + `DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`, + so this should only be used when wanting the users at a particular point + in the room. + Args: room_id: The ID of the room. latest_event_ids: Precomputed list of latest event IDs. Will be computed if None. Returns: Dictionary of user IDs to their profileinfo. """ - if not latest_event_ids: - latest_event_ids = await self.store.get_latest_event_ids_in_room(room_id) + assert latest_event_ids is not None logger.debug("calling resolve_state_groups from get_current_users_in_room") diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 6b68d8720..3d98d3f5f 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -69,6 +69,7 @@ def _invalidate_state_caches( self._attempt_to_invalidate_cache("is_host_joined", (room_id, host)) self._attempt_to_invalidate_cache("get_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,)) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 2a8532f8c..5fc3bb5a7 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -205,8 +205,12 @@ async def get_users_in_room_with_profiles( def _get_users_in_room_with_profiles(txn) -> Dict[str, ProfileInfo]: sql = """ - SELECT user_id, display_name, avatar_url FROM room_memberships - WHERE room_id = ? AND membership = ? + SELECT state_key, display_name, avatar_url FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? """ txn.execute(sql, (room_id, Membership.JOIN)) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 7a082fdd2..a6bfb4902 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -142,8 +142,6 @@ async def _populate_user_directory_process_rooms(self, progress, batch_size): batch_size (int): Maximum number of state events to process per cycle. """ - state = self.hs.get_state_handler() - # If we don't have progress filed, delete everything. if not progress: await self.delete_all_from_user_dir() @@ -197,7 +195,7 @@ def _get_next_batch(txn): room_id ) - users_with_profile = await state.get_current_users_in_room(room_id) + users_with_profile = await self.get_users_in_room_with_profiles(room_id) user_ids = set(users_with_profile) # Update each user in the user directory. 
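To make the substitution above concrete, here is a small illustrative sketch, not taken from the patch, of the pattern directory.py now follows: fetch the room's user IDs from the store and derive the set of servers from them. The `get_domain_from_id` below is a simplified stand-in for the real helper in `synapse.types`.

```
from typing import Iterable, Set


def get_domain_from_id(user_id: str) -> str:
    # Simplified: a Matrix user ID has the form "@localpart:server.name".
    return user_id.split(":", 1)[1]


def servers_in_room(users: Iterable[str]) -> Set[str]:
    # `users` would come from store.get_users_in_room(room_id).
    return {get_domain_from_id(u) for u in users}


assert servers_in_room(
    ["@alice:example.org", "@bob:matrix.org", "@carol:example.org"]
) == {"example.org", "matrix.org"}
```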
From de8f0a03a3cc3a2327dfd3058c99e48067965079 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:53:22 +0100 Subject: [PATCH 11/40] Don't set the external cache if its been done recently (#9905) --- changelog.d/9905.feature | 1 + synapse/handlers/federation.py | 4 +++- synapse/handlers/message.py | 34 ++++++++++++++++++++++++++++++---- 3 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9905.feature diff --git a/changelog.d/9905.feature b/changelog.d/9905.feature new file mode 100644 index 000000000..96a0e7f09 --- /dev/null +++ b/changelog.d/9905.feature @@ -0,0 +1 @@ +Improve performance of sending events for worker-based deployments using Redis. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9d867aaf4..e8330a2b5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2446,7 +2446,9 @@ async def _check_event_auth( # If we are going to send this event over federation we precaclculate # the joined hosts. if event.internal_metadata.get_send_on_behalf_of(): - await self.event_creation_handler.cache_joined_hosts_for_event(event) + await self.event_creation_handler.cache_joined_hosts_for_event( + event, context + ) return context diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 393f17c3a..8729332d4 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -51,6 +51,7 @@ from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester from synapse.util import json_decoder, json_encoder from synapse.util.async_helpers import Linearizer +from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client @@ -457,6 +458,19 @@ def __init__(self, hs: "HomeServer"): self._external_cache = hs.get_external_cache() + # Stores the state groups we've recently added to the joined hosts + # external cache. Note that the timeout must be significantly less than + # the TTL on the external cache. + self._external_cache_joined_hosts_updates = ( + None + ) # type: Optional[ExpiringCache] + if self._external_cache.is_enabled(): + self._external_cache_joined_hosts_updates = ExpiringCache( + "_external_cache_joined_hosts_updates", + self.clock, + expiry_ms=30 * 60 * 1000, + ) + async def create_event( self, requester: Requester, @@ -967,7 +981,7 @@ async def handle_new_client_event( await self.action_generator.handle_push_actions_for_event(event, context) - await self.cache_joined_hosts_for_event(event) + await self.cache_joined_hosts_for_event(event, context) try: # If we're a worker we need to hit out to the master. @@ -1008,7 +1022,9 @@ async def handle_new_client_event( await self.store.remove_push_actions_from_staging(event.event_id) raise - async def cache_joined_hosts_for_event(self, event: EventBase) -> None: + async def cache_joined_hosts_for_event( + self, event: EventBase, context: EventContext + ) -> None: """Precalculate the joined hosts at the event, when using Redis, so that external federation senders don't have to recalculate it themselves. """ @@ -1016,6 +1032,9 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: if not self._external_cache.is_enabled(): return + # If external cache is enabled we should always have this. 
+ assert self._external_cache_joined_hosts_updates is not None + # We actually store two mappings, event ID -> prev state group, # state group -> joined hosts, which is much more space efficient # than event ID -> joined hosts. @@ -1023,16 +1042,21 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: # Note: We have to cache event ID -> prev state group, as we don't # store that in the DB. # - # Note: We always set the state group -> joined hosts cache, even if - # we already set it, so that the expiry time is reset. + # Note: We set the state group -> joined hosts cache if it hasn't been + # set for a while, so that the expiry time is reset. state_entry = await self.state.resolve_state_groups_for_events( event.room_id, event_ids=event.prev_event_ids() ) if state_entry.state_group: + if state_entry.state_group in self._external_cache_joined_hosts_updates: + return + joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry) + # Note that the expiry times must be larger than the expiry time in + # _external_cache_joined_hosts_updates. await self._external_cache.set( "event_to_prev_state_group", event.event_id, @@ -1046,6 +1070,8 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: expiry_ms=60 * 60 * 1000, ) + self._external_cache_joined_hosts_updates[state_entry.state_group] = None + async def _validate_canonical_alias( self, directory_handler, room_alias_str: str, expected_room_id: str ) -> None: From 1fb9a2d0bf2506ca6e5343cb340a441585ca1c07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:53:45 +0100 Subject: [PATCH 12/40] Limit how often GC happens by time. (#9902) Synapse can be quite memory intensive, and unless care is taken to tune the GC thresholds it can end up thrashing, causing noticable performance problems for large servers. We fix this by limiting how often we GC a given generation, regardless of current counts/thresholds. This does not help with the reverse problem where the thresholds are set too high, but that should only happen in situations where they've been manually configured. Adds a `gc_min_seconds_between` config option to override the defaults. Fixes #9890. --- changelog.d/9902.feature | 1 + docs/sample_config.yaml | 10 ++++++++++ synapse/app/generic_worker.py | 3 +++ synapse/app/homeserver.py | 3 +++ synapse/config/server.py | 31 ++++++++++++++++++++++++++++++- synapse/metrics/__init__.py | 18 ++++++++++++++++-- 6 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9902.feature diff --git a/changelog.d/9902.feature b/changelog.d/9902.feature new file mode 100644 index 000000000..4d9f324d4 --- /dev/null +++ b/changelog.d/9902.feature @@ -0,0 +1 @@ +Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d013725cd..f469d6e54 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -152,6 +152,16 @@ presence: # #gc_thresholds: [700, 10, 10] +# The minimum time in seconds between each GC for a generation, regardless of +# the GC thresholds. This ensures that we don't do GC too frequently. +# +# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive +# generation 0 GCs, etc. +# +# Defaults to `[1s, 10s, 30s]`. +# +#gc_min_interval: [0.5s, 30s, 1m] + # Set the limit on the returned events in the timeline in the get # and sync operations. The default value is 100. 
-1 means no upper limit. # diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1a15ceee8..a3fe9a3f3 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -455,6 +455,9 @@ def start(config_options): synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + if config.server.gc_seconds: + synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds + hs = GenericWorkerServer( config.server_name, config=config, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8e78134bb..6a823da10 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -342,6 +342,9 @@ def setup(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts + if config.server.gc_seconds: + synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds + hs = SynapseHomeServer( config.server_name, config=config, diff --git a/synapse/config/server.py b/synapse/config/server.py index 21ca7b33e..c290a35a9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,7 +19,7 @@ import os.path import re from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional, Set +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple import attr import yaml @@ -572,6 +572,7 @@ def read_config(self, config, **kwargs): _warn_if_webclient_configured(self.listeners) self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) + self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None)) @attr.s class LimitRemoteRoomsConfig: @@ -917,6 +918,16 @@ def generate_config_section( # #gc_thresholds: [700, 10, 10] + # The minimum time in seconds between each GC for a generation, regardless of + # the GC thresholds. This ensures that we don't do GC too frequently. + # + # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive + # generation 0 GCs, etc. + # + # Defaults to `[1s, 10s, 30s]`. + # + #gc_min_interval: [0.5s, 30s, 1m] + # Set the limit on the returned events in the timeline in the get # and sync operations. The default value is 100. -1 means no upper limit. # @@ -1305,6 +1316,24 @@ def add_arguments(parser): help="Turn on the twisted telnet manhole service on the given port.", ) + def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]: + """Reads the three durations for the GC min interval option, returning seconds.""" + if durations is None: + return None + + try: + if len(durations) != 3: + raise ValueError() + return ( + self.parse_duration(durations[0]) / 1000, + self.parse_duration(durations[1]) / 1000, + self.parse_duration(durations[2]) / 1000, + ) + except Exception: + raise ConfigError( + "Value of `gc_min_interval` must be a list of three durations if set" + ) + def is_threepid_reserved(reserved_threepids, threepid): """Check the threepid against the reserved threepid config diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 31b7b3c25..e671da26d 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -535,6 +535,13 @@ def collect(self): REGISTRY.register(ReactorLastSeenMetric()) +# The minimum time in seconds between GCs for each generation, regardless of the current GC +# thresholds and counts. +MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0) + +# The time (in seconds since the epoch) of the last time we did a GC for each generation. 
+_last_gc = [0.0, 0.0, 0.0] + def runUntilCurrentTimer(reactor, func): @functools.wraps(func) @@ -575,11 +582,16 @@ def f(*args, **kwargs): return ret # Check if we need to do a manual GC (since its been disabled), and do - # one if necessary. + # one if necessary. Note we go in reverse order as e.g. a gen 1 GC may + # promote an object into gen 2, and we don't want to handle the same + # object multiple times. threshold = gc.get_threshold() counts = gc.get_count() for i in (2, 1, 0): - if threshold[i] < counts[i]: + # We check if we need to do one based on a straightforward + # comparison between the threshold and count. We also do an extra + # check to make sure that we don't a GC too often. + if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]: if i == 0: logger.debug("Collecting gc %d", i) else: @@ -589,6 +601,8 @@ def f(*args, **kwargs): unreachable = gc.collect(i) end = time.time() + _last_gc[i] = end + gc_time.labels(i).observe(end - start) gc_unreachable.labels(i).set(unreachable) From ef889c98a6cde0cfa95f7fdaf7f99ec3c1e9bb7f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:54:36 +0100 Subject: [PATCH 13/40] Optionally track memory usage of each LruCache (#9881) This will double count slightly in the presence of interned strings. It's off by default as it can consume a lot of resources. --- changelog.d/9881.feature | 1 + mypy.ini | 3 +++ synapse/app/generic_worker.py | 1 + synapse/app/homeserver.py | 1 + synapse/config/cache.py | 11 ++++++++ synapse/python_dependencies.py | 2 ++ synapse/util/caches/__init__.py | 31 +++++++++++++++++++++ synapse/util/caches/lrucache.py | 48 ++++++++++++++++++++++++++++++++- 8 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9881.feature diff --git a/changelog.d/9881.feature b/changelog.d/9881.feature new file mode 100644 index 000000000..088a517e0 --- /dev/null +++ b/changelog.d/9881.feature @@ -0,0 +1 @@ +Add experimental option to track memory usage of the caches. 
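The implementation that follows gates the feature behind a `caches.track_memory_usage` config option and sizes cache entries with pympler. As a rough sketch of the sizing idea only, assuming pympler is installed and with invented names, per-entry memory can be estimated like this; because `asizeof` walks referenced objects, strings shared between entries are counted once per entry, which is the slight double counting the commit message mentions.

```
from pympler import asizeof


def estimate_entry_memory(key: object, value: object) -> int:
    # Deep (recursive) size estimate of the key and value, in bytes.
    return asizeof.asizeof(key) + asizeof.asizeof(value)


print(estimate_entry_memory("!room:example.org", {"name": "example", "members": 42}))
```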
diff --git a/mypy.ini b/mypy.ini index a40f705b7..ea655a0d4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -171,3 +171,6 @@ ignore_missing_imports = True [mypy-txacme.*] ignore_missing_imports = True + +[mypy-pympler.*] +ignore_missing_imports = True diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index a3fe9a3f3..f730cdbd7 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -454,6 +454,7 @@ def start(config_options): config.server.update_user_directory = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage if config.server.gc_seconds: synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6a823da10..b2501ee4d 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -341,6 +341,7 @@ def setup(config_options): sys.exit(0) events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage if config.server.gc_seconds: synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds diff --git a/synapse/config/cache.py b/synapse/config/cache.py index 41b9b3f51..91165ee1c 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -17,6 +17,8 @@ import threading from typing import Callable, Dict +from synapse.python_dependencies import DependencyException, check_requirements + from ._base import Config, ConfigError # The prefix for all cache factor-related environment variables @@ -189,6 +191,15 @@ def read_config(self, config, **kwargs): ) self.cache_factors[cache] = factor + self.track_memory_usage = cache_config.get("track_memory_usage", False) + if self.track_memory_usage: + try: + check_requirements("cache_memory") + except DependencyException as e: + raise ConfigError( + e.message # noqa: B306, DependencyException.message is a property + ) + # Resize all caches (if necessary) with the new factors we've loaded self.resize_all_caches() diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2de946f46..d58eeeaa7 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -116,6 +116,8 @@ # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) "redis": ["txredisapi>=1.4.7", "hiredis"], + # Required to use experimental `caches.track_memory_usage` config option. + "cache_memory": ["pympler"], } ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 46af7fa47..ca36f07c2 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -24,6 +24,11 @@ logger = logging.getLogger(__name__) + +# Whether to track estimated memory usage of the LruCaches. 
+TRACK_MEMORY_USAGE = False + + caches_by_name = {} # type: Dict[str, Sized] collectors_by_name = {} # type: Dict[str, CacheMetric] @@ -32,6 +37,11 @@ cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"]) cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) +cache_memory_usage = Gauge( + "synapse_util_caches_cache_size_bytes", + "Estimated memory usage of the caches", + ["name"], +) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) @@ -52,6 +62,7 @@ class CacheMetric: hits = attr.ib(default=0) misses = attr.ib(default=0) evicted_size = attr.ib(default=0) + memory_usage = attr.ib(default=None) def inc_hits(self): self.hits += 1 @@ -62,6 +73,19 @@ def inc_misses(self): def inc_evictions(self, size=1): self.evicted_size += size + def inc_memory_usage(self, memory: int): + if self.memory_usage is None: + self.memory_usage = 0 + + self.memory_usage += memory + + def dec_memory_usage(self, memory: int): + self.memory_usage -= memory + + def clear_memory_usage(self): + if self.memory_usage is not None: + self.memory_usage = 0 + def describe(self): return [] @@ -81,6 +105,13 @@ def collect(self): cache_total.labels(self._cache_name).set(self.hits + self.misses) if getattr(self._cache, "max_size", None): cache_max_size.labels(self._cache_name).set(self._cache.max_size) + + if TRACK_MEMORY_USAGE: + # self.memory_usage can be None if nothing has been inserted + # into the cache yet. + cache_memory_usage.labels(self._cache_name).set( + self.memory_usage or 0 + ) if self._collect_callback: self._collect_callback() except Exception as e: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 10b0ec6b7..1be675e01 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -32,9 +32,36 @@ from typing_extensions import Literal from synapse.config import cache as cache_config +from synapse.util import caches from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache +try: + from pympler.asizeof import Asizer + + def _get_size_of(val: Any, *, recurse=True) -> int: + """Get an estimate of the size in bytes of the object. + + Args: + val: The object to size. + recurse: If true will include referenced values in the size, + otherwise only sizes the given object. + """ + # Ignore singleton values when calculating memory usage. 
+ if val in ((), None, ""): + return 0 + + sizer = Asizer() + sizer.exclude_refs((), None, "") + return sizer.asizeof(val, limit=100 if recurse else 0) + + +except ImportError: + + def _get_size_of(val: Any, *, recurse=True) -> int: + return 0 + + # Function type: the type used for invalidation callbacks FT = TypeVar("FT", bound=Callable[..., Any]) @@ -56,7 +83,7 @@ def enumerate_leaves(node, depth): class _Node: - __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] + __slots__ = ["prev_node", "next_node", "key", "value", "callbacks", "memory"] def __init__( self, @@ -84,6 +111,16 @@ def __init__( self.add_callbacks(callbacks) + self.memory = 0 + if caches.TRACK_MEMORY_USAGE: + self.memory = ( + _get_size_of(key) + + _get_size_of(value) + + _get_size_of(self.callbacks, recurse=False) + + _get_size_of(self, recurse=False) + ) + self.memory += _get_size_of(self.memory, recurse=False) + def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None: """Add to stored list of callbacks, removing duplicates.""" @@ -233,6 +270,9 @@ def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): if size_callback: cached_cache_len[0] += size_callback(node.value) + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.inc_memory_usage(node.memory) + def move_node_to_front(node): prev_node = node.prev_node next_node = node.next_node @@ -258,6 +298,9 @@ def delete_node(node): node.run_and_clear_callbacks() + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.dec_memory_usage(node.memory) + return deleted_len @overload @@ -373,6 +416,9 @@ def cache_clear() -> None: if size_callback: cached_cache_len[0] = 0 + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.clear_memory_usage() + @synchronized def cache_contains(key: KT) -> bool: return key in cache From e2a443550e7b47bf8fe1b5fbd76f9ca95e81cbad Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 May 2021 11:56:51 -0400 Subject: [PATCH 14/40] Support stable MSC1772 spaces identifiers. (#9915) Support both the unstable and stable identifiers. A future release will disable the unstable identifiers. --- changelog.d/9915.feature | 1 + synapse/api/constants.py | 3 +++ synapse/handlers/space_summary.py | 8 ++++++-- 3 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9915.feature diff --git a/changelog.d/9915.feature b/changelog.d/9915.feature new file mode 100644 index 000000000..832916cb0 --- /dev/null +++ b/changelog.d/9915.feature @@ -0,0 +1 @@ +Support stable identifiers from [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772). 
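The diffs below add stable constants alongside the existing `org.matrix.msc1772.*` ones and read the stable name first. A hedged sketch of that lookup pattern, with the keys passed in explicitly since the exact stable identifier for the room type is corrected again in a later commit in this series:

```
from typing import Any, Dict, Optional


def read_with_fallback(
    content: Dict[str, Any], stable_key: str, unstable_key: str
) -> Optional[Any]:
    """Prefer the stable field name, falling back to the unstable MSC one."""
    value = content.get(stable_key)
    if value is None:
        value = content.get(unstable_key)
    return value


# e.g. the room type of an m.room.create event sent by an older server:
room_type = read_with_fallback(
    {"org.matrix.msc1772.type": "m.space"}, "type", "org.matrix.msc1772.type"
)
assert room_type == "m.space"
```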
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 936b6534b..bff750e5f 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -110,6 +110,8 @@ class EventTypes: Dummy = "org.matrix.dummy_event" + SpaceChild = "m.space.child" + SpaceParent = "m.space.parent" MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child" MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent" @@ -174,6 +176,7 @@ class EventContentFields: SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after" # cf https://github.com/matrix-org/matrix-doc/pull/1772 + ROOM_TYPE = "m.type" MSC1772_ROOM_TYPE = "org.matrix.msc1772.type" diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 01e3e050f..d32452747 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -288,6 +288,7 @@ async def _summarize_remote_room( ev.data for ev in res.events if ev.event_type == EventTypes.MSC1772_SPACE_CHILD + or ev.event_type == EventTypes.SpaceChild ) async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool: @@ -331,7 +332,9 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: ) # TODO: update once MSC1772 lands - room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + if not room_type: + room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) entry = { "room_id": stats["room_id"], @@ -360,8 +363,9 @@ async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: [ event_id for key, event_id in current_state_ids.items() - # TODO: update once MSC1772 lands + # TODO: update once MSC1772 has been FCP for a period of time. if key[0] == EventTypes.MSC1772_SPACE_CHILD + or key[0] == EventTypes.SpaceChild ] ) From 37623e33822d4e032ba6d1f523fb09b12fe27aab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 17:27:05 +0100 Subject: [PATCH 15/40] Increase perf of handling presence when joining large rooms. (#9916) --- changelog.d/9916.feature | 1 + synapse/handlers/presence.py | 154 +++++++++++++++++--------------- tests/handlers/test_presence.py | 14 +-- 3 files changed, 87 insertions(+), 82 deletions(-) create mode 100644 changelog.d/9916.feature diff --git a/changelog.d/9916.feature b/changelog.d/9916.feature new file mode 100644 index 000000000..54165cce1 --- /dev/null +++ b/changelog.d/9916.feature @@ -0,0 +1 @@ +Improve performance after joining a large room when presence is enabled. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8e085dfbe..6fd1f3428 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1183,7 +1183,16 @@ async def _unsafe_process(self) -> None: max_pos, deltas = await self.store.get_current_state_deltas( self._event_pos, room_max_stream_ordering ) - await self._handle_state_delta(deltas) + + # We may get multiple deltas for different rooms, but we want to + # handle them on a room by room basis, so we batch them up by + # room. 
+ deltas_by_room: Dict[str, List[JsonDict]] = {} + for delta in deltas: + deltas_by_room.setdefault(delta["room_id"], []).append(delta) + + for room_id, deltas_for_room in deltas_by_room.items(): + await self._handle_state_delta(room_id, deltas_for_room) self._event_pos = max_pos @@ -1192,17 +1201,21 @@ async def _unsafe_process(self) -> None: max_pos ) - async def _handle_state_delta(self, deltas: List[JsonDict]) -> None: - """Process current state deltas to find new joins that need to be - handled. + async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None: + """Process current state deltas for the room to find new joins that need + to be handled. """ - # A map of destination to a set of user state that they should receive - presence_destinations = {} # type: Dict[str, Set[UserPresenceState]] + + # Sets of newly joined users. Note that if the local server is + # joining a remote room for the first time we'll see both the joining + # user and all remote users as newly joined. + newly_joined_users = set() for delta in deltas: + assert room_id == delta["room_id"] + typ = delta["type"] state_key = delta["state_key"] - room_id = delta["room_id"] event_id = delta["event_id"] prev_event_id = delta["prev_event_id"] @@ -1231,72 +1244,55 @@ async def _handle_state_delta(self, deltas: List[JsonDict]) -> None: # Ignore changes to join events. continue - # Retrieve any user presence state updates that need to be sent as a result, - # and the destinations that need to receive it - destinations, user_presence_states = await self._on_user_joined_room( - room_id, state_key - ) - - # Insert the destinations and respective updates into our destinations dict - for destination in destinations: - presence_destinations.setdefault(destination, set()).update( - user_presence_states - ) - - # Send out user presence updates for each destination - for destination, user_state_set in presence_destinations.items(): - self._federation_queue.send_presence_to_destinations( - destinations=[destination], states=user_state_set - ) - - async def _on_user_joined_room( - self, room_id: str, user_id: str - ) -> Tuple[List[str], List[UserPresenceState]]: - """Called when we detect a user joining the room via the current state - delta stream. Returns the destinations that need to be updated and the - presence updates to send to them. - - Args: - room_id: The ID of the room that the user has joined. - user_id: The ID of the user that has joined the room. - - Returns: - A tuple of destinations and presence updates to send to them. - """ - if self.is_mine_id(user_id): - # If this is a local user then we need to send their presence - # out to hosts in the room (who don't already have it) - - # TODO: We should be able to filter the hosts down to those that - # haven't previously seen the user - - remote_hosts = await self.state.get_current_hosts_in_room(room_id) + newly_joined_users.add(state_key) - # Filter out ourselves. - filtered_remote_hosts = [ - host for host in remote_hosts if host != self.server_name - ] - - state = await self.current_state_for_user(user_id) - return filtered_remote_hosts, [state] - else: - # A remote user has joined the room, so we need to: - # 1. Check if this is a new server in the room - # 2. If so send any presence they don't already have for - # local users in the room. - - # TODO: We should be able to filter the users down to those that - # the server hasn't previously seen - - # TODO: Check that this is actually a new server joining the - # room. 
- - remote_host = get_domain_from_id(user_id) + if not newly_joined_users: + # If nobody has joined then there's nothing to do. + return - users = await self.store.get_users_in_room(room_id) - user_ids = list(filter(self.is_mine_id, users)) + # We want to send: + # 1. presence states of all local users in the room to newly joined + # remote servers + # 2. presence states of newly joined users to all remote servers in + # the room. + # + # TODO: Only send presence states to remote hosts that don't already + # have them (because they already share rooms). + + # Get all the users who were already in the room, by fetching the + # current users in the room and removing the newly joined users. + users = await self.store.get_users_in_room(room_id) + prev_users = set(users) - newly_joined_users + + # Construct sets for all the local users and remote hosts that were + # already in the room + prev_local_users = [] + prev_remote_hosts = set() + for user_id in prev_users: + if self.is_mine_id(user_id): + prev_local_users.append(user_id) + else: + prev_remote_hosts.add(get_domain_from_id(user_id)) + + # Similarly, construct sets for all the local users and remote hosts + # that were *not* already in the room. Care needs to be taken with the + # calculating the remote hosts, as a host may have already been in the + # room even if there is a newly joined user from that host. + newly_joined_local_users = [] + newly_joined_remote_hosts = set() + for user_id in newly_joined_users: + if self.is_mine_id(user_id): + newly_joined_local_users.append(user_id) + else: + host = get_domain_from_id(user_id) + if host not in prev_remote_hosts: + newly_joined_remote_hosts.add(host) - states_d = await self.current_state_for_users(user_ids) + # Send presence states of all local users in the room to newly joined + # remote servers. (We actually only send states for local users already + # in the room, as we'll send states for newly joined local users below.) + if prev_local_users and newly_joined_remote_hosts: + local_states = await self.current_state_for_users(prev_local_users) # Filter out old presence, i.e. offline presence states where # the user hasn't been active for a week. 
We can change this @@ -1306,13 +1302,27 @@ async def _on_user_joined_room( now = self.clock.time_msec() states = [ state - for state in states_d.values() + for state in local_states.values() if state.state != PresenceState.OFFLINE or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000 or state.status_msg is not None ] - return [remote_host], states + self._federation_queue.send_presence_to_destinations( + destinations=newly_joined_remote_hosts, + states=states, + ) + + # Send presence states of newly joined users to all remote servers in + # the room + if newly_joined_local_users and ( + prev_remote_hosts or newly_joined_remote_hosts + ): + local_states = await self.current_state_for_users(newly_joined_local_users) + self._federation_queue.send_presence_to_destinations( + destinations=prev_remote_hosts | newly_joined_remote_hosts, + states=list(local_states.values()), + ) def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool: diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index ce330e79c..1ffab709f 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -729,7 +729,7 @@ def test_remote_joins(self): ) self.assertEqual(expected_state.state, PresenceState.ONLINE) self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=["server2"], states={expected_state} + destinations={"server2"}, states=[expected_state] ) # @@ -740,7 +740,7 @@ def test_remote_joins(self): self._add_new_user(room_id, "@bob:server3") self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=["server3"], states={expected_state} + destinations={"server3"}, states=[expected_state] ) def test_remote_gets_presence_when_local_user_joins(self): @@ -788,14 +788,8 @@ def test_remote_gets_presence_when_local_user_joins(self): self.presence_handler.current_state_for_user("@test2:server") ) self.assertEqual(expected_state.state, PresenceState.ONLINE) - self.assertEqual( - self.federation_sender.send_presence_to_destinations.call_count, 2 - ) - self.federation_sender.send_presence_to_destinations.assert_any_call( - destinations=["server3"], states={expected_state} - ) - self.federation_sender.send_presence_to_destinations.assert_any_call( - destinations=["server2"], states={expected_state} + self.federation_sender.send_presence_to_destinations.assert_called_once_with( + destinations={"server2", "server3"}, states=[expected_state] ) def _add_new_user(self, room_id, user_id): From d783880083733a694ed4c7b15ca53be00e06f8a7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 May 2021 13:33:05 -0400 Subject: [PATCH 16/40] Include the time of the create event in Spaces Summary. (#9928) This is an update based on changes to MSC2946. The origin_server_ts of the m.room.create event is copied into the creation_ts field for each room returned from the spaces summary. --- changelog.d/9928.bugfix | 1 + synapse/handlers/space_summary.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/9928.bugfix diff --git a/changelog.d/9928.bugfix b/changelog.d/9928.bugfix new file mode 100644 index 000000000..7b74cd9fb --- /dev/null +++ b/changelog.d/9928.bugfix @@ -0,0 +1 @@ +Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. 
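The one-line change below copies the create event's `origin_server_ts` into a `creation_ts` field on each summary entry. A small illustrative sketch of how a client might consume it, with invented room IDs and timestamps:

```
# Sort space summary entries oldest-first by creation time (ms since the epoch).
rooms = [
    {"room_id": "!b:example.org", "creation_ts": 1620250000000},
    {"room_id": "!a:example.org", "creation_ts": 1610000000000},
]
rooms.sort(key=lambda room: room.get("creation_ts", 0))
assert [r["room_id"] for r in rooms] == ["!a:example.org", "!b:example.org"]
```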
diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index d32452747..2e997841f 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -347,6 +347,7 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: stats["history_visibility"] == HistoryVisibility.WORLD_READABLE ), "guest_can_join": stats["guest_access"] == "can_join", + "creation_ts": create_event.origin_server_ts, "room_type": room_type, } From 70f0ffd2fcd815a065b4734ac606654a2e11dd28 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 May 2021 16:31:16 -0400 Subject: [PATCH 17/40] Follow-up to #9915 to correct the identifier for room types. --- synapse/api/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index bff750e5f..ab628b2be 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -176,7 +176,7 @@ class EventContentFields: SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after" # cf https://github.com/matrix-org/matrix-doc/pull/1772 - ROOM_TYPE = "m.type" + ROOM_TYPE = "type" MSC1772_ROOM_TYPE = "org.matrix.msc1772.type" From eba431c539dbe0ca28794d89962d447d1f75938f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 May 2021 15:06:35 +0100 Subject: [PATCH 18/40] Revert "Leave out optional keys from /sync (#9919)" (#9940) This reverts commit e9eb3549d32a6f93d07de8dbd5e1ebe54c8d8278. --- changelog.d/9919.feature | 1 - synapse/rest/client/v2_alpha/sync.py | 62 ++++++------------- tests/rest/client/v2_alpha/test_sync.py | 30 ++++++++- .../test_resource_limits_server_notices.py | 8 +-- 4 files changed, 50 insertions(+), 51 deletions(-) delete mode 100644 changelog.d/9919.feature diff --git a/changelog.d/9919.feature b/changelog.d/9919.feature deleted file mode 100644 index 07747505d..000000000 --- a/changelog.d/9919.feature +++ /dev/null @@ -1 +0,0 @@ -Omit empty fields from the `/sync` response. Contributed by @deepbluev7. 
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 5f8565333..95ee3f1b8 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -14,7 +14,6 @@ import itertools import logging -from collections import defaultdict from typing import TYPE_CHECKING, Tuple from synapse.api.constants import PresenceState @@ -230,49 +229,24 @@ async def encode_response(self, time_now, sync_result, access_token_id, filter): ) logger.debug("building sync response dict") - - response: dict = defaultdict(dict) - response["next_batch"] = await sync_result.next_batch.to_string(self.store) - - if sync_result.account_data: - response["account_data"] = {"events": sync_result.account_data} - if sync_result.presence: - response["presence"] = SyncRestServlet.encode_presence( - sync_result.presence, time_now - ) - - if sync_result.to_device: - response["to_device"] = {"events": sync_result.to_device} - - if sync_result.device_lists.changed: - response["device_lists"]["changed"] = list(sync_result.device_lists.changed) - if sync_result.device_lists.left: - response["device_lists"]["left"] = list(sync_result.device_lists.left) - - if sync_result.device_one_time_keys_count: - response[ - "device_one_time_keys_count" - ] = sync_result.device_one_time_keys_count - if sync_result.device_unused_fallback_key_types: - response[ - "org.matrix.msc2732.device_unused_fallback_key_types" - ] = sync_result.device_unused_fallback_key_types - - if joined: - response["rooms"]["join"] = joined - if invited: - response["rooms"]["invite"] = invited - if archived: - response["rooms"]["leave"] = archived - - if sync_result.groups.join: - response["groups"]["join"] = sync_result.groups.join - if sync_result.groups.invite: - response["groups"]["invite"] = sync_result.groups.invite - if sync_result.groups.leave: - response["groups"]["leave"] = sync_result.groups.leave - - return response + return { + "account_data": {"events": sync_result.account_data}, + "to_device": {"events": sync_result.to_device}, + "device_lists": { + "changed": list(sync_result.device_lists.changed), + "left": list(sync_result.device_lists.left), + }, + "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now), + "rooms": {"join": joined, "invite": invited, "leave": archived}, + "groups": { + "join": sync_result.groups.join, + "invite": sync_result.groups.invite, + "leave": sync_result.groups.leave, + }, + "device_one_time_keys_count": sync_result.device_one_time_keys_count, + "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, + "next_batch": await sync_result.next_batch.to_string(self.store), + } @staticmethod def encode_presence(events, time_now): diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 74be5176d..dbcbdf159 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -37,7 +37,35 @@ def test_sync_argless(self): channel = self.make_request("GET", "/sync") self.assertEqual(channel.code, 200) - self.assertIn("next_batch", channel.json_body) + self.assertTrue( + { + "next_batch", + "rooms", + "presence", + "account_data", + "to_device", + "device_lists", + }.issubset(set(channel.json_body.keys())) + ) + + def test_sync_presence_disabled(self): + """ + When presence is disabled, the key does not appear in /sync. 
+ """ + self.hs.config.use_presence = False + + channel = self.make_request("GET", "/sync") + + self.assertEqual(channel.code, 200) + self.assertTrue( + { + "next_batch", + "rooms", + "account_data", + "to_device", + "device_lists", + }.issubset(set(channel.json_body.keys())) + ) class SyncFilterTestCase(unittest.HomeserverTestCase): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 3245aa91c..d46521ccd 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -306,9 +306,8 @@ def test_no_invite_without_notice(self): channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) - self.assertNotIn( - "rooms", channel.json_body, "Got invites without server notice" - ) + invites = channel.json_body["rooms"]["invite"] + self.assertEqual(len(invites), 0, invites) def test_invite_with_notice(self): """Tests that, if the MAU limit is hit, the server notices user invites each user @@ -365,8 +364,7 @@ def _trigger_notice_and_join(self): # We could also pick another user and sync with it, which would return an # invite to a system notices room, but it doesn't matter which user we're # using so we use the last one because it saves us an extra sync. - if "rooms" in channel.json_body: - invites = channel.json_body["rooms"]["invite"] + invites = channel.json_body["rooms"]["invite"] # Make sure we have an invite to process. self.assertEqual(len(invites), 1, invites) From 8771b1337da9faa3b60cf0ec0a128a7de856f19e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 May 2021 15:54:07 +0100 Subject: [PATCH 19/40] Export jemalloc stats to prometheus when used (#9882) --- changelog.d/9882.misc | 1 + synapse/app/_base.py | 2 + synapse/metrics/__init__.py | 1 + synapse/metrics/jemalloc.py | 196 ++++++++++++++++++++++++++++++++++++ 4 files changed, 200 insertions(+) create mode 100644 changelog.d/9882.misc create mode 100644 synapse/metrics/jemalloc.py diff --git a/changelog.d/9882.misc b/changelog.d/9882.misc new file mode 100644 index 000000000..facfa31f3 --- /dev/null +++ b/changelog.d/9882.misc @@ -0,0 +1 @@ +Export jemalloc stats to Prometheus if it is being used. 
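The new `synapse/metrics/jemalloc.py` below ends by registering a custom collector with the metrics registry. As background, a minimal sketch of that prometheus_client pattern, with invented names and hard-coded values standing in for the real `mallctl()` reads:

```
from prometheus_client import REGISTRY
from prometheus_client.core import GaugeMetricFamily


class DemoMemoryCollector:
    """Hypothetical collector; the real one refreshes and reads jemalloc stats."""

    def collect(self):
        g = GaugeMetricFamily(
            "demo_app_memory_bytes",
            "Example per-type memory figures",
            labels=["type"],
        )
        for stat, value in (("allocated", 1024), ("active", 2048)):
            g.add_metric([stat], value)
        yield g


REGISTRY.register(DemoMemoryCollector())
```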
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 638e01c1b..59918d789 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -37,6 +37,7 @@ from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.util.async_helpers import Linearizer from synapse.util.daemonize import daemonize_process from synapse.util.rlimit import change_resource_limit @@ -115,6 +116,7 @@ def start_reactor( def run(): logger.info("Running") + setup_jemalloc_stats() change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index e671da26d..fef284666 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -629,6 +629,7 @@ def f(*args, **kwargs): except AttributeError: pass + __all__ = [ "MetricsResource", "generate_latest", diff --git a/synapse/metrics/jemalloc.py b/synapse/metrics/jemalloc.py new file mode 100644 index 000000000..29ab6c022 --- /dev/null +++ b/synapse/metrics/jemalloc.py @@ -0,0 +1,196 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ctypes +import logging +import os +import re +from typing import Optional + +from synapse.metrics import REGISTRY, GaugeMetricFamily + +logger = logging.getLogger(__name__) + + +def _setup_jemalloc_stats(): + """Checks to see if jemalloc is loaded, and hooks up a collector to record + statistics exposed by jemalloc. + """ + + # Try to find the loaded jemalloc shared library, if any. We need to + # introspect into what is loaded, rather than loading whatever is on the + # path, as if we load a *different* jemalloc version things will seg fault. + + # We look in `/proc/self/maps`, which only exists on linux. + if not os.path.exists("/proc/self/maps"): + logger.debug("Not looking for jemalloc as no /proc/self/maps exist") + return + + # We're looking for a path at the end of the line that includes + # "libjemalloc". + regex = re.compile(r"/\S+/libjemalloc.*$") + + jemalloc_path = None + with open("/proc/self/maps") as f: + for line in f: + match = regex.search(line.strip()) + if match: + jemalloc_path = match.group() + + if not jemalloc_path: + # No loaded jemalloc was found. + logger.debug("jemalloc not found") + return + + logger.debug("Found jemalloc at %s", jemalloc_path) + + jemalloc = ctypes.CDLL(jemalloc_path) + + def _mallctl( + name: str, read: bool = True, write: Optional[int] = None + ) -> Optional[int]: + """Wrapper around `mallctl` for reading and writing integers to + jemalloc. + + Args: + name: The name of the option to read from/write to. + read: Whether to try and read the value. + write: The value to write, if given. + + Returns: + The value read if `read` is True, otherwise None. + + Raises: + An exception if `mallctl` returns a non-zero error code. 
+ """ + + input_var = None + input_var_ref = None + input_len_ref = None + if read: + input_var = ctypes.c_size_t(0) + input_len = ctypes.c_size_t(ctypes.sizeof(input_var)) + + input_var_ref = ctypes.byref(input_var) + input_len_ref = ctypes.byref(input_len) + + write_var_ref = None + write_len = ctypes.c_size_t(0) + if write is not None: + write_var = ctypes.c_size_t(write) + write_len = ctypes.c_size_t(ctypes.sizeof(write_var)) + + write_var_ref = ctypes.byref(write_var) + + # The interface is: + # + # int mallctl( + # const char *name, + # void *oldp, + # size_t *oldlenp, + # void *newp, + # size_t newlen + # ) + # + # Where oldp/oldlenp is a buffer where the old value will be written to + # (if not null), and newp/newlen is the buffer with the new value to set + # (if not null). Note that they're all references *except* newlen. + result = jemalloc.mallctl( + name.encode("ascii"), + input_var_ref, + input_len_ref, + write_var_ref, + write_len, + ) + + if result != 0: + raise Exception("Failed to call mallctl") + + if input_var is None: + return None + + return input_var.value + + def _jemalloc_refresh_stats() -> None: + """Request that jemalloc updates its internal statistics. This needs to + be called before querying for stats, otherwise it will return stale + values. + """ + try: + _mallctl("epoch", read=False, write=1) + except Exception as e: + logger.warning("Failed to reload jemalloc stats: %s", e) + + class JemallocCollector: + """Metrics for internal jemalloc stats.""" + + def collect(self): + _jemalloc_refresh_stats() + + g = GaugeMetricFamily( + "jemalloc_stats_app_memory_bytes", + "The stats reported by jemalloc", + labels=["type"], + ) + + # Read the relevant global stats from jemalloc. Note that these may + # not be accurate if python is configured to use its internal small + # object allocator (which is on by default, disable by setting the + # env `PYTHONMALLOC=malloc`). + # + # See the jemalloc manpage for details about what each value means, + # roughly: + # - allocated ─ Total number of bytes allocated by the app + # - active ─ Total number of bytes in active pages allocated by + # the application, this is bigger than `allocated`. + # - resident ─ Maximum number of bytes in physically resident data + # pages mapped by the allocator, comprising all pages dedicated + # to allocator metadata, pages backing active allocations, and + # unused dirty pages. This is bigger than `active`. + # - mapped ─ Total number of bytes in active extents mapped by the + # allocator. + # - metadata ─ Total number of bytes dedicated to jemalloc + # metadata. + for t in ( + "allocated", + "active", + "resident", + "mapped", + "metadata", + ): + try: + value = _mallctl(f"stats.{t}") + except Exception as e: + # There was an error fetching the value, skip. + logger.warning("Failed to read jemalloc stats.%s: %s", t, e) + continue + + g.add_metric([t], value=value) + + yield g + + REGISTRY.register(JemallocCollector()) + + logger.debug("Added jemalloc stats") + + +def setup_jemalloc_stats(): + """Try to setup jemalloc stats, if jemalloc is loaded.""" + + try: + _setup_jemalloc_stats() + except Exception as e: + # This should only happen if we find the loaded jemalloc library, but + # fail to load it somehow (e.g. we somehow picked the wrong version). 
+ logger.info("Failed to setup collector to record jemalloc stats: %s", e) From 25f43faa70f7cc58493b636c2702ae63395779dc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 May 2021 10:22:05 +0100 Subject: [PATCH 20/40] Reorganise the database schema directories (#9932) The hope here is that by moving all the schema files into synapse/storage/schema, it gets a bit easier for newcomers to navigate. It certainly got easier for me to write a helpful README. There's more to do on that front, but I'll follow up with other PRs for that. --- changelog.d/9932.misc | 1 + .../main/schema/full_schemas/README.md | 21 -------- synapse/storage/prepare_database.py | 48 ++++++++++--------- synapse/storage/schema/README.md | 37 ++++++++++++++ synapse/storage/schema/__init__.py | 17 +++++++ .../delta/25/00background_updates.sql | 0 .../delta/35/00background_updates_add_col.sql | 0 .../delta/58/00background_update_ordering.sql | 0 .../{ => common}/full_schemas/54/full.sql | 0 .../schema/{ => common}/schema_version.sql | 0 .../schema => schema/main}/delta/12/v12.sql | 0 .../schema => schema/main}/delta/13/v13.sql | 0 .../schema => schema/main}/delta/14/v14.sql | 0 .../main}/delta/15/appservice_txns.sql | 0 .../main}/delta/15/presence_indices.sql | 0 .../schema => schema/main}/delta/15/v15.sql | 0 .../main}/delta/16/events_order_index.sql | 0 .../delta/16/remote_media_cache_index.sql | 0 .../main}/delta/16/remove_duplicates.sql | 0 .../main}/delta/16/room_alias_index.sql | 0 .../main}/delta/16/unique_constraints.sql | 0 .../schema => schema/main}/delta/16/users.sql | 0 .../main}/delta/17/drop_indexes.sql | 0 .../main}/delta/17/server_keys.sql | 0 .../main}/delta/17/user_threepids.sql | 0 .../delta/18/server_keys_bigger_ints.sql | 0 .../main}/delta/19/event_index.sql | 0 .../schema => schema/main}/delta/20/dummy.sql | 0 .../main}/delta/20/pushers.py | 0 .../main}/delta/21/end_to_end_keys.sql | 0 .../main}/delta/21/receipts.sql | 0 .../main}/delta/22/receipts_index.sql | 0 .../main}/delta/22/user_threepids_unique.sql | 0 .../main}/delta/24/stats_reporting.sql | 0 .../schema => schema/main}/delta/25/fts.py | 0 .../main}/delta/25/guest_access.sql | 0 .../main}/delta/25/history_visibility.sql | 0 .../schema => schema/main}/delta/25/tags.sql | 0 .../main}/delta/26/account_data.sql | 0 .../main}/delta/27/account_data.sql | 0 .../main}/delta/27/forgotten_memberships.sql | 0 .../schema => schema/main}/delta/27/ts.py | 0 .../main}/delta/28/event_push_actions.sql | 0 .../main}/delta/28/events_room_stream.sql | 0 .../main}/delta/28/public_roms_index.sql | 0 .../main}/delta/28/receipts_user_id_index.sql | 0 .../main}/delta/28/upgrade_times.sql | 0 .../main}/delta/28/users_is_guest.sql | 0 .../main}/delta/29/push_actions.sql | 0 .../main}/delta/30/alias_creator.sql | 0 .../main}/delta/30/as_users.py | 0 .../main}/delta/30/deleted_pushers.sql | 0 .../main}/delta/30/presence_stream.sql | 0 .../main}/delta/30/public_rooms.sql | 0 .../main}/delta/30/push_rule_stream.sql | 0 .../delta/30/threepid_guest_access_tokens.sql | 0 .../main}/delta/31/invites.sql | 0 .../31/local_media_repository_url_cache.sql | 0 .../main}/delta/31/pushers.py | 0 .../main}/delta/31/pushers_index.sql | 0 .../main}/delta/31/search_update.py | 0 .../main}/delta/32/events.sql | 0 .../main}/delta/32/openid.sql | 0 .../main}/delta/32/pusher_throttle.sql | 0 .../main}/delta/32/remove_indices.sql | 0 .../main}/delta/32/reports.sql | 0 .../delta/33/access_tokens_device_index.sql | 0 
.../main}/delta/33/devices.sql | 0 .../main}/delta/33/devices_for_e2e_keys.sql | 0 ...ices_for_e2e_keys_clear_unknown_device.sql | 0 .../main}/delta/33/event_fields.py | 0 .../main}/delta/33/remote_media_ts.py | 0 .../main}/delta/33/user_ips_index.sql | 0 .../main}/delta/34/appservice_stream.sql | 0 .../main}/delta/34/cache_stream.py | 0 .../main}/delta/34/device_inbox.sql | 0 .../delta/34/push_display_name_rename.sql | 0 .../main}/delta/34/received_txn_purge.py | 0 .../main}/delta/35/contains_url.sql | 0 .../main}/delta/35/device_outbox.sql | 0 .../main}/delta/35/device_stream_id.sql | 0 .../delta/35/event_push_actions_index.sql | 0 .../35/public_room_list_change_stream.sql | 0 .../main}/delta/35/stream_order_to_extrem.sql | 0 .../main}/delta/36/readd_public_rooms.sql | 0 .../main}/delta/37/remove_auth_idx.py | 0 .../main}/delta/37/user_threepids.sql | 0 .../main}/delta/38/postgres_fts_gist.sql | 0 .../main}/delta/39/appservice_room_list.sql | 0 .../delta/39/device_federation_stream_idx.sql | 0 .../main}/delta/39/event_push_index.sql | 0 .../delta/39/federation_out_position.sql | 0 .../main}/delta/39/membership_profile.sql | 0 .../main}/delta/40/current_state_idx.sql | 0 .../main}/delta/40/device_inbox.sql | 0 .../main}/delta/40/device_list_streams.sql | 0 .../main}/delta/40/event_push_summary.sql | 0 .../main}/delta/40/pushers.sql | 0 .../main}/delta/41/device_list_stream_idx.sql | 0 .../main}/delta/41/device_outbound_index.sql | 0 .../delta/41/event_search_event_id_idx.sql | 0 .../main}/delta/41/ratelimit.sql | 0 .../main}/delta/42/current_state_delta.sql | 0 .../main}/delta/42/device_list_last_id.sql | 0 .../main}/delta/42/event_auth_state_only.sql | 0 .../main}/delta/42/user_dir.py | 0 .../main}/delta/43/blocked_rooms.sql | 0 .../main}/delta/43/quarantine_media.sql | 0 .../main}/delta/43/url_cache.sql | 0 .../main}/delta/43/user_share.sql | 0 .../main}/delta/44/expire_url_cache.sql | 0 .../main}/delta/45/group_server.sql | 0 .../main}/delta/45/profile_cache.sql | 0 .../main}/delta/46/drop_refresh_tokens.sql | 0 .../delta/46/drop_unique_deleted_pushers.sql | 0 .../main}/delta/46/group_server.sql | 0 .../46/local_media_repository_url_idx.sql | 0 .../main}/delta/46/user_dir_null_room_ids.sql | 0 .../main}/delta/46/user_dir_typos.sql | 0 .../main}/delta/47/last_access_media.sql | 0 .../main}/delta/47/postgres_fts_gin.sql | 0 .../main}/delta/47/push_actions_staging.sql | 0 .../main}/delta/48/add_user_consent.sql | 0 .../delta/48/add_user_ips_last_seen_index.sql | 0 .../main}/delta/48/deactivated_users.sql | 0 .../main}/delta/48/group_unique_indexes.py | 0 .../main}/delta/48/groups_joinable.sql | 0 .../add_user_consent_server_notice_sent.sql | 0 .../main}/delta/49/add_user_daily_visits.sql | 0 .../49/add_user_ips_last_seen_only_index.sql | 0 .../delta/50/add_creation_ts_users_index.sql | 0 .../main}/delta/50/erasure_store.sql | 0 .../delta/50/make_event_content_nullable.py | 0 .../main}/delta/51/e2e_room_keys.sql | 0 .../main}/delta/51/monthly_active_users.sql | 0 .../52/add_event_to_state_group_index.sql | 0 .../52/device_list_streams_unique_idx.sql | 0 .../main}/delta/52/e2e_room_keys.sql | 0 .../main}/delta/53/add_user_type_to_users.sql | 0 .../main}/delta/53/drop_sent_transactions.sql | 0 .../main}/delta/53/event_format_version.sql | 0 .../main}/delta/53/user_dir_populate.sql | 0 .../main}/delta/53/user_ips_index.sql | 0 .../main}/delta/53/user_share.sql | 0 .../main}/delta/53/user_threepid_id.sql | 0 .../main}/delta/53/users_in_public_rooms.sql | 0 
.../54/account_validity_with_renewal.sql | 0 .../delta/54/add_validity_to_server_keys.sql | 0 .../delta/54/delete_forward_extremities.sql | 0 .../main}/delta/54/drop_legacy_tables.sql | 0 .../main}/delta/54/drop_presence_list.sql | 0 .../main}/delta/54/relations.sql | 0 .../schema => schema/main}/delta/54/stats.sql | 0 .../main}/delta/54/stats2.sql | 0 .../main}/delta/55/access_token_expiry.sql | 0 .../delta/55/track_threepid_validations.sql | 0 .../delta/55/users_alter_deactivated.sql | 0 .../delta/56/add_spans_to_device_lists.sql | 0 .../56/current_state_events_membership.sql | 0 .../current_state_events_membership_mk2.sql | 0 .../56/delete_keys_from_deleted_backups.sql | 0 .../delta/56/destinations_failure_ts.sql | 0 ...tinations_retry_interval_type.sql.postgres | 0 .../delta/56/device_stream_id_insert.sql | 0 .../main}/delta/56/devices_last_seen.sql | 0 .../delta/56/drop_unused_event_tables.sql | 0 .../main}/delta/56/event_expiry.sql | 0 .../main}/delta/56/event_labels.sql | 0 .../56/event_labels_background_update.sql | 0 .../main}/delta/56/fix_room_keys_index.sql | 0 .../main}/delta/56/hidden_devices.sql | 0 .../delta/56/hidden_devices_fix.sql.sqlite | 0 .../56/nuke_empty_communities_from_db.sql | 0 .../main}/delta/56/public_room_list_idx.sql | 0 .../main}/delta/56/redaction_censor.sql | 0 .../main}/delta/56/redaction_censor2.sql | 0 .../redaction_censor3_fix_update.sql.postgres | 0 .../main}/delta/56/redaction_censor4.sql | 0 ...remove_tombstoned_rooms_from_directory.sql | 0 .../main}/delta/56/room_key_etag.sql | 0 .../main}/delta/56/room_membership_idx.sql | 0 .../main}/delta/56/room_retention.sql | 0 .../main}/delta/56/signing_keys.sql | 0 .../56/signing_keys_nonunique_signatures.sql | 0 .../main}/delta/56/stats_separated.sql | 0 .../delta/56/unique_user_filter_index.py | 0 .../main}/delta/56/user_external_ids.sql | 0 .../delta/56/users_in_public_rooms_idx.sql | 0 .../57/delete_old_current_state_events.sql | 0 .../57/device_list_remote_cache_stale.sql | 0 .../delta/57/local_current_membership.py | 0 .../delta/57/remove_sent_outbound_pokes.sql | 0 .../main}/delta/57/rooms_version_column.sql | 0 .../57/rooms_version_column_2.sql.postgres | 0 .../57/rooms_version_column_2.sql.sqlite | 0 .../57/rooms_version_column_3.sql.postgres | 0 .../57/rooms_version_column_3.sql.sqlite | 0 .../delta/58/02remove_dup_outbound_pokes.sql | 0 .../main}/delta/58/03persist_ui_auth.sql | 0 .../delta/58/05cache_instance.sql.postgres | 0 .../main}/delta/58/06dlols_unique_idx.py | 0 ...ethod_to_thumbnail_constraint.sql.postgres | 0 ..._method_to_thumbnail_constraint.sql.sqlite | 0 .../main}/delta/58/07persist_ui_auth_ips.sql | 0 ...08_media_safe_from_quarantine.sql.postgres | 0 .../08_media_safe_from_quarantine.sql.sqlite | 0 .../main}/delta/58/09shadow_ban.sql | 0 .../10_pushrules_enabled_delete_obsolete.sql | 0 .../58/10drop_local_rejections_stream.sql | 0 .../58/10federation_pos_instance_name.sql | 0 .../main}/delta/58/11dehydration.sql | 0 .../main}/delta/58/11fallback.sql | 0 .../main}/delta/58/11user_id_seq.py | 0 .../main}/delta/58/12room_stats.sql | 0 .../58/13remove_presence_allow_inbound.sql | 0 .../main}/delta/58/14events_instance_name.sql | 0 .../58/14events_instance_name.sql.postgres | 0 .../delta/58/15_catchup_destination_rooms.sql | 0 .../main}/delta/58/15unread_count.sql | 0 .../58/16populate_stats_process_rooms_fix.sql | 0 .../delta/58/17_catchup_last_successful.sql | 0 .../main}/delta/58/18stream_positions.sql | 0 .../delta/58/19instance_map.sql.postgres | 0 .../main}/delta/58/19txn_id.sql 
| 0 .../delta/58/20instance_name_event_tables.sql | 0 .../main}/delta/58/20user_daily_visits.sql | 0 .../main}/delta/58/21as_device_stream.sql | 0 .../delta/58/21drop_device_max_stream_id.sql | 0 .../main}/delta/58/22puppet_token.sql | 0 .../delta/58/22users_have_local_media.sql | 0 .../delta/58/23e2e_cross_signing_keys_idx.sql | 0 .../delta/58/24drop_event_json_index.sql | 0 .../58/25user_external_ids_user_id_idx.sql | 0 .../58/26access_token_last_validated.sql | 0 .../main}/delta/58/27local_invites.sql | 0 .../58/28drop_last_used_column.sql.postgres | 0 .../58/28drop_last_used_column.sql.sqlite | 0 .../main}/delta/59/01ignored_user.py | 0 .../main}/delta/59/02shard_send_to_device.sql | 0 ...shard_send_to_device_sequence.sql.postgres | 0 .../main}/delta/59/04_event_auth_chains.sql | 0 .../59/04_event_auth_chains.sql.postgres | 0 .../main}/delta/59/04drop_account_data.sql | 0 .../main}/delta/59/05cache_invalidation.sql | 0 .../main}/delta/59/06chain_cover_index.sql | 0 .../main}/delta/59/06shard_account_data.sql | 0 .../59/06shard_account_data.sql.postgres | 0 .../delta/59/07shard_account_data_fix.sql | 0 ...elete_pushers_for_deactivated_accounts.sql | 0 .../main}/delta/59/08delete_stale_pushers.sql | 0 .../delta/59/09rejected_events_metadata.sql | 0 .../delta/59/10delete_purged_chain_cover.sql | 0 .../11drop_thumbnail_constraint.sql.postgres | 0 .../12account_validity_token_used_ts_ms.sql | 0 .../delta/59/12presence_stream_instance.sql | 0 ...2presence_stream_instance_seq.sql.postgres | 0 .../full_schemas/16/application_services.sql | 0 .../main}/full_schemas/16/event_edges.sql | 0 .../full_schemas/16/event_signatures.sql | 0 .../main}/full_schemas/16/im.sql | 0 .../main}/full_schemas/16/keys.sql | 0 .../full_schemas/16/media_repository.sql | 0 .../main}/full_schemas/16/presence.sql | 0 .../main}/full_schemas/16/profiles.sql | 0 .../main}/full_schemas/16/push.sql | 0 .../main}/full_schemas/16/redactions.sql | 0 .../main}/full_schemas/16/room_aliases.sql | 0 .../main}/full_schemas/16/state.sql | 0 .../main}/full_schemas/16/transactions.sql | 0 .../main}/full_schemas/16/users.sql | 0 .../main}/full_schemas/54/full.sql.postgres | 0 .../main}/full_schemas/54/full.sql.sqlite | 0 .../full_schemas/54/stream_positions.sql | 0 .../state}/delta/23/drop_state_index.sql | 0 .../state}/delta/30/state_stream.sql | 0 .../state}/delta/32/remove_state_indices.sql | 0 .../state}/delta/35/add_state_index.sql | 0 .../state}/delta/35/state.sql | 0 .../state}/delta/35/state_dedupe.sql | 0 .../state}/delta/47/state_group_seq.py | 0 .../state}/delta/56/state_group_room_idx.sql | 0 .../state}/full_schemas/54/full.sql | 0 .../full_schemas/54/sequence.sql.postgres | 0 tests/storage/test_cleanup_extrems.py | 4 +- 284 files changed, 81 insertions(+), 47 deletions(-) create mode 100644 changelog.d/9932.misc delete mode 100644 synapse/storage/databases/main/schema/full_schemas/README.md create mode 100644 synapse/storage/schema/README.md create mode 100644 synapse/storage/schema/__init__.py rename synapse/storage/schema/{ => common}/delta/25/00background_updates.sql (100%) rename synapse/storage/schema/{ => common}/delta/35/00background_updates_add_col.sql (100%) rename synapse/storage/schema/{ => common}/delta/58/00background_update_ordering.sql (100%) rename synapse/storage/schema/{ => common}/full_schemas/54/full.sql (100%) rename synapse/storage/schema/{ => common}/schema_version.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/12/v12.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/13/v13.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/14/v14.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/15/appservice_txns.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/15/presence_indices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/15/v15.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/events_order_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/remote_media_cache_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/remove_duplicates.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/room_alias_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/unique_constraints.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/17/drop_indexes.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/17/server_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/17/user_threepids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/18/server_keys_bigger_ints.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/19/event_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/20/dummy.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/20/pushers.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/21/end_to_end_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/21/receipts.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/22/receipts_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/22/user_threepids_unique.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/24/stats_reporting.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/fts.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/guest_access.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/history_visibility.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/tags.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/26/account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/27/account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/27/forgotten_memberships.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/27/ts.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/event_push_actions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/events_room_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/public_roms_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/receipts_user_id_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/upgrade_times.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/users_is_guest.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/29/push_actions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/alias_creator.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/as_users.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/deleted_pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/presence_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/public_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/push_rule_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/threepid_guest_access_tokens.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/invites.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/local_media_repository_url_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/pushers.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/pushers_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/search_update.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/events.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/openid.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/pusher_throttle.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/remove_indices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/reports.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/access_tokens_device_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/devices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/devices_for_e2e_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/devices_for_e2e_keys_clear_unknown_device.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/event_fields.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/remote_media_ts.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/user_ips_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/appservice_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/cache_stream.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/device_inbox.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/push_display_name_rename.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/received_txn_purge.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/contains_url.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/device_outbox.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/device_stream_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/event_push_actions_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/public_room_list_change_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/stream_order_to_extrem.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/36/readd_public_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/37/remove_auth_idx.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/37/user_threepids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/38/postgres_fts_gist.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/appservice_room_list.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/device_federation_stream_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/event_push_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/federation_out_position.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/membership_profile.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/current_state_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/device_inbox.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/device_list_streams.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/event_push_summary.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/device_list_stream_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/device_outbound_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/event_search_event_id_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/ratelimit.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/current_state_delta.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/device_list_last_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/event_auth_state_only.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/user_dir.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/blocked_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/quarantine_media.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/url_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/user_share.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/44/expire_url_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/45/group_server.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/45/profile_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/drop_refresh_tokens.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/drop_unique_deleted_pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/group_server.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/local_media_repository_url_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/user_dir_null_room_ids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/user_dir_typos.sql (100%) rename synapse/storage/{databases/main/schema => 
schema/main}/delta/47/last_access_media.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/47/postgres_fts_gin.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/47/push_actions_staging.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/add_user_consent.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/add_user_ips_last_seen_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/deactivated_users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/group_unique_indexes.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/groups_joinable.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/49/add_user_consent_server_notice_sent.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/49/add_user_daily_visits.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/49/add_user_ips_last_seen_only_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/50/add_creation_ts_users_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/50/erasure_store.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/50/make_event_content_nullable.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/51/e2e_room_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/51/monthly_active_users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/52/add_event_to_state_group_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/52/device_list_streams_unique_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/52/e2e_room_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/add_user_type_to_users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/drop_sent_transactions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/event_format_version.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_dir_populate.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_ips_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_share.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_threepid_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/users_in_public_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/account_validity_with_renewal.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/add_validity_to_server_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/delete_forward_extremities.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/drop_legacy_tables.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/drop_presence_list.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/relations.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/stats.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/stats2.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/55/access_token_expiry.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/55/track_threepid_validations.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/55/users_alter_deactivated.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/add_spans_to_device_lists.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/current_state_events_membership.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/current_state_events_membership_mk2.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/delete_keys_from_deleted_backups.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/destinations_failure_ts.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/destinations_retry_interval_type.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/device_stream_id_insert.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/devices_last_seen.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/drop_unused_event_tables.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/event_expiry.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/event_labels.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/event_labels_background_update.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/fix_room_keys_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/hidden_devices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/hidden_devices_fix.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/nuke_empty_communities_from_db.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/public_room_list_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor2.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor3_fix_update.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor4.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/remove_tombstoned_rooms_from_directory.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/room_key_etag.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/room_membership_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/room_retention.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/signing_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/signing_keys_nonunique_signatures.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/stats_separated.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/unique_user_filter_index.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/user_external_ids.sql (100%) rename synapse/storage/{databases/main/schema => 
schema/main}/delta/56/users_in_public_rooms_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/delete_old_current_state_events.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/device_list_remote_cache_stale.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/local_current_membership.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/remove_sent_outbound_pokes.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_2.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_2.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_3.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_3.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/02remove_dup_outbound_pokes.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/03persist_ui_auth.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/05cache_instance.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/06dlols_unique_idx.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/07add_method_to_thumbnail_constraint.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/07persist_ui_auth_ips.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/08_media_safe_from_quarantine.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/08_media_safe_from_quarantine.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/09shadow_ban.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/10_pushrules_enabled_delete_obsolete.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/10drop_local_rejections_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/10federation_pos_instance_name.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/11dehydration.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/11fallback.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/11user_id_seq.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/12room_stats.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/13remove_presence_allow_inbound.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/14events_instance_name.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/14events_instance_name.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/15_catchup_destination_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/15unread_count.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/16populate_stats_process_rooms_fix.sql (100%) 
rename synapse/storage/{databases/main/schema => schema/main}/delta/58/17_catchup_last_successful.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/18stream_positions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/19instance_map.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/19txn_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/20instance_name_event_tables.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/20user_daily_visits.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/21as_device_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/21drop_device_max_stream_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/22puppet_token.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/22users_have_local_media.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/23e2e_cross_signing_keys_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/24drop_event_json_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/25user_external_ids_user_id_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/26access_token_last_validated.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/27local_invites.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/28drop_last_used_column.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/28drop_last_used_column.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/01ignored_user.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/02shard_send_to_device.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/03shard_send_to_device_sequence.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/04_event_auth_chains.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/04_event_auth_chains.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/04drop_account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/05cache_invalidation.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/06chain_cover_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/06shard_account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/06shard_account_data.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/07shard_account_data_fix.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/08delete_pushers_for_deactivated_accounts.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/08delete_stale_pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/09rejected_events_metadata.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/10delete_purged_chain_cover.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/11drop_thumbnail_constraint.sql.postgres (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/59/12account_validity_token_used_ts_ms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/12presence_stream_instance.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/12presence_stream_instance_seq.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/application_services.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/event_edges.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/event_signatures.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/im.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/media_repository.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/presence.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/profiles.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/push.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/redactions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/room_aliases.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/state.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/transactions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/54/full.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/54/full.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/54/stream_positions.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/23/drop_state_index.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/30/state_stream.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/32/remove_state_indices.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/35/add_state_index.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/35/state.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/35/state_dedupe.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/47/state_group_seq.py (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/56/state_group_room_idx.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/full_schemas/54/full.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/full_schemas/54/sequence.sql.postgres (100%) diff --git a/changelog.d/9932.misc b/changelog.d/9932.misc new file mode 100644 index 000000000..9e16a3617 --- /dev/null +++ b/changelog.d/9932.misc @@ -0,0 +1 @@ +Move database schema files into a common directory. 
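To make the new layout concrete, here is a minimal sketch of how delta directories get resolved once the schema files live under synapse/storage/schema/. The SCHEMA_PATH constant and the delta_dirs_for_version helper below are illustrative names only, not code from this patch; the real logic is the path-building added to _upgrade_existing_database in the prepare_database.py diff that follows.

import os

# Illustration only: mirrors the path-building logic under the new
# schema/{common,main,state} layout described in this patch.
SCHEMA_PATH = "synapse/storage/schema"

def delta_dirs_for_version(version: int, databases=("main", "state")):
    """Directories that would be searched for delta files at `version`."""
    dirs = [os.path.join(SCHEMA_PATH, "common", "delta", str(version))]
    dirs.extend(
        os.path.join(SCHEMA_PATH, database, "delta", str(version))
        for database in databases
    )
    return dirs

# delta_dirs_for_version(59) returns:
#   synapse/storage/schema/common/delta/59
#   synapse/storage/schema/main/delta/59
#   synapse/storage/schema/state/delta/59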
diff --git a/synapse/storage/databases/main/schema/full_schemas/README.md b/synapse/storage/databases/main/schema/full_schemas/README.md deleted file mode 100644 index c00f28719..000000000 --- a/synapse/storage/databases/main/schema/full_schemas/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Synapse Database Schemas - -These schemas are used as a basis to create brand new Synapse databases, on both -SQLite3 and Postgres. - -## Building full schema dumps - -If you want to recreate these schemas, they need to be made from a database that -has had all background updates run. - -To do so, use `scripts-dev/make_full_schema.sh`. This will produce new -`full.sql.postgres ` and `full.sql.sqlite` files. - -Ensure postgres is installed and your user has the ability to run bash commands -such as `createdb`, then call - - ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ - -There are currently two folders with full-schema snapshots. `16` is a snapshot -from 2015, for historical reference. The other contains the most recent full -schema snapshot. diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 7a2cbee42..3799d4673 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -26,16 +26,13 @@ from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.schema import SCHEMA_VERSION from synapse.storage.types import Cursor logger = logging.getLogger(__name__) -# Remember to update this number every time a change is made to database -# schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 59 - -dir_path = os.path.abspath(os.path.dirname(__file__)) +schema_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "schema") class PrepareDatabaseException(Exception): @@ -167,7 +164,14 @@ def _setup_new_database( Example directory structure: - schema/ + schema/ + common/ + delta/ + ... + full_schemas/ + 11/ + foo.sql + main/ delta/ ... full_schemas/ @@ -175,15 +179,14 @@ def _setup_new_database( test.sql ... 11/ - foo.sql bar.sql ... In the example foo.sql and bar.sql would be run, and then any delta files for versions strictly greater than 11. - Note: we apply the full schemas and deltas from the top level `schema/` - folder as well those in the data stores specified. + Note: we apply the full schemas and deltas from the `schema/common` + folder as well those in the databases specified. Args: cur: a database cursor @@ -195,12 +198,12 @@ def _setup_new_database( # configured to our liking. database_engine.check_new_database(cur) - current_dir = os.path.join(dir_path, "schema", "full_schemas") + full_schemas_dir = os.path.join(schema_path, "common", "full_schemas") # First we find the highest full schema version we have valid_versions = [] - for filename in os.listdir(current_dir): + for filename in os.listdir(full_schemas_dir): try: ver = int(filename) except ValueError: @@ -218,15 +221,13 @@ def _setup_new_database( logger.debug("Initialising schema v%d", max_current_ver) - # Now lets find all the full schema files, both in the global schema and - # in data store schemas. - directories = [os.path.join(current_dir, str(max_current_ver))] + # Now let's find all the full schema files, both in the common schema and + # in database schemas. 
+ directories = [os.path.join(full_schemas_dir, str(max_current_ver))] directories.extend( os.path.join( - dir_path, - "databases", + schema_path, database, - "schema", "full_schemas", str(max_current_ver), ) @@ -357,6 +358,9 @@ def _upgrade_existing_database( check_database_before_upgrade(cur, database_engine, config) start_ver = current_version + + # if we got to this schema version by running a full_schema rather than a series + # of deltas, we should not run the deltas for this version. if not upgraded: start_ver += 1 @@ -385,12 +389,10 @@ def _upgrade_existing_database( # directories for schema updates. # First we find the directories to search in - delta_dir = os.path.join(dir_path, "schema", "delta", str(v)) + delta_dir = os.path.join(schema_path, "common", "delta", str(v)) directories = [delta_dir] for database in databases: - directories.append( - os.path.join(dir_path, "databases", database, "schema", "delta", str(v)) - ) + directories.append(os.path.join(schema_path, database, "delta", str(v))) # Used to check if we have any duplicate file names file_name_counter = Counter() # type: CounterType[str] @@ -621,8 +623,8 @@ def _get_or_create_schema_state( txn: Cursor, database_engine: BaseDatabaseEngine ) -> Optional[Tuple[int, List[str], bool]]: # Bluntly try creating the schema_version tables. - schema_path = os.path.join(dir_path, "schema", "schema_version.sql") - executescript(txn, schema_path) + sql_path = os.path.join(schema_path, "common", "schema_version.sql") + executescript(txn, sql_path) txn.execute("SELECT version, upgraded FROM schema_version") row = txn.fetchone() diff --git a/synapse/storage/schema/README.md b/synapse/storage/schema/README.md new file mode 100644 index 000000000..030153db6 --- /dev/null +++ b/synapse/storage/schema/README.md @@ -0,0 +1,37 @@ +# Synapse Database Schemas + +This directory contains the schema files used to build Synapse databases. + +Synapse supports splitting its datastore across multiple physical databases (which can +be useful for large installations), and the schema files are therefore split according +to the logical database they apply to. + +At the time of writing, the following "logical" databases are supported: + +* `state` - used to store Matrix room state (more specifically, `state_groups`, + their relationships and contents.) +* `main` - stores everything else. + +Additionally, the `common` directory contains schema files for tables which must be +present on *all* physical databases. + +## Full schema dumps + +In the `full_schemas` directories, only the most recently-numbered snapshot is useful (`54` at the time of writing). Older snapshots (eg, `16`) are present for historical +reference only. + +## Building full schema dumps + +If you want to recreate these schemas, they need to be made from a database that +has had all background updates run. + +To do so, use `scripts-dev/make_full_schema.sh`. This will produce new +`full.sql.postgres` and `full.sql.sqlite` files. + +Ensure postgres is installed, then run: + + ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ + +NB at the time of writing, this script predates the split into separate `state`/`main` +databases so will require updates to handle that correctly. diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py new file mode 100644 index 000000000..f0d9f2316 --- /dev/null +++ b/synapse/storage/schema/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Remember to update this number every time a change is made to database +# schema files, so the users will be informed on server restarts. +SCHEMA_VERSION = 59 diff --git a/synapse/storage/schema/delta/25/00background_updates.sql b/synapse/storage/schema/common/delta/25/00background_updates.sql similarity index 100% rename from synapse/storage/schema/delta/25/00background_updates.sql rename to synapse/storage/schema/common/delta/25/00background_updates.sql diff --git a/synapse/storage/schema/delta/35/00background_updates_add_col.sql b/synapse/storage/schema/common/delta/35/00background_updates_add_col.sql similarity index 100% rename from synapse/storage/schema/delta/35/00background_updates_add_col.sql rename to synapse/storage/schema/common/delta/35/00background_updates_add_col.sql diff --git a/synapse/storage/schema/delta/58/00background_update_ordering.sql b/synapse/storage/schema/common/delta/58/00background_update_ordering.sql similarity index 100% rename from synapse/storage/schema/delta/58/00background_update_ordering.sql rename to synapse/storage/schema/common/delta/58/00background_update_ordering.sql diff --git a/synapse/storage/schema/full_schemas/54/full.sql b/synapse/storage/schema/common/full_schemas/54/full.sql similarity index 100% rename from synapse/storage/schema/full_schemas/54/full.sql rename to synapse/storage/schema/common/full_schemas/54/full.sql diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/common/schema_version.sql similarity index 100% rename from synapse/storage/schema/schema_version.sql rename to synapse/storage/schema/common/schema_version.sql diff --git a/synapse/storage/databases/main/schema/delta/12/v12.sql b/synapse/storage/schema/main/delta/12/v12.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/12/v12.sql rename to synapse/storage/schema/main/delta/12/v12.sql diff --git a/synapse/storage/databases/main/schema/delta/13/v13.sql b/synapse/storage/schema/main/delta/13/v13.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/13/v13.sql rename to synapse/storage/schema/main/delta/13/v13.sql diff --git a/synapse/storage/databases/main/schema/delta/14/v14.sql b/synapse/storage/schema/main/delta/14/v14.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/14/v14.sql rename to synapse/storage/schema/main/delta/14/v14.sql diff --git a/synapse/storage/databases/main/schema/delta/15/appservice_txns.sql b/synapse/storage/schema/main/delta/15/appservice_txns.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/15/appservice_txns.sql rename to synapse/storage/schema/main/delta/15/appservice_txns.sql diff --git a/synapse/storage/databases/main/schema/delta/15/presence_indices.sql b/synapse/storage/schema/main/delta/15/presence_indices.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/15/presence_indices.sql rename to 
synapse/storage/schema/main/delta/15/presence_indices.sql diff --git a/synapse/storage/databases/main/schema/delta/15/v15.sql b/synapse/storage/schema/main/delta/15/v15.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/15/v15.sql rename to synapse/storage/schema/main/delta/15/v15.sql diff --git a/synapse/storage/databases/main/schema/delta/16/events_order_index.sql b/synapse/storage/schema/main/delta/16/events_order_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/events_order_index.sql rename to synapse/storage/schema/main/delta/16/events_order_index.sql diff --git a/synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql b/synapse/storage/schema/main/delta/16/remote_media_cache_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql rename to synapse/storage/schema/main/delta/16/remote_media_cache_index.sql diff --git a/synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql b/synapse/storage/schema/main/delta/16/remove_duplicates.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql rename to synapse/storage/schema/main/delta/16/remove_duplicates.sql diff --git a/synapse/storage/databases/main/schema/delta/16/room_alias_index.sql b/synapse/storage/schema/main/delta/16/room_alias_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/room_alias_index.sql rename to synapse/storage/schema/main/delta/16/room_alias_index.sql diff --git a/synapse/storage/databases/main/schema/delta/16/unique_constraints.sql b/synapse/storage/schema/main/delta/16/unique_constraints.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/unique_constraints.sql rename to synapse/storage/schema/main/delta/16/unique_constraints.sql diff --git a/synapse/storage/databases/main/schema/delta/16/users.sql b/synapse/storage/schema/main/delta/16/users.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/16/users.sql rename to synapse/storage/schema/main/delta/16/users.sql diff --git a/synapse/storage/databases/main/schema/delta/17/drop_indexes.sql b/synapse/storage/schema/main/delta/17/drop_indexes.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/17/drop_indexes.sql rename to synapse/storage/schema/main/delta/17/drop_indexes.sql diff --git a/synapse/storage/databases/main/schema/delta/17/server_keys.sql b/synapse/storage/schema/main/delta/17/server_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/17/server_keys.sql rename to synapse/storage/schema/main/delta/17/server_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/17/user_threepids.sql b/synapse/storage/schema/main/delta/17/user_threepids.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/17/user_threepids.sql rename to synapse/storage/schema/main/delta/17/user_threepids.sql diff --git a/synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql rename to synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql diff --git a/synapse/storage/databases/main/schema/delta/19/event_index.sql b/synapse/storage/schema/main/delta/19/event_index.sql 
similarity index 100% rename from synapse/storage/databases/main/schema/delta/19/event_index.sql rename to synapse/storage/schema/main/delta/19/event_index.sql diff --git a/synapse/storage/databases/main/schema/delta/20/dummy.sql b/synapse/storage/schema/main/delta/20/dummy.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/20/dummy.sql rename to synapse/storage/schema/main/delta/20/dummy.sql diff --git a/synapse/storage/databases/main/schema/delta/20/pushers.py b/synapse/storage/schema/main/delta/20/pushers.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/20/pushers.py rename to synapse/storage/schema/main/delta/20/pushers.py diff --git a/synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql b/synapse/storage/schema/main/delta/21/end_to_end_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql rename to synapse/storage/schema/main/delta/21/end_to_end_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/21/receipts.sql b/synapse/storage/schema/main/delta/21/receipts.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/21/receipts.sql rename to synapse/storage/schema/main/delta/21/receipts.sql diff --git a/synapse/storage/databases/main/schema/delta/22/receipts_index.sql b/synapse/storage/schema/main/delta/22/receipts_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/22/receipts_index.sql rename to synapse/storage/schema/main/delta/22/receipts_index.sql diff --git a/synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql b/synapse/storage/schema/main/delta/22/user_threepids_unique.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql rename to synapse/storage/schema/main/delta/22/user_threepids_unique.sql diff --git a/synapse/storage/databases/main/schema/delta/24/stats_reporting.sql b/synapse/storage/schema/main/delta/24/stats_reporting.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/24/stats_reporting.sql rename to synapse/storage/schema/main/delta/24/stats_reporting.sql diff --git a/synapse/storage/databases/main/schema/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/fts.py rename to synapse/storage/schema/main/delta/25/fts.py diff --git a/synapse/storage/databases/main/schema/delta/25/guest_access.sql b/synapse/storage/schema/main/delta/25/guest_access.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/guest_access.sql rename to synapse/storage/schema/main/delta/25/guest_access.sql diff --git a/synapse/storage/databases/main/schema/delta/25/history_visibility.sql b/synapse/storage/schema/main/delta/25/history_visibility.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/history_visibility.sql rename to synapse/storage/schema/main/delta/25/history_visibility.sql diff --git a/synapse/storage/databases/main/schema/delta/25/tags.sql b/synapse/storage/schema/main/delta/25/tags.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/25/tags.sql rename to synapse/storage/schema/main/delta/25/tags.sql diff --git a/synapse/storage/databases/main/schema/delta/26/account_data.sql b/synapse/storage/schema/main/delta/26/account_data.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/26/account_data.sql rename to synapse/storage/schema/main/delta/26/account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/27/account_data.sql b/synapse/storage/schema/main/delta/27/account_data.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/27/account_data.sql rename to synapse/storage/schema/main/delta/27/account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql b/synapse/storage/schema/main/delta/27/forgotten_memberships.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql rename to synapse/storage/schema/main/delta/27/forgotten_memberships.sql diff --git a/synapse/storage/databases/main/schema/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/27/ts.py rename to synapse/storage/schema/main/delta/27/ts.py diff --git a/synapse/storage/databases/main/schema/delta/28/event_push_actions.sql b/synapse/storage/schema/main/delta/28/event_push_actions.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/event_push_actions.sql rename to synapse/storage/schema/main/delta/28/event_push_actions.sql diff --git a/synapse/storage/databases/main/schema/delta/28/events_room_stream.sql b/synapse/storage/schema/main/delta/28/events_room_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/events_room_stream.sql rename to synapse/storage/schema/main/delta/28/events_room_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/28/public_roms_index.sql b/synapse/storage/schema/main/delta/28/public_roms_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/public_roms_index.sql rename to synapse/storage/schema/main/delta/28/public_roms_index.sql diff --git a/synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql b/synapse/storage/schema/main/delta/28/receipts_user_id_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql rename to synapse/storage/schema/main/delta/28/receipts_user_id_index.sql diff --git a/synapse/storage/databases/main/schema/delta/28/upgrade_times.sql b/synapse/storage/schema/main/delta/28/upgrade_times.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/upgrade_times.sql rename to synapse/storage/schema/main/delta/28/upgrade_times.sql diff --git a/synapse/storage/databases/main/schema/delta/28/users_is_guest.sql b/synapse/storage/schema/main/delta/28/users_is_guest.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/28/users_is_guest.sql rename to synapse/storage/schema/main/delta/28/users_is_guest.sql diff --git a/synapse/storage/databases/main/schema/delta/29/push_actions.sql b/synapse/storage/schema/main/delta/29/push_actions.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/29/push_actions.sql rename to synapse/storage/schema/main/delta/29/push_actions.sql diff --git a/synapse/storage/databases/main/schema/delta/30/alias_creator.sql b/synapse/storage/schema/main/delta/30/alias_creator.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/alias_creator.sql rename to synapse/storage/schema/main/delta/30/alias_creator.sql diff --git 
a/synapse/storage/databases/main/schema/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/as_users.py rename to synapse/storage/schema/main/delta/30/as_users.py diff --git a/synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql b/synapse/storage/schema/main/delta/30/deleted_pushers.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql rename to synapse/storage/schema/main/delta/30/deleted_pushers.sql diff --git a/synapse/storage/databases/main/schema/delta/30/presence_stream.sql b/synapse/storage/schema/main/delta/30/presence_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/presence_stream.sql rename to synapse/storage/schema/main/delta/30/presence_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/30/public_rooms.sql b/synapse/storage/schema/main/delta/30/public_rooms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/public_rooms.sql rename to synapse/storage/schema/main/delta/30/public_rooms.sql diff --git a/synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql b/synapse/storage/schema/main/delta/30/push_rule_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql rename to synapse/storage/schema/main/delta/30/push_rule_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql b/synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql rename to synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql diff --git a/synapse/storage/databases/main/schema/delta/31/invites.sql b/synapse/storage/schema/main/delta/31/invites.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/31/invites.sql rename to synapse/storage/schema/main/delta/31/invites.sql diff --git a/synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql b/synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql rename to synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql diff --git a/synapse/storage/databases/main/schema/delta/31/pushers.py b/synapse/storage/schema/main/delta/31/pushers.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/31/pushers.py rename to synapse/storage/schema/main/delta/31/pushers.py diff --git a/synapse/storage/databases/main/schema/delta/31/pushers_index.sql b/synapse/storage/schema/main/delta/31/pushers_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/31/pushers_index.sql rename to synapse/storage/schema/main/delta/31/pushers_index.sql diff --git a/synapse/storage/databases/main/schema/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/31/search_update.py rename to synapse/storage/schema/main/delta/31/search_update.py diff --git a/synapse/storage/databases/main/schema/delta/32/events.sql b/synapse/storage/schema/main/delta/32/events.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/32/events.sql rename to synapse/storage/schema/main/delta/32/events.sql diff --git a/synapse/storage/databases/main/schema/delta/32/openid.sql b/synapse/storage/schema/main/delta/32/openid.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/32/openid.sql rename to synapse/storage/schema/main/delta/32/openid.sql diff --git a/synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql b/synapse/storage/schema/main/delta/32/pusher_throttle.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql rename to synapse/storage/schema/main/delta/32/pusher_throttle.sql diff --git a/synapse/storage/databases/main/schema/delta/32/remove_indices.sql b/synapse/storage/schema/main/delta/32/remove_indices.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/32/remove_indices.sql rename to synapse/storage/schema/main/delta/32/remove_indices.sql diff --git a/synapse/storage/databases/main/schema/delta/32/reports.sql b/synapse/storage/schema/main/delta/32/reports.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/32/reports.sql rename to synapse/storage/schema/main/delta/32/reports.sql diff --git a/synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql b/synapse/storage/schema/main/delta/33/access_tokens_device_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql rename to synapse/storage/schema/main/delta/33/access_tokens_device_index.sql diff --git a/synapse/storage/databases/main/schema/delta/33/devices.sql b/synapse/storage/schema/main/delta/33/devices.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/devices.sql rename to synapse/storage/schema/main/delta/33/devices.sql diff --git a/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql rename to synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql b/synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql rename to synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql diff --git a/synapse/storage/databases/main/schema/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/event_fields.py rename to synapse/storage/schema/main/delta/33/event_fields.py diff --git a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/main/delta/33/remote_media_ts.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/remote_media_ts.py rename to synapse/storage/schema/main/delta/33/remote_media_ts.py diff --git a/synapse/storage/databases/main/schema/delta/33/user_ips_index.sql b/synapse/storage/schema/main/delta/33/user_ips_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/33/user_ips_index.sql rename to synapse/storage/schema/main/delta/33/user_ips_index.sql diff --git 
a/synapse/storage/databases/main/schema/delta/34/appservice_stream.sql b/synapse/storage/schema/main/delta/34/appservice_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/34/appservice_stream.sql rename to synapse/storage/schema/main/delta/34/appservice_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/34/cache_stream.py b/synapse/storage/schema/main/delta/34/cache_stream.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/34/cache_stream.py rename to synapse/storage/schema/main/delta/34/cache_stream.py diff --git a/synapse/storage/databases/main/schema/delta/34/device_inbox.sql b/synapse/storage/schema/main/delta/34/device_inbox.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/34/device_inbox.sql rename to synapse/storage/schema/main/delta/34/device_inbox.sql diff --git a/synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql b/synapse/storage/schema/main/delta/34/push_display_name_rename.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql rename to synapse/storage/schema/main/delta/34/push_display_name_rename.sql diff --git a/synapse/storage/databases/main/schema/delta/34/received_txn_purge.py b/synapse/storage/schema/main/delta/34/received_txn_purge.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/34/received_txn_purge.py rename to synapse/storage/schema/main/delta/34/received_txn_purge.py diff --git a/synapse/storage/databases/main/schema/delta/35/contains_url.sql b/synapse/storage/schema/main/delta/35/contains_url.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/35/contains_url.sql rename to synapse/storage/schema/main/delta/35/contains_url.sql diff --git a/synapse/storage/databases/main/schema/delta/35/device_outbox.sql b/synapse/storage/schema/main/delta/35/device_outbox.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/35/device_outbox.sql rename to synapse/storage/schema/main/delta/35/device_outbox.sql diff --git a/synapse/storage/databases/main/schema/delta/35/device_stream_id.sql b/synapse/storage/schema/main/delta/35/device_stream_id.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/35/device_stream_id.sql rename to synapse/storage/schema/main/delta/35/device_stream_id.sql diff --git a/synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql b/synapse/storage/schema/main/delta/35/event_push_actions_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql rename to synapse/storage/schema/main/delta/35/event_push_actions_index.sql diff --git a/synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql b/synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql rename to synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql b/synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql rename to synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql diff --git 
a/synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql b/synapse/storage/schema/main/delta/36/readd_public_rooms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql rename to synapse/storage/schema/main/delta/36/readd_public_rooms.sql diff --git a/synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py b/synapse/storage/schema/main/delta/37/remove_auth_idx.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py rename to synapse/storage/schema/main/delta/37/remove_auth_idx.py diff --git a/synapse/storage/databases/main/schema/delta/37/user_threepids.sql b/synapse/storage/schema/main/delta/37/user_threepids.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/37/user_threepids.sql rename to synapse/storage/schema/main/delta/37/user_threepids.sql diff --git a/synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql b/synapse/storage/schema/main/delta/38/postgres_fts_gist.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql rename to synapse/storage/schema/main/delta/38/postgres_fts_gist.sql diff --git a/synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql b/synapse/storage/schema/main/delta/39/appservice_room_list.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql rename to synapse/storage/schema/main/delta/39/appservice_room_list.sql diff --git a/synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql b/synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql rename to synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/39/event_push_index.sql b/synapse/storage/schema/main/delta/39/event_push_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/39/event_push_index.sql rename to synapse/storage/schema/main/delta/39/event_push_index.sql diff --git a/synapse/storage/databases/main/schema/delta/39/federation_out_position.sql b/synapse/storage/schema/main/delta/39/federation_out_position.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/39/federation_out_position.sql rename to synapse/storage/schema/main/delta/39/federation_out_position.sql diff --git a/synapse/storage/databases/main/schema/delta/39/membership_profile.sql b/synapse/storage/schema/main/delta/39/membership_profile.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/39/membership_profile.sql rename to synapse/storage/schema/main/delta/39/membership_profile.sql diff --git a/synapse/storage/databases/main/schema/delta/40/current_state_idx.sql b/synapse/storage/schema/main/delta/40/current_state_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/40/current_state_idx.sql rename to synapse/storage/schema/main/delta/40/current_state_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/40/device_inbox.sql b/synapse/storage/schema/main/delta/40/device_inbox.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/40/device_inbox.sql rename to synapse/storage/schema/main/delta/40/device_inbox.sql diff --git 
a/synapse/storage/databases/main/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/main/delta/40/device_list_streams.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/40/device_list_streams.sql rename to synapse/storage/schema/main/delta/40/device_list_streams.sql diff --git a/synapse/storage/databases/main/schema/delta/40/event_push_summary.sql b/synapse/storage/schema/main/delta/40/event_push_summary.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/40/event_push_summary.sql rename to synapse/storage/schema/main/delta/40/event_push_summary.sql diff --git a/synapse/storage/databases/main/schema/delta/40/pushers.sql b/synapse/storage/schema/main/delta/40/pushers.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/40/pushers.sql rename to synapse/storage/schema/main/delta/40/pushers.sql diff --git a/synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql b/synapse/storage/schema/main/delta/41/device_list_stream_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql rename to synapse/storage/schema/main/delta/41/device_list_stream_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql b/synapse/storage/schema/main/delta/41/device_outbound_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql rename to synapse/storage/schema/main/delta/41/device_outbound_index.sql diff --git a/synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql b/synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql rename to synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/41/ratelimit.sql b/synapse/storage/schema/main/delta/41/ratelimit.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/41/ratelimit.sql rename to synapse/storage/schema/main/delta/41/ratelimit.sql diff --git a/synapse/storage/databases/main/schema/delta/42/current_state_delta.sql b/synapse/storage/schema/main/delta/42/current_state_delta.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/42/current_state_delta.sql rename to synapse/storage/schema/main/delta/42/current_state_delta.sql diff --git a/synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql b/synapse/storage/schema/main/delta/42/device_list_last_id.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql rename to synapse/storage/schema/main/delta/42/device_list_last_id.sql diff --git a/synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql b/synapse/storage/schema/main/delta/42/event_auth_state_only.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql rename to synapse/storage/schema/main/delta/42/event_auth_state_only.sql diff --git a/synapse/storage/databases/main/schema/delta/42/user_dir.py b/synapse/storage/schema/main/delta/42/user_dir.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/42/user_dir.py rename to synapse/storage/schema/main/delta/42/user_dir.py diff --git a/synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql 
b/synapse/storage/schema/main/delta/43/blocked_rooms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql rename to synapse/storage/schema/main/delta/43/blocked_rooms.sql diff --git a/synapse/storage/databases/main/schema/delta/43/quarantine_media.sql b/synapse/storage/schema/main/delta/43/quarantine_media.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/43/quarantine_media.sql rename to synapse/storage/schema/main/delta/43/quarantine_media.sql diff --git a/synapse/storage/databases/main/schema/delta/43/url_cache.sql b/synapse/storage/schema/main/delta/43/url_cache.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/43/url_cache.sql rename to synapse/storage/schema/main/delta/43/url_cache.sql diff --git a/synapse/storage/databases/main/schema/delta/43/user_share.sql b/synapse/storage/schema/main/delta/43/user_share.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/43/user_share.sql rename to synapse/storage/schema/main/delta/43/user_share.sql diff --git a/synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/main/delta/44/expire_url_cache.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql rename to synapse/storage/schema/main/delta/44/expire_url_cache.sql diff --git a/synapse/storage/databases/main/schema/delta/45/group_server.sql b/synapse/storage/schema/main/delta/45/group_server.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/45/group_server.sql rename to synapse/storage/schema/main/delta/45/group_server.sql diff --git a/synapse/storage/databases/main/schema/delta/45/profile_cache.sql b/synapse/storage/schema/main/delta/45/profile_cache.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/45/profile_cache.sql rename to synapse/storage/schema/main/delta/45/profile_cache.sql diff --git a/synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql b/synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql rename to synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql diff --git a/synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql b/synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql rename to synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql diff --git a/synapse/storage/databases/main/schema/delta/46/group_server.sql b/synapse/storage/schema/main/delta/46/group_server.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/46/group_server.sql rename to synapse/storage/schema/main/delta/46/group_server.sql diff --git a/synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql b/synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql rename to synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql b/synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql rename to synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql diff --git a/synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql b/synapse/storage/schema/main/delta/46/user_dir_typos.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql rename to synapse/storage/schema/main/delta/46/user_dir_typos.sql diff --git a/synapse/storage/databases/main/schema/delta/47/last_access_media.sql b/synapse/storage/schema/main/delta/47/last_access_media.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/47/last_access_media.sql rename to synapse/storage/schema/main/delta/47/last_access_media.sql diff --git a/synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql b/synapse/storage/schema/main/delta/47/postgres_fts_gin.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql rename to synapse/storage/schema/main/delta/47/postgres_fts_gin.sql diff --git a/synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql b/synapse/storage/schema/main/delta/47/push_actions_staging.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql rename to synapse/storage/schema/main/delta/47/push_actions_staging.sql diff --git a/synapse/storage/databases/main/schema/delta/48/add_user_consent.sql b/synapse/storage/schema/main/delta/48/add_user_consent.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/48/add_user_consent.sql rename to synapse/storage/schema/main/delta/48/add_user_consent.sql diff --git a/synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql b/synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql rename to synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql diff --git a/synapse/storage/databases/main/schema/delta/48/deactivated_users.sql b/synapse/storage/schema/main/delta/48/deactivated_users.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/48/deactivated_users.sql rename to synapse/storage/schema/main/delta/48/deactivated_users.sql diff --git a/synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py rename to synapse/storage/schema/main/delta/48/group_unique_indexes.py diff --git a/synapse/storage/databases/main/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/main/delta/48/groups_joinable.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/48/groups_joinable.sql rename to synapse/storage/schema/main/delta/48/groups_joinable.sql diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql b/synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql rename to synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql 
b/synapse/storage/schema/main/delta/49/add_user_daily_visits.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql rename to synapse/storage/schema/main/delta/49/add_user_daily_visits.sql diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql b/synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql rename to synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql diff --git a/synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql b/synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql rename to synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql diff --git a/synapse/storage/databases/main/schema/delta/50/erasure_store.sql b/synapse/storage/schema/main/delta/50/erasure_store.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/50/erasure_store.sql rename to synapse/storage/schema/main/delta/50/erasure_store.sql diff --git a/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py rename to synapse/storage/schema/main/delta/50/make_event_content_nullable.py diff --git a/synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/main/delta/51/e2e_room_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql rename to synapse/storage/schema/main/delta/51/e2e_room_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql b/synapse/storage/schema/main/delta/51/monthly_active_users.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql rename to synapse/storage/schema/main/delta/51/monthly_active_users.sql diff --git a/synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql rename to synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql diff --git a/synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql rename to synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql b/synapse/storage/schema/main/delta/52/e2e_room_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql rename to synapse/storage/schema/main/delta/52/e2e_room_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql b/synapse/storage/schema/main/delta/53/add_user_type_to_users.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql 
rename to synapse/storage/schema/main/delta/53/add_user_type_to_users.sql diff --git a/synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql b/synapse/storage/schema/main/delta/53/drop_sent_transactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql rename to synapse/storage/schema/main/delta/53/drop_sent_transactions.sql diff --git a/synapse/storage/databases/main/schema/delta/53/event_format_version.sql b/synapse/storage/schema/main/delta/53/event_format_version.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/event_format_version.sql rename to synapse/storage/schema/main/delta/53/event_format_version.sql diff --git a/synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql b/synapse/storage/schema/main/delta/53/user_dir_populate.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql rename to synapse/storage/schema/main/delta/53/user_dir_populate.sql diff --git a/synapse/storage/databases/main/schema/delta/53/user_ips_index.sql b/synapse/storage/schema/main/delta/53/user_ips_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/user_ips_index.sql rename to synapse/storage/schema/main/delta/53/user_ips_index.sql diff --git a/synapse/storage/databases/main/schema/delta/53/user_share.sql b/synapse/storage/schema/main/delta/53/user_share.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/user_share.sql rename to synapse/storage/schema/main/delta/53/user_share.sql diff --git a/synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql b/synapse/storage/schema/main/delta/53/user_threepid_id.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql rename to synapse/storage/schema/main/delta/53/user_threepid_id.sql diff --git a/synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql b/synapse/storage/schema/main/delta/53/users_in_public_rooms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql rename to synapse/storage/schema/main/delta/53/users_in_public_rooms.sql diff --git a/synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql b/synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql rename to synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql diff --git a/synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql b/synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql rename to synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql rename to synapse/storage/schema/main/delta/54/delete_forward_extremities.sql diff --git a/synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/schema/main/delta/54/drop_legacy_tables.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql rename to synapse/storage/schema/main/delta/54/drop_legacy_tables.sql diff --git a/synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql b/synapse/storage/schema/main/delta/54/drop_presence_list.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql rename to synapse/storage/schema/main/delta/54/drop_presence_list.sql diff --git a/synapse/storage/databases/main/schema/delta/54/relations.sql b/synapse/storage/schema/main/delta/54/relations.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/relations.sql rename to synapse/storage/schema/main/delta/54/relations.sql diff --git a/synapse/storage/databases/main/schema/delta/54/stats.sql b/synapse/storage/schema/main/delta/54/stats.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/stats.sql rename to synapse/storage/schema/main/delta/54/stats.sql diff --git a/synapse/storage/databases/main/schema/delta/54/stats2.sql b/synapse/storage/schema/main/delta/54/stats2.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/54/stats2.sql rename to synapse/storage/schema/main/delta/54/stats2.sql diff --git a/synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql b/synapse/storage/schema/main/delta/55/access_token_expiry.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql rename to synapse/storage/schema/main/delta/55/access_token_expiry.sql diff --git a/synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql b/synapse/storage/schema/main/delta/55/track_threepid_validations.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql rename to synapse/storage/schema/main/delta/55/track_threepid_validations.sql diff --git a/synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql b/synapse/storage/schema/main/delta/55/users_alter_deactivated.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql rename to synapse/storage/schema/main/delta/55/users_alter_deactivated.sql diff --git a/synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql rename to synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql diff --git a/synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/main/delta/56/current_state_events_membership.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql rename to synapse/storage/schema/main/delta/56/current_state_events_membership.sql diff --git a/synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql rename to synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql diff --git a/synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql 
b/synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql rename to synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql diff --git a/synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql b/synapse/storage/schema/main/delta/56/destinations_failure_ts.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql rename to synapse/storage/schema/main/delta/56/destinations_failure_ts.sql diff --git a/synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres b/synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres rename to synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql b/synapse/storage/schema/main/delta/56/device_stream_id_insert.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql rename to synapse/storage/schema/main/delta/56/device_stream_id_insert.sql diff --git a/synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql b/synapse/storage/schema/main/delta/56/devices_last_seen.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql rename to synapse/storage/schema/main/delta/56/devices_last_seen.sql diff --git a/synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql rename to synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql diff --git a/synapse/storage/databases/main/schema/delta/56/event_expiry.sql b/synapse/storage/schema/main/delta/56/event_expiry.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/event_expiry.sql rename to synapse/storage/schema/main/delta/56/event_expiry.sql diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels.sql b/synapse/storage/schema/main/delta/56/event_labels.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/event_labels.sql rename to synapse/storage/schema/main/delta/56/event_labels.sql diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql b/synapse/storage/schema/main/delta/56/event_labels_background_update.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql rename to synapse/storage/schema/main/delta/56/event_labels_background_update.sql diff --git a/synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/schema/main/delta/56/fix_room_keys_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql rename to synapse/storage/schema/main/delta/56/fix_room_keys_index.sql diff --git a/synapse/storage/databases/main/schema/delta/56/hidden_devices.sql b/synapse/storage/schema/main/delta/56/hidden_devices.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/hidden_devices.sql rename to 
synapse/storage/schema/main/delta/56/hidden_devices.sql diff --git a/synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite b/synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite rename to synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite diff --git a/synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql b/synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql rename to synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql diff --git a/synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql b/synapse/storage/schema/main/delta/56/public_room_list_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql rename to synapse/storage/schema/main/delta/56/public_room_list_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor.sql b/synapse/storage/schema/main/delta/56/redaction_censor.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/redaction_censor.sql rename to synapse/storage/schema/main/delta/56/redaction_censor.sql diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/schema/main/delta/56/redaction_censor2.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql rename to synapse/storage/schema/main/delta/56/redaction_censor2.sql diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres b/synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres rename to synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql b/synapse/storage/schema/main/delta/56/redaction_censor4.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql rename to synapse/storage/schema/main/delta/56/redaction_censor4.sql diff --git a/synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql rename to synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql diff --git a/synapse/storage/databases/main/schema/delta/56/room_key_etag.sql b/synapse/storage/schema/main/delta/56/room_key_etag.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/room_key_etag.sql rename to synapse/storage/schema/main/delta/56/room_key_etag.sql diff --git a/synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/main/delta/56/room_membership_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql rename to synapse/storage/schema/main/delta/56/room_membership_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/56/room_retention.sql 
b/synapse/storage/schema/main/delta/56/room_retention.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/room_retention.sql rename to synapse/storage/schema/main/delta/56/room_retention.sql diff --git a/synapse/storage/databases/main/schema/delta/56/signing_keys.sql b/synapse/storage/schema/main/delta/56/signing_keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/signing_keys.sql rename to synapse/storage/schema/main/delta/56/signing_keys.sql diff --git a/synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql b/synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql rename to synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql diff --git a/synapse/storage/databases/main/schema/delta/56/stats_separated.sql b/synapse/storage/schema/main/delta/56/stats_separated.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/stats_separated.sql rename to synapse/storage/schema/main/delta/56/stats_separated.sql diff --git a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py rename to synapse/storage/schema/main/delta/56/unique_user_filter_index.py diff --git a/synapse/storage/databases/main/schema/delta/56/user_external_ids.sql b/synapse/storage/schema/main/delta/56/user_external_ids.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/user_external_ids.sql rename to synapse/storage/schema/main/delta/56/user_external_ids.sql diff --git a/synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql rename to synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql rename to synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql diff --git a/synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql b/synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql rename to synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/schema/main/delta/57/local_current_membership.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/local_current_membership.py rename to synapse/storage/schema/main/delta/57/local_current_membership.py diff --git a/synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql b/synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql rename to synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql b/synapse/storage/schema/main/delta/57/rooms_version_column.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql rename to synapse/storage/schema/main/delta/57/rooms_version_column.sql diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres rename to synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite rename to synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres b/synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres rename to synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite b/synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite rename to synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite diff --git a/synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql b/synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql rename to synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql diff --git a/synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql b/synapse/storage/schema/main/delta/58/03persist_ui_auth.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql rename to synapse/storage/schema/main/delta/58/03persist_ui_auth.sql diff --git a/synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres b/synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres rename to synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py rename to synapse/storage/schema/main/delta/58/06dlols_unique_idx.py diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres b/synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres rename to synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite b/synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite rename to synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite diff --git a/synapse/storage/databases/main/schema/delta/58/07persist_ui_auth_ips.sql b/synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/07persist_ui_auth_ips.sql rename to synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql diff --git a/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres b/synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres rename to synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite b/synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite rename to synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite diff --git a/synapse/storage/databases/main/schema/delta/58/09shadow_ban.sql b/synapse/storage/schema/main/delta/58/09shadow_ban.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/09shadow_ban.sql rename to synapse/storage/schema/main/delta/58/09shadow_ban.sql diff --git a/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql b/synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql rename to synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql diff --git a/synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql b/synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql rename to synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql b/synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql rename to synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql diff --git a/synapse/storage/databases/main/schema/delta/58/11dehydration.sql b/synapse/storage/schema/main/delta/58/11dehydration.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/11dehydration.sql rename to synapse/storage/schema/main/delta/58/11dehydration.sql diff --git a/synapse/storage/databases/main/schema/delta/58/11fallback.sql 
b/synapse/storage/schema/main/delta/58/11fallback.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/11fallback.sql rename to synapse/storage/schema/main/delta/58/11fallback.sql diff --git a/synapse/storage/databases/main/schema/delta/58/11user_id_seq.py b/synapse/storage/schema/main/delta/58/11user_id_seq.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/11user_id_seq.py rename to synapse/storage/schema/main/delta/58/11user_id_seq.py diff --git a/synapse/storage/databases/main/schema/delta/58/12room_stats.sql b/synapse/storage/schema/main/delta/58/12room_stats.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/12room_stats.sql rename to synapse/storage/schema/main/delta/58/12room_stats.sql diff --git a/synapse/storage/databases/main/schema/delta/58/13remove_presence_allow_inbound.sql b/synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/13remove_presence_allow_inbound.sql rename to synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql b/synapse/storage/schema/main/delta/58/14events_instance_name.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql rename to synapse/storage/schema/main/delta/58/14events_instance_name.sql diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres b/synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres rename to synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql b/synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql rename to synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql diff --git a/synapse/storage/databases/main/schema/delta/58/15unread_count.sql b/synapse/storage/schema/main/delta/58/15unread_count.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/15unread_count.sql rename to synapse/storage/schema/main/delta/58/15unread_count.sql diff --git a/synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql b/synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql rename to synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql diff --git a/synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql b/synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql rename to synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql diff --git a/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql b/synapse/storage/schema/main/delta/58/18stream_positions.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/18stream_positions.sql rename to 
synapse/storage/schema/main/delta/58/18stream_positions.sql diff --git a/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres b/synapse/storage/schema/main/delta/58/19instance_map.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres rename to synapse/storage/schema/main/delta/58/19instance_map.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/58/19txn_id.sql b/synapse/storage/schema/main/delta/58/19txn_id.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/19txn_id.sql rename to synapse/storage/schema/main/delta/58/19txn_id.sql diff --git a/synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql b/synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql rename to synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql diff --git a/synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql b/synapse/storage/schema/main/delta/58/20user_daily_visits.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql rename to synapse/storage/schema/main/delta/58/20user_daily_visits.sql diff --git a/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql b/synapse/storage/schema/main/delta/58/21as_device_stream.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql rename to synapse/storage/schema/main/delta/58/21as_device_stream.sql diff --git a/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql b/synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql rename to synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql diff --git a/synapse/storage/databases/main/schema/delta/58/22puppet_token.sql b/synapse/storage/schema/main/delta/58/22puppet_token.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/22puppet_token.sql rename to synapse/storage/schema/main/delta/58/22puppet_token.sql diff --git a/synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql b/synapse/storage/schema/main/delta/58/22users_have_local_media.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql rename to synapse/storage/schema/main/delta/58/22users_have_local_media.sql diff --git a/synapse/storage/databases/main/schema/delta/58/23e2e_cross_signing_keys_idx.sql b/synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/23e2e_cross_signing_keys_idx.sql rename to synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/58/24drop_event_json_index.sql b/synapse/storage/schema/main/delta/58/24drop_event_json_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/24drop_event_json_index.sql rename to synapse/storage/schema/main/delta/58/24drop_event_json_index.sql diff --git a/synapse/storage/databases/main/schema/delta/58/25user_external_ids_user_id_idx.sql b/synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql 
similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/25user_external_ids_user_id_idx.sql rename to synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql diff --git a/synapse/storage/databases/main/schema/delta/58/26access_token_last_validated.sql b/synapse/storage/schema/main/delta/58/26access_token_last_validated.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/26access_token_last_validated.sql rename to synapse/storage/schema/main/delta/58/26access_token_last_validated.sql diff --git a/synapse/storage/databases/main/schema/delta/58/27local_invites.sql b/synapse/storage/schema/main/delta/58/27local_invites.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/27local_invites.sql rename to synapse/storage/schema/main/delta/58/27local_invites.sql diff --git a/synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.postgres b/synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.postgres rename to synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.sqlite b/synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.sqlite rename to synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite diff --git a/synapse/storage/databases/main/schema/delta/59/01ignored_user.py b/synapse/storage/schema/main/delta/59/01ignored_user.py similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/01ignored_user.py rename to synapse/storage/schema/main/delta/59/01ignored_user.py diff --git a/synapse/storage/databases/main/schema/delta/59/02shard_send_to_device.sql b/synapse/storage/schema/main/delta/59/02shard_send_to_device.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/02shard_send_to_device.sql rename to synapse/storage/schema/main/delta/59/02shard_send_to_device.sql diff --git a/synapse/storage/databases/main/schema/delta/59/03shard_send_to_device_sequence.sql.postgres b/synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/03shard_send_to_device_sequence.sql.postgres rename to synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql b/synapse/storage/schema/main/delta/59/04_event_auth_chains.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql rename to synapse/storage/schema/main/delta/59/04_event_auth_chains.sql diff --git a/synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql.postgres b/synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql.postgres rename to synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/04drop_account_data.sql b/synapse/storage/schema/main/delta/59/04drop_account_data.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/59/04drop_account_data.sql rename to synapse/storage/schema/main/delta/59/04drop_account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/59/05cache_invalidation.sql b/synapse/storage/schema/main/delta/59/05cache_invalidation.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/05cache_invalidation.sql rename to synapse/storage/schema/main/delta/59/05cache_invalidation.sql diff --git a/synapse/storage/databases/main/schema/delta/59/06chain_cover_index.sql b/synapse/storage/schema/main/delta/59/06chain_cover_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/06chain_cover_index.sql rename to synapse/storage/schema/main/delta/59/06chain_cover_index.sql diff --git a/synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql b/synapse/storage/schema/main/delta/59/06shard_account_data.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql rename to synapse/storage/schema/main/delta/59/06shard_account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql.postgres b/synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql.postgres rename to synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/07shard_account_data_fix.sql b/synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/07shard_account_data_fix.sql rename to synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql b/synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql rename to synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql b/synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql rename to synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql diff --git a/synapse/storage/databases/main/schema/delta/59/09rejected_events_metadata.sql b/synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/09rejected_events_metadata.sql rename to synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql diff --git a/synapse/storage/databases/main/schema/delta/59/10delete_purged_chain_cover.sql b/synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/10delete_purged_chain_cover.sql rename to synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql diff --git a/synapse/storage/databases/main/schema/delta/59/11drop_thumbnail_constraint.sql.postgres b/synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres similarity index 100% rename from 
synapse/storage/databases/main/schema/delta/59/11drop_thumbnail_constraint.sql.postgres rename to synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql b/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql rename to synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql diff --git a/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql b/synapse/storage/schema/main/delta/59/12presence_stream_instance.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql rename to synapse/storage/schema/main/delta/59/12presence_stream_instance.sql diff --git a/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres b/synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres rename to synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres diff --git a/synapse/storage/databases/main/schema/full_schemas/16/application_services.sql b/synapse/storage/schema/main/full_schemas/16/application_services.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/application_services.sql rename to synapse/storage/schema/main/full_schemas/16/application_services.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/main/full_schemas/16/event_edges.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql rename to synapse/storage/schema/main/full_schemas/16/event_edges.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql b/synapse/storage/schema/main/full_schemas/16/event_signatures.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql rename to synapse/storage/schema/main/full_schemas/16/event_signatures.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/im.sql b/synapse/storage/schema/main/full_schemas/16/im.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/im.sql rename to synapse/storage/schema/main/full_schemas/16/im.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/keys.sql b/synapse/storage/schema/main/full_schemas/16/keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/keys.sql rename to synapse/storage/schema/main/full_schemas/16/keys.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql b/synapse/storage/schema/main/full_schemas/16/media_repository.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql rename to synapse/storage/schema/main/full_schemas/16/media_repository.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/presence.sql b/synapse/storage/schema/main/full_schemas/16/presence.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/presence.sql rename to synapse/storage/schema/main/full_schemas/16/presence.sql diff 
--git a/synapse/storage/databases/main/schema/full_schemas/16/profiles.sql b/synapse/storage/schema/main/full_schemas/16/profiles.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/profiles.sql rename to synapse/storage/schema/main/full_schemas/16/profiles.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/push.sql b/synapse/storage/schema/main/full_schemas/16/push.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/push.sql rename to synapse/storage/schema/main/full_schemas/16/push.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/redactions.sql b/synapse/storage/schema/main/full_schemas/16/redactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/redactions.sql rename to synapse/storage/schema/main/full_schemas/16/redactions.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql b/synapse/storage/schema/main/full_schemas/16/room_aliases.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql rename to synapse/storage/schema/main/full_schemas/16/room_aliases.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/state.sql b/synapse/storage/schema/main/full_schemas/16/state.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/state.sql rename to synapse/storage/schema/main/full_schemas/16/state.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/transactions.sql b/synapse/storage/schema/main/full_schemas/16/transactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/transactions.sql rename to synapse/storage/schema/main/full_schemas/16/transactions.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/users.sql b/synapse/storage/schema/main/full_schemas/16/users.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/users.sql rename to synapse/storage/schema/main/full_schemas/16/users.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/main/full_schemas/54/full.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres rename to synapse/storage/schema/main/full_schemas/54/full.sql.postgres diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/main/full_schemas/54/full.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite rename to synapse/storage/schema/main/full_schemas/54/full.sql.sqlite diff --git a/synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/main/full_schemas/54/stream_positions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql rename to synapse/storage/schema/main/full_schemas/54/stream_positions.sql diff --git a/synapse/storage/databases/state/schema/delta/23/drop_state_index.sql b/synapse/storage/schema/state/delta/23/drop_state_index.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/23/drop_state_index.sql rename to synapse/storage/schema/state/delta/23/drop_state_index.sql diff --git a/synapse/storage/databases/state/schema/delta/30/state_stream.sql 
b/synapse/storage/schema/state/delta/30/state_stream.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/30/state_stream.sql rename to synapse/storage/schema/state/delta/30/state_stream.sql diff --git a/synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql b/synapse/storage/schema/state/delta/32/remove_state_indices.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql rename to synapse/storage/schema/state/delta/32/remove_state_indices.sql diff --git a/synapse/storage/databases/state/schema/delta/35/add_state_index.sql b/synapse/storage/schema/state/delta/35/add_state_index.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/add_state_index.sql rename to synapse/storage/schema/state/delta/35/add_state_index.sql diff --git a/synapse/storage/databases/state/schema/delta/35/state.sql b/synapse/storage/schema/state/delta/35/state.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/state.sql rename to synapse/storage/schema/state/delta/35/state.sql diff --git a/synapse/storage/databases/state/schema/delta/35/state_dedupe.sql b/synapse/storage/schema/state/delta/35/state_dedupe.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/state_dedupe.sql rename to synapse/storage/schema/state/delta/35/state_dedupe.sql diff --git a/synapse/storage/databases/state/schema/delta/47/state_group_seq.py b/synapse/storage/schema/state/delta/47/state_group_seq.py similarity index 100% rename from synapse/storage/databases/state/schema/delta/47/state_group_seq.py rename to synapse/storage/schema/state/delta/47/state_group_seq.py diff --git a/synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql b/synapse/storage/schema/state/delta/56/state_group_room_idx.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql rename to synapse/storage/schema/state/delta/56/state_group_room_idx.sql diff --git a/synapse/storage/databases/state/schema/full_schemas/54/full.sql b/synapse/storage/schema/state/full_schemas/54/full.sql similarity index 100% rename from synapse/storage/databases/state/schema/full_schemas/54/full.sql rename to synapse/storage/schema/state/full_schemas/54/full.sql diff --git a/synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres b/synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres similarity index 100% rename from synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres rename to synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index aa20588bb..77c4fe721 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -47,10 +47,8 @@ def run_background_update(self): ) schema_path = os.path.join( - prepare_database.dir_path, - "databases", + prepare_database.schema_path, "main", - "schema", "delta", "54", "delete_forward_extremities.sql", From 765473567cd7e9520bdb9f85491bb5fe719c360b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 May 2021 14:01:57 +0100 Subject: [PATCH 21/40] Fix make_full_schema to create the db with the right options and user (#9931) --- changelog.d/9931.misc | 1 + scripts-dev/make_full_schema.sh | 19 ++++++++++--------- 2 files changed, 11 
insertions(+), 9 deletions(-) create mode 100644 changelog.d/9931.misc diff --git a/changelog.d/9931.misc b/changelog.d/9931.misc new file mode 100644 index 000000000..326adc7f3 --- /dev/null +++ b/changelog.d/9931.misc @@ -0,0 +1 @@ +Minor fixes to the `make_full_schema.sh` script. diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index bc8f97866..39bf30d25 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -6,7 +6,7 @@ # It does so by having Synapse generate an up-to-date SQLite DB, then running # synapse_port_db to convert it to Postgres. It then dumps the contents of both. -POSTGRES_HOST="localhost" +export PGHOST="localhost" POSTGRES_DB_NAME="synapse_full_schema.$$" SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite" @@ -32,7 +32,7 @@ usage() { while getopts "p:co:h" opt; do case $opt in p) - POSTGRES_USERNAME=$OPTARG + export PGUSER=$OPTARG ;; c) # Print all commands that are being executed @@ -69,7 +69,7 @@ if [ ${#unsatisfied_requirements} -ne 0 ]; then exit 1 fi -if [ -z "$POSTGRES_USERNAME" ]; then +if [ -z "$PGUSER" ]; then echo "No postgres username supplied" usage exit 1 @@ -84,8 +84,9 @@ fi # Create the output directory if it doesn't exist mkdir -p "$OUTPUT_DIR" -read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD +read -rsp "Postgres password for '$PGUSER': " PGPASSWORD echo "" +export PGPASSWORD # Exit immediately if a command fails set -e @@ -131,9 +132,9 @@ report_stats: false database: name: "psycopg2" args: - user: "$POSTGRES_USERNAME" - host: "$POSTGRES_HOST" - password: "$POSTGRES_PASSWORD" + user: "$PGUSER" + host: "$PGHOST" + password: "$PGPASSWORD" database: "$POSTGRES_DB_NAME" # Suppress the key server warning. @@ -150,7 +151,7 @@ scripts-dev/update_database --database-config "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." -createdb $POSTGRES_DB_NAME +createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." if [ -z "$COVERAGE" ]; then @@ -181,7 +182,7 @@ DROP TABLE user_directory_search_docsize; DROP TABLE user_directory_search_stat; " sqlite3 "$SQLITE_DB" <<< "$SQL" -psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL" +psql "$POSTGRES_DB_NAME" -w <<< "$SQL" echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..." sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE" From 6c847785494473c0a430f368a0d79c9202b8ddd0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 May 2021 14:54:09 +0100 Subject: [PATCH 22/40] Always cache 'event_to_prev_state_group' (#9950) Fixes regression in send PDU times introduced in #9905. --- changelog.d/9950.feature | 1 + synapse/handlers/message.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/9950.feature diff --git a/changelog.d/9950.feature b/changelog.d/9950.feature new file mode 100644 index 000000000..96a0e7f09 --- /dev/null +++ b/changelog.d/9950.feature @@ -0,0 +1 @@ +Improve performance of sending events for worker-based deployments using Redis. 
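The `synapse/handlers/message.py` hunk below is purely an ordering fix: the `event_to_prev_state_group` cache entry is now written *before* the early return that skips state groups already being tracked, so the cheap event-to-state-group mapping gets cached even when the expensive `get_joined_hosts` entry already exists. A rough sketch of the intended control flow (parameter names simplified for illustration; this is not the verbatim Synapse method):

```python
from typing import Optional, Set


async def cache_event_state_group(
    external_cache,
    event_id: str,
    state_group: Optional[int],
    joined_hosts_updates: Set[int],
) -> None:
    """Sketch of the ordering that the hunk below puts in place."""
    if not state_group:
        return

    # Always record which state group the event points at, even if we
    # short-circuit below -- this is the entry the fix makes unconditional.
    await external_cache.set(
        "event_to_prev_state_group",
        event_id,
        state_group,
        expiry_ms=60 * 60 * 1000,
    )

    if state_group in joined_hosts_updates:
        # The expensive "get_joined_hosts" entry is already cached/queued,
        # so there is nothing more to do for this event.
        return

    # ... go on to compute and cache "get_joined_hosts" for this state group ...
```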
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8729332d4..5afb7fc26 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1050,6 +1050,13 @@ async def cache_joined_hosts_for_event( ) if state_entry.state_group: + await self._external_cache.set( + "event_to_prev_state_group", + event.event_id, + state_entry.state_group, + expiry_ms=60 * 60 * 1000, + ) + if state_entry.state_group in self._external_cache_joined_hosts_updates: return @@ -1057,12 +1064,6 @@ async def cache_joined_hosts_for_event( # Note that the expiry times must be larger than the expiry time in # _external_cache_joined_hosts_updates. - await self._external_cache.set( - "event_to_prev_state_group", - event.event_id, - state_entry.state_group, - expiry_ms=60 * 60 * 1000, - ) await self._external_cache.set( "get_joined_hosts", str(state_entry.state_group), From 51065c44bb0875373fada2838e69e4bc5005a95d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 10 May 2021 13:02:55 +0100 Subject: [PATCH 23/40] Fix port_db on empty db (#9930) ... and test it. --- .buildkite/scripts/create_postgres_db.py | 36 ---------------------- .buildkite/scripts/postgres_exec.py | 31 +++++++++++++++++++ .buildkite/scripts/test_synapse_port_db.sh | 35 +++++++++++++++------ .github/workflows/tests.yml | 2 +- changelog.d/9930.bugfix | 1 + scripts/synapse_port_db | 18 ++++++----- 6 files changed, 69 insertions(+), 54 deletions(-) delete mode 100755 .buildkite/scripts/create_postgres_db.py create mode 100755 .buildkite/scripts/postgres_exec.py create mode 100644 changelog.d/9930.bugfix diff --git a/.buildkite/scripts/create_postgres_db.py b/.buildkite/scripts/create_postgres_db.py deleted file mode 100755 index cc829db21..000000000 --- a/.buildkite/scripts/create_postgres_db.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from synapse.storage.engines import create_engine - -logger = logging.getLogger("create_postgres_db") - -if __name__ == "__main__": - # Create a PostgresEngine. - db_engine = create_engine({"name": "psycopg2", "args": {}}) - - # Connect to postgres to create the base database. - # We use "postgres" as a database because it's bound to exist and the "synapse" one - # doesn't exist yet. - db_conn = db_engine.module.connect( - user="postgres", host="postgres", password="postgres", dbname="postgres" - ) - db_conn.autocommit = True - cur = db_conn.cursor() - cur.execute("CREATE DATABASE synapse;") - cur.close() - db_conn.close() diff --git a/.buildkite/scripts/postgres_exec.py b/.buildkite/scripts/postgres_exec.py new file mode 100755 index 000000000..086b39172 --- /dev/null +++ b/.buildkite/scripts/postgres_exec.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +import psycopg2 + +# a very simple replacment for `psql`, to make up for the lack of the postgres client +# libraries in the synapse docker image. + +# We use "postgres" as a database because it's bound to exist and the "synapse" one +# doesn't exist yet. +db_conn = psycopg2.connect( + user="postgres", host="postgres", password="postgres", dbname="postgres" +) +db_conn.autocommit = True +cur = db_conn.cursor() +for c in sys.argv[1:]: + cur.execute(c) diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh index 8914319e3..a7e245476 100755 --- a/.buildkite/scripts/test_synapse_port_db.sh +++ b/.buildkite/scripts/test_synapse_port_db.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash # -# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along -# with additional dependencies needed for the test (such as coverage or the PostgreSQL -# driver), update the schema of the test SQLite database and run background updates on it, -# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to -# test porting the SQLite database to the PostgreSQL database (with coverage). +# Test script for 'synapse_port_db'. +# - sets up synapse and deps +# - runs the port script on a prepopulated test sqlite db +# - also runs it against an new sqlite db + set -xe cd `dirname $0`/../.. @@ -22,15 +22,32 @@ echo "--- Generate the signing key" # Generate the server's signing key. python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml -echo "--- Prepare the databases" +echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml # Create the PostgreSQL database. -./.buildkite/scripts/create_postgres_db.py +./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse" + +echo "+++ Run synapse_port_db against test database" +coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml + +##### + +# Now do the same again, on an empty database. + +echo "--- Prepare empty SQLite database" + +# we do this by deleting the sqlite db, and then doing the same again. +rm .buildkite/test_db.db + +scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml -echo "+++ Run synapse_port_db" +# re-create the PostgreSQL database. 
+./.buildkite/scripts/postgres_exec.py \ + "DROP DATABASE synapse" \ + "CREATE DATABASE synapse" -# Run the script +echo "+++ Run synapse_port_db against empty database" coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 12c82ac62..e7f3be1b4 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -273,7 +273,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Patch Buildkite-specific test scripts run: | - sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py + sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc diff --git a/changelog.d/9930.bugfix b/changelog.d/9930.bugfix new file mode 100644 index 000000000..9b22ed445 --- /dev/null +++ b/changelog.d/9930.bugfix @@ -0,0 +1 @@ +Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index f0c93d522..5fb5bb35f 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -913,10 +913,11 @@ class Porter(object): (curr_forward_id + 1,), ) - txn.execute( - "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", - (curr_backward_id + 1,), - ) + if curr_backward_id: + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (curr_backward_id + 1,), + ) await self.postgres_store.db_pool.runInteraction( "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos, @@ -954,10 +955,11 @@ class Porter(object): (curr_chain_id,), ) - await self.postgres_store.db_pool.runInteraction( - "_setup_event_auth_chain_id", r, - ) - + if curr_chain_id is not None: + await self.postgres_store.db_pool.runInteraction( + "_setup_event_auth_chain_id", + r, + ) ############################################## From 2b2985b5cfc4267dfb0f6b900e1a0f69f3f2cdc5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 May 2021 13:29:02 +0100 Subject: [PATCH 24/40] Improve performance of backfilling in large rooms. (#9935) We were pulling the full auth chain for the room out of the DB each time we backfilled, which can be *huge* for large rooms and is totally unnecessary. --- changelog.d/9935.feature | 1 + synapse/handlers/federation.py | 123 +++++++++++++++------------------ 2 files changed, 55 insertions(+), 69 deletions(-) create mode 100644 changelog.d/9935.feature diff --git a/changelog.d/9935.feature b/changelog.d/9935.feature new file mode 100644 index 000000000..eeda5bf50 --- /dev/null +++ b/changelog.d/9935.feature @@ -0,0 +1 @@ +Improve performance of backfilling in large rooms. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e8330a2b5..798ed75b3 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -552,8 +552,12 @@ async def _get_state_for_room( destination: str, room_id: str, event_id: str, - ) -> Tuple[List[EventBase], List[EventBase]]: - """Requests all of the room state at a given event from a remote homeserver. + ) -> List[EventBase]: + """Requests all of the room state at a given event from a remote + homeserver. 
+ + Will also fetch any missing events reported in the `auth_chain_ids` + section of `/state_ids`. Args: destination: The remote homeserver to query for the state. @@ -561,8 +565,7 @@ async def _get_state_for_room( event_id: The id of the event we want the state at. Returns: - A list of events in the state, not including the event itself, and - a list of events in the auth chain for the given event. + A list of events in the state, not including the event itself. """ ( state_event_ids, @@ -571,68 +574,53 @@ async def _get_state_for_room( destination, room_id, event_id=event_id ) - desired_events = set(state_event_ids + auth_event_ids) - - event_map = await self._get_events_from_store_or_dest( - destination, room_id, desired_events - ) + # Fetch the state events from the DB, and check we have the auth events. + event_map = await self.store.get_events(state_event_ids, allow_rejected=True) + auth_events_in_store = await self.store.have_seen_events(auth_event_ids) - failed_to_fetch = desired_events - event_map.keys() - if failed_to_fetch: - logger.warning( - "Failed to fetch missing state/auth events for %s %s", - event_id, - failed_to_fetch, + # Check for missing events. We handle state and auth event seperately, + # as we want to pull the state from the DB, but we don't for the auth + # events. (Note: we likely won't use the majority of the auth chain, and + # it can be *huge* for large rooms, so it's worth ensuring that we don't + # unnecessarily pull it from the DB). + missing_state_events = set(state_event_ids) - set(event_map) + missing_auth_events = set(auth_event_ids) - set(auth_events_in_store) + if missing_state_events or missing_auth_events: + await self._get_events_and_persist( + destination=destination, + room_id=room_id, + events=missing_state_events | missing_auth_events, ) - remote_state = [ - event_map[e_id] for e_id in state_event_ids if e_id in event_map - ] - - auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] - auth_chain.sort(key=lambda e: e.depth) - - return remote_state, auth_chain - - async def _get_events_from_store_or_dest( - self, destination: str, room_id: str, event_ids: Iterable[str] - ) -> Dict[str, EventBase]: - """Fetch events from a remote destination, checking if we already have them. - - Persists any events we don't already have as outliers. - - If we fail to fetch any of the events, a warning will be logged, and the event - will be omitted from the result. Likewise, any events which turn out not to - be in the given room. + if missing_state_events: + new_events = await self.store.get_events( + missing_state_events, allow_rejected=True + ) + event_map.update(new_events) - This function *does not* automatically get missing auth events of the - newly fetched events. Callers must include the full auth chain of - of the missing events in the `event_ids` argument, to ensure that any - missing auth events are correctly fetched. 
+ missing_state_events.difference_update(new_events) - Returns: - map from event_id to event - """ - fetched_events = await self.store.get_events(event_ids, allow_rejected=True) - - missing_events = set(event_ids) - fetched_events.keys() + if missing_state_events: + logger.warning( + "Failed to fetch missing state events for %s %s", + event_id, + missing_state_events, + ) - if missing_events: - logger.debug( - "Fetching unknown state/auth events %s for room %s", - missing_events, - room_id, - ) + if missing_auth_events: + auth_events_in_store = await self.store.have_seen_events( + missing_auth_events + ) + missing_auth_events.difference_update(auth_events_in_store) - await self._get_events_and_persist( - destination=destination, room_id=room_id, events=missing_events - ) + if missing_auth_events: + logger.warning( + "Failed to fetch missing auth events for %s %s", + event_id, + missing_auth_events, + ) - # we need to make sure we re-load from the database to get the rejected - # state correct. - fetched_events.update( - (await self.store.get_events(missing_events, allow_rejected=True)) - ) + remote_state = list(event_map.values()) # check for events which were in the wrong room. # @@ -640,8 +628,8 @@ async def _get_events_from_store_or_dest( # auth_events at an event in room A are actually events in room B bad_events = [ - (event_id, event.room_id) - for event_id, event in fetched_events.items() + (event.event_id, event.room_id) + for event in remote_state if event.room_id != room_id ] @@ -658,9 +646,10 @@ async def _get_events_from_store_or_dest( room_id, ) - del fetched_events[bad_event_id] + if bad_events: + remote_state = [e for e in remote_state if e.room_id == room_id] - return fetched_events + return remote_state async def _get_state_after_missing_prev_event( self, @@ -963,27 +952,23 @@ async def backfill( # For each edge get the current state. - auth_events = {} state_events = {} events_to_state = {} for e_id in edges: - state, auth = await self._get_state_for_room( + state = await self._get_state_for_room( destination=dest, room_id=room_id, event_id=e_id, ) - auth_events.update({a.event_id: a for a in auth}) - auth_events.update({s.event_id: s for s in state}) state_events.update({s.event_id: s for s in state}) events_to_state[e_id] = state required_auth = { a_id - for event in events - + list(state_events.values()) - + list(auth_events.values()) + for event in events + list(state_events.values()) for a_id in event.auth_event_ids() } + auth_events = await self.store.get_events(required_auth, allow_rejected=True) auth_events.update( {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} ) From 7967b36efe6a033f46cd882d0b31a8c3eb18631c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 11:02:56 +0100 Subject: [PATCH 25/40] Fix `m.room_key_request` to-device messages (#9961) fixes #9960 --- changelog.d/9961.bugfix | 1 + synapse/api/constants.py | 5 +++- synapse/federation/federation_server.py | 19 -------------- synapse/handlers/devicemessage.py | 33 ++++++++++++++++++++----- 4 files changed, 32 insertions(+), 26 deletions(-) create mode 100644 changelog.d/9961.bugfix diff --git a/changelog.d/9961.bugfix b/changelog.d/9961.bugfix new file mode 100644 index 000000000..e26d141a5 --- /dev/null +++ b/changelog.d/9961.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. 
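The `synapse/handlers/devicemessage.py` change further down fixes an inverted rate-limit check: the old code dropped a cross-user `m.room_key_request` whenever the awaited `can_do_action(...)` result was truthy, which in practice meant these messages were silently discarded (the bug described in the changelog above). The corrected logic unpacks the `(allowed, ...)` result and skips a recipient only when the limiter actually rejects the request. A simplified, self-contained sketch (names are illustrative, not the actual Synapse handler):

```python
from typing import Any, Dict


async def filter_room_key_requests(
    ratelimiter: Any,
    requester: Any,
    sender_user_id: str,
    sender_device_id: str,
    messages: Dict[str, dict],
) -> Dict[str, dict]:
    """Drop a cross-user room-key request only when the rate limiter says it
    is not allowed, and only for that recipient; the rest still get sent."""
    to_send: Dict[str, dict] = {}
    for user_id, by_device in messages.items():
        if user_id != sender_user_id:
            allowed, _ = await ratelimiter.can_do_action(
                requester, (sender_user_id, sender_device_id)
            )
            if not allowed:
                continue  # rate limited: skip this recipient only
        to_send[user_id] = by_device
    return to_send
```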
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index ab628b2be..3940da5c8 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -116,9 +116,12 @@ class EventTypes: MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent" +class ToDeviceEventTypes: + RoomKeyRequest = "m.room_key_request" + + class EduTypes: Presence = "m.presence" - RoomKeyRequest = "m.room_key_request" class RejectedReason: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index b729a6920..ace30aa45 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -44,7 +44,6 @@ SynapseError, UnsupportedRoomVersionError, ) -from synapse.api.ratelimiting import Ratelimiter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.federation.federation_base import FederationBase, event_from_pdu_json @@ -865,14 +864,6 @@ def __init__(self, hs: "HomeServer"): # EDU received. self._edu_type_to_instance = {} # type: Dict[str, List[str]] - # A rate limiter for incoming room key requests per origin. - self._room_key_request_rate_limiter = Ratelimiter( - store=hs.get_datastore(), - clock=self.clock, - rate_hz=self.config.rc_key_requests.per_second, - burst_count=self.config.rc_key_requests.burst_count, - ) - def register_edu_handler( self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]] ) -> None: @@ -926,16 +917,6 @@ async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: if not self.config.use_presence and edu_type == EduTypes.Presence: return - # If the incoming room key requests from a particular origin are over - # the limit, drop them. - if ( - edu_type == EduTypes.RoomKeyRequest - and not await self._room_key_request_rate_limiter.can_do_action( - None, origin - ) - ): - return - # Check if we have a handler on this instance handler = self.edu_handlers.get(edu_type) if handler: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index c5d631de0..580b94159 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -15,7 +15,7 @@ import logging from typing import TYPE_CHECKING, Any, Dict -from synapse.api.constants import EduTypes +from synapse.api.constants import ToDeviceEventTypes from synapse.api.errors import SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import run_in_background @@ -79,6 +79,8 @@ def __init__(self, hs: "HomeServer"): ReplicationUserDevicesResyncRestServlet.make_client(hs) ) + # a rate limiter for room key requests. The keys are + # (sending_user_id, sending_device_id). self._ratelimiter = Ratelimiter( store=self.store, clock=hs.get_clock(), @@ -100,12 +102,25 @@ async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None: for user_id, by_device in content["messages"].items(): # we use UserID.from_string to catch invalid user ids if not self.is_mine(UserID.from_string(user_id)): - logger.warning("Request for keys for non-local user %s", user_id) + logger.warning("To-device message to non-local user %s", user_id) raise SynapseError(400, "Not a user here") if not by_device: continue + # Ratelimit key requests by the sending user. 
+ if message_type == ToDeviceEventTypes.RoomKeyRequest: + allowed, _ = await self._ratelimiter.can_do_action( + None, (sender_user_id, None) + ) + if not allowed: + logger.info( + "Dropping room_key_request from %s to %s due to rate limit", + sender_user_id, + user_id, + ) + continue + messages_by_device = { device_id: { "content": message_content, @@ -192,13 +207,19 @@ async def send_device_message( for user_id, by_device in messages.items(): # Ratelimit local cross-user key requests by the sending device. if ( - message_type == EduTypes.RoomKeyRequest + message_type == ToDeviceEventTypes.RoomKeyRequest and user_id != sender_user_id - and await self._ratelimiter.can_do_action( + ): + allowed, _ = await self._ratelimiter.can_do_action( requester, (sender_user_id, requester.device_id) ) - ): - continue + if not allowed: + logger.info( + "Dropping room_key_request from %s to %s due to rate limit", + sender_user_id, + user_id, + ) + continue # we use UserID.from_string to catch invalid user ids if self.is_mine(UserID.from_string(user_id)): From b378d98c8f93412bc51f0d4b9c4aa2d1701cf523 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 11:04:03 +0100 Subject: [PATCH 26/40] Add debug logging for issue #9533 (#9959) Hopefully this will help us track down where to-device messages are getting lost/delayed. --- changelog.d/9959.misc | 1 + .../federation/sender/per_destination_queue.py | 9 +++++++++ synapse/logging/__init__.py | 7 ++++++- synapse/notifier.py | 8 ++++++++ synapse/replication/tcp/client.py | 1 - synapse/storage/databases/main/deviceinbox.py | 18 ++++++++++++++++++ 6 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9959.misc diff --git a/changelog.d/9959.misc b/changelog.d/9959.misc new file mode 100644 index 000000000..7231f29d7 --- /dev/null +++ b/changelog.d/9959.misc @@ -0,0 +1 @@ +Add debug logging for lost/delayed to-device messages. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 3b053ebcf..3a2efd56e 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -28,6 +28,7 @@ from synapse.events import EventBase from synapse.federation.units import Edu from synapse.handlers.presence import format_user_presence_state +from synapse.logging import issue9533_logger from synapse.logging.opentracing import SynapseTags, set_tag from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process @@ -574,6 +575,14 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int] for content in contents ] + if edus: + issue9533_logger.debug( + "Sending %i to-device messages to %s, up to stream id %i", + len(edus), + self._destination, + stream_id, + ) + return (edus, stream_id) def _start_catching_up(self) -> None: diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py index e00969f8b..b50a4f95e 100644 --- a/synapse/logging/__init__.py +++ b/synapse/logging/__init__.py @@ -12,8 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -# These are imported to allow for nicer logging configuration files. 
+import logging + from synapse.logging._remote import RemoteHandler from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter +# These are imported to allow for nicer logging configuration files. __all__ = ["RemoteHandler", "JsonFormatter", "TerseJsonFormatter"] + +# Debug logger for https://github.com/matrix-org/synapse/issues/9533 etc +issue9533_logger = logging.getLogger("synapse.9533_debug") diff --git a/synapse/notifier.py b/synapse/notifier.py index b9531007e..24b4e6649 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -38,6 +38,7 @@ from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state +from synapse.logging import issue9533_logger from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import log_kv, start_active_span from synapse.logging.utils import log_function @@ -426,6 +427,13 @@ def on_new_event( for room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) + if stream_key == "to_device_key": + issue9533_logger.debug( + "to-device messages stream id %s, awaking streams for %s", + new_token, + users, + ) + time_now_ms = self.clock.time_msec() for user_stream in user_streams: try: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 4f3c6a18b..62d780917 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -51,7 +51,6 @@ logger = logging.getLogger(__name__) - # How long we allow callers to wait for replication updates before timing out. _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 30 diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 7c9d1f744..50e7ddd73 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -15,6 +15,7 @@ import logging from typing import List, Optional, Tuple +from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.replication.tcp.streams import ToDeviceStream from synapse.storage._base import SQLBaseStore, db_to_json @@ -404,6 +405,13 @@ def add_messages_txn(txn, now_ms, stream_id): ], ) + if remote_messages_by_destination: + issue9533_logger.debug( + "Queued outgoing to-device messages with stream_id %i for %s", + stream_id, + list(remote_messages_by_destination.keys()), + ) + async with self._device_inbox_id_gen.get_next() as stream_id: now_ms = self.clock.time_msec() await self.db_pool.runInteraction( @@ -533,6 +541,16 @@ def _add_messages_to_local_device_inbox_txn( ], ) + issue9533_logger.debug( + "Stored to-device messages with stream_id %i for %s", + stream_id, + [ + (user_id, device_id) + for (user_id, messages_by_device) in local_by_user_then_device.items() + for device_id in messages_by_device.keys() + ], + ) + class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" From dc6366a9bd370a0f772f376a2053c0ce48cb6607 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 11 May 2021 08:03:23 -0500 Subject: [PATCH 27/40] Add config option to hide device names over federation (#9945) Now that cross signing exists there is much less of a need for other people to look at devices and verify them individually. This PR adds a config option to allow you to prevent device display names from being shared with other servers. 
Signed-off-by: Aaron Raimist --- changelog.d/9945.feature | 1 + docs/sample_config.yaml | 6 ++++++ synapse/config/federation.py | 10 ++++++++++ synapse/storage/databases/main/end_to_end_keys.py | 4 +++- 4 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9945.feature diff --git a/changelog.d/9945.feature b/changelog.d/9945.feature new file mode 100644 index 000000000..84308e8cc --- /dev/null +++ b/changelog.d/9945.feature @@ -0,0 +1 @@ +Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f469d6e54..7cf222d35 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -741,6 +741,12 @@ acme: # #allow_profile_lookup_over_federation: false +# Uncomment to disable device display name lookup over federation. By default, the +# Federation API allows other homeservers to obtain device display names of any user +# on this homeserver. Defaults to 'true'. +# +#allow_device_name_lookup_over_federation: false + ## Caching ## diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 090ba047f..cdd7a1ef0 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -44,6 +44,10 @@ def read_config(self, config, **kwargs): "allow_profile_lookup_over_federation", True ) + self.allow_device_name_lookup_over_federation = config.get( + "allow_device_name_lookup_over_federation", True + ) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Federation ## @@ -75,6 +79,12 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # on this homeserver. Defaults to 'true'. # #allow_profile_lookup_over_federation: false + + # Uncomment to disable device display name lookup over federation. By default, the + # Federation API allows other homeservers to obtain device display names of any user + # on this homeserver. Defaults to 'true'. + # + #allow_device_name_lookup_over_federation: false """ diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 88afe97c4..398d6b6ac 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -84,7 +84,9 @@ async def get_e2e_device_keys_for_federation_query( if keys: result["keys"] = keys - device_display_name = device.display_name + device_display_name = None + if self.hs.config.allow_device_name_lookup_over_federation: + device_display_name = device.display_name if device_display_name: result["device_display_name"] = device_display_name From 28c68411028e15858819f2a5896313ce0e71c25b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 11 May 2021 10:58:58 -0400 Subject: [PATCH 28/40] Send the `m.room.create` stripped event with invites (support MSC1772). (#9966) MSC1772 specifies the m.room.create event should be sent as part of the invite_state. This was done optionally behind an experimental flag, but is now done by default due to MSC1772 being approved. 
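As a concrete illustration of what this changes on the wire (all values below are made up): the stripped state bundled with an invite now includes the create event, whose content is what lets an invited client recognise that the invite is to a space before it has joined.

```python
# Hypothetical invite_room_state payload; every identifier and value here is
# illustrative, not taken from the patch.
invite_room_state = [
    {
        "type": "m.room.create",
        "state_key": "",
        "sender": "@alice:example.org",
        # MSC1772 puts the room type in the create event's content.
        "content": {
            "creator": "@alice:example.org",
            "room_version": "6",
            "type": "m.space",
        },
    },
    {
        "type": "m.room.name",
        "state_key": "",
        "sender": "@alice:example.org",
        "content": {"name": "Team space"},
    },
    {
        "type": "m.room.join_rules",
        "state_key": "",
        "sender": "@alice:example.org",
        "content": {"join_rule": "invite"},
    },
]
```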
--- UPGRADE.rst | 29 +++++++++++++++++++++++++++++ changelog.d/9915.feature | 2 +- changelog.d/9966.feature | 1 + docs/sample_config.yaml | 1 + synapse/config/api.py | 6 ++---- 5 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9966.feature diff --git a/UPGRADE.rst b/UPGRADE.rst index e921e0c08..606e357b6 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -85,6 +85,35 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.34.0 +==================== + +`room_invite_state_types` configuration setting +----------------------------------------------- + +The ``room_invite_state_types`` configuration setting has been deprecated and +replaced with ``room_prejoin_state``. See the `sample configuration file `_. + +If you have set ``room_invite_state_types`` to the default value you should simply +remove it from your configuration file. The default value used to be: + +.. code:: yaml + + room_invite_state_types: + - "m.room.join_rules" + - "m.room.canonical_alias" + - "m.room.avatar" + - "m.room.encryption" + - "m.room.name" + +If you have customised this value by adding addition state types, you should +remove ``room_invite_state_types`` and configure ``additional_event_types`` with +your customisations. + +If you have customised this value by removing state types, you should rename +``room_invite_state_types`` to ``additional_event_types``, and set +``disable_default_event_types`` to ``true``. + Upgrading to v1.33.0 ==================== diff --git a/changelog.d/9915.feature b/changelog.d/9915.feature index 832916cb0..7b81faabe 100644 --- a/changelog.d/9915.feature +++ b/changelog.d/9915.feature @@ -1 +1 @@ -Support stable identifiers from [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772). +Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. \ No newline at end of file diff --git a/changelog.d/9966.feature b/changelog.d/9966.feature new file mode 100644 index 000000000..7b81faabe --- /dev/null +++ b/changelog.d/9966.feature @@ -0,0 +1 @@ +Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7cf222d35..67ad57b1a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1521,6 +1521,7 @@ room_prejoin_state: # - m.room.avatar # - m.room.encryption # - m.room.name + # - m.room.create # # Uncomment the following to disable these defaults (so that only the event # types listed in 'additional_event_types' are shared). Defaults to 'false'. 
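Behaviourally, the `synapse/config/api.py` change below amounts to something like the following sketch (simplified; the real logic is the `_get_prejoin_state_types` method shown in the next hunk, and the defaults match those documented in the upgrade notes and sample config above):

```python
from typing import Dict, Iterable, List

# Default event types shared with invited users; m.room.create now sits in
# the defaults rather than being gated on the experimental spaces flag.
_DEFAULT_PREJOIN_STATE_TYPES: List[str] = [
    "m.room.join_rules",
    "m.room.canonical_alias",
    "m.room.avatar",
    "m.room.encryption",
    "m.room.name",
    "m.room.create",
]


def prejoin_state_types(room_prejoin_state: Dict) -> Iterable[str]:
    """Simplified stand-in for the real _get_prejoin_state_types."""
    if not room_prejoin_state.get("disable_default_event_types"):
        yield from _DEFAULT_PREJOIN_STATE_TYPES
    yield from room_prejoin_state.get("additional_event_types", [])
```

For example, `list(prejoin_state_types({}))` yields the six defaults, while `{"disable_default_event_types": True, "additional_event_types": ["m.room.topic"]}` restricts the prejoin state to `m.room.topic` only.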
diff --git a/synapse/config/api.py b/synapse/config/api.py index 55c038c0c..b18044f98 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -88,10 +88,6 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: if not room_prejoin_state_config.get("disable_default_event_types"): yield from _DEFAULT_PREJOIN_STATE_TYPES - if self.spaces_enabled: - # MSC1772 suggests adding m.room.create to the prejoin state - yield EventTypes.Create - yield from room_prejoin_state_config.get("additional_event_types", []) @@ -109,6 +105,8 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: EventTypes.RoomAvatar, EventTypes.RoomEncryption, EventTypes.Name, + # Per MSC1772. + EventTypes.Create, ] From f4833e0c062e189fcfd8186fc197d1fc1052814a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 11 May 2021 12:21:43 -0400 Subject: [PATCH 29/40] Support fetching the spaces summary via GET over federation. (#9947) Per changes in MSC2946, the C-S and S-S APIs for spaces summary should use GET requests. Until this is stable, the POST endpoints still exist. This does not switch federation requests to use the GET version yet since it is newly added and already deployed servers might not support it. When switching to the stable endpoint we should switch to GET requests. --- changelog.d/9947.feature | 1 + synapse/federation/transport/client.py | 1 + synapse/federation/transport/server.py | 26 ++++++++++++++++++++++++++ synapse/rest/client/v1/room.py | 1 + 4 files changed, 29 insertions(+) create mode 100644 changelog.d/9947.feature diff --git a/changelog.d/9947.feature b/changelog.d/9947.feature new file mode 100644 index 000000000..ce8874f81 --- /dev/null +++ b/changelog.d/9947.feature @@ -0,0 +1 @@ +Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index ada322a81..497848a2b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -995,6 +995,7 @@ async def get_space_summary( returned per space exclude_rooms: a list of any rooms we can skip """ + # TODO When switching to the stable endpoint, use GET instead of POST. path = _create_path( FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id ) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index a3759bdda..e1b746247 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1376,6 +1376,32 @@ class FederationSpaceSummaryServlet(BaseFederationServlet): PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946" PATH = "/spaces/(?P[^/]*)" + async def on_GET( + self, + origin: str, + content: JsonDict, + query: Mapping[bytes, Sequence[bytes]], + room_id: str, + ) -> Tuple[int, JsonDict]: + suggested_only = parse_boolean_from_args(query, "suggested_only", default=False) + max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space") + + exclude_rooms = [] + if b"exclude_rooms" in query: + try: + exclude_rooms = [ + room_id.decode("ascii") for room_id in query[b"exclude_rooms"] + ] + except Exception: + raise SynapseError( + 400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM + ) + + return 200, await self.handler.federation_space_summary( + room_id, suggested_only, max_rooms_per_space, exclude_rooms + ) + + # TODO When switching to the stable endpoint, remove the POST handler. 
async def on_POST( self, origin: str, diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 5cab4d3c7..51813cccb 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -1020,6 +1020,7 @@ async def on_GET( max_rooms_per_space=parse_integer(request, "max_rooms_per_space"), ) + # TODO When switching to the stable endpoint, remove the POST handler. async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: From 27c375f812725bead5fa41a55aacf316f6e2376c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 11 May 2021 12:57:39 -0400 Subject: [PATCH 30/40] Sort child events according to MSC1772 for the spaces summary API. (#9954) This should help ensure that equivalent results are achieved between homeservers querying for the summary of a space. This implements modified MSC1772 rules, according to MSC2946. The different is that the origin_server_ts of the m.room.create event is not used as a tie-breaker since this might not be known if the homeserver is not part of the room. --- changelog.d/9954.feature | 1 + synapse/handlers/space_summary.py | 71 +++++++++++++++++++++++- tests/handlers/test_space_summary.py | 81 ++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9954.feature create mode 100644 tests/handlers/test_space_summary.py diff --git a/changelog.d/9954.feature b/changelog.d/9954.feature new file mode 100644 index 000000000..ce8874f81 --- /dev/null +++ b/changelog.d/9954.feature @@ -0,0 +1 @@ +Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 2e997841f..e35d91832 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -14,6 +14,7 @@ import itertools import logging +import re from collections import deque from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple, cast @@ -226,6 +227,23 @@ async def _summarize_local_room( suggested_only: bool, max_children: Optional[int], ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]: + """ + Generate a room entry and a list of event entries for a given room. + + Args: + requester: The requesting user, or None if this is over federation. + room_id: The room ID to summarize. + suggested_only: True if only suggested children should be returned. + Otherwise, all children are returned. + max_children: The maximum number of children to return for this node. + + Returns: + A tuple of: + An iterable of a single value of the room. + + An iterable of the sorted children events. This may be limited + to a maximum size or may include all children. + """ if not await self._is_room_accessible(room_id, requester): return (), () @@ -357,6 +375,18 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: return room_entry async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: + """ + Get the child events for a given room. + + The returned results are sorted for stability. + + Args: + room_id: The room id to get the children of. + + Returns: + An iterable of sorted child events. + """ + # look for child rooms/spaces. 
current_state_ids = await self._store.get_current_state_ids(room_id) @@ -370,8 +400,9 @@ async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: ] ) - # filter out any events without a "via" (which implies it has been redacted) - return (e for e in events if _has_valid_via(e)) + # filter out any events without a "via" (which implies it has been redacted), + # and order to ensure we return stable results. + return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key) @attr.s(frozen=True, slots=True) @@ -397,3 +428,39 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool: return True logger.debug("Ignorning not-suggested child %s", edge_event.state_key) return False + + +# Order may only contain characters in the range of \x20 (space) to \x7F (~). +_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]") + + +def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]: + """ + Generate a value for comparing two child events for ordering. + + The rules for ordering are supposed to be: + + 1. The 'order' key, if it is valid. + 2. The 'origin_server_ts' of the 'm.room.create' event. + 3. The 'room_id'. + + But we skip step 2 since we may not have any state from the room. + + Args: + child: The event for generating a comparison key. + + Returns: + The comparison key as a tuple of: + False if the ordering is valid. + The ordering field. + The room ID. + """ + order = child.content.get("order") + # If order is not a string or doesn't meet the requirements, ignore it. + if not isinstance(order, str): + order = None + elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order): + order = None + + # Items without an order come last. + return (order is None, order, child.room_id) diff --git a/tests/handlers/test_space_summary.py b/tests/handlers/test_space_summary.py new file mode 100644 index 000000000..2c5e81531 --- /dev/null +++ b/tests/handlers/test_space_summary.py @@ -0,0 +1,81 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, Optional +from unittest import mock + +from synapse.handlers.space_summary import _child_events_comparison_key + +from tests import unittest + + +def _create_event(room_id: str, order: Optional[Any] = None): + result = mock.Mock() + result.room_id = room_id + result.content = {} + if order is not None: + result.content["order"] = order + return result + + +def _order(*events): + return sorted(events, key=_child_events_comparison_key) + + +class TestSpaceSummarySort(unittest.TestCase): + def test_no_order_last(self): + """An event with no ordering is placed behind those with an ordering.""" + ev1 = _create_event("!abc:test") + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_order(self): + """The ordering should be used.""" + ev1 = _create_event("!abc:test", "xyz") + ev2 = _create_event("!xyz:test", "abc") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_order_room_id(self): + """Room ID is a tie-breaker for ordering.""" + ev1 = _create_event("!abc:test", "abc") + ev2 = _create_event("!xyz:test", "abc") + + self.assertEqual([ev1, ev2], _order(ev1, ev2)) + + def test_invalid_ordering_type(self): + """Invalid orderings are considered the same as missing.""" + ev1 = _create_event("!abc:test", 1) + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", {}) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", []) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", True) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_invalid_ordering_value(self): + """Invalid orderings are considered the same as missing.""" + ev1 = _create_event("!abc:test", "foo\n") + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", "a" * 51) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) From 63fb220e5f90b63acfa4bdfb61788f2c2d867c86 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 18:01:11 +0100 Subject: [PATCH 31/40] Tests for to-device messages (#9965) --- changelog.d/9965.bugfix | 1 + .../rest/client/v2_alpha/test_sendtodevice.py | 201 ++++++++++++++++++ 2 files changed, 202 insertions(+) create mode 100644 changelog.d/9965.bugfix create mode 100644 tests/rest/client/v2_alpha/test_sendtodevice.py diff --git a/changelog.d/9965.bugfix b/changelog.d/9965.bugfix new file mode 100644 index 000000000..e26d141a5 --- /dev/null +++ b/changelog.d/9965.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/tests/rest/client/v2_alpha/test_sendtodevice.py b/tests/rest/client/v2_alpha/test_sendtodevice.py new file mode 100644 index 000000000..c9c99cc5d --- /dev/null +++ b/tests/rest/client/v2_alpha/test_sendtodevice.py @@ -0,0 +1,201 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import sendtodevice, sync + +from tests.unittest import HomeserverTestCase, override_config + + +class SendToDeviceTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + sendtodevice.register_servlets, + sync.register_servlets, + ] + + def test_user_to_user(self): + """A to-device message from one user to another should get delivered""" + + user1 = self.register_user("u1", "pass") + user1_tok = self.login("u1", "pass", "d1") + + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + # send the message + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user2: {"d2": test_msg}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # check it appears + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + expected_result = { + "events": [ + { + "sender": user1, + "type": "m.test", + "content": test_msg, + } + ] + } + self.assertEqual(channel.json_body["to_device"], expected_result) + + # it should re-appear if we do another sync + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["to_device"], expected_result) + + # it should *not* appear if we do an incremental sync + sync_token = channel.json_body["next_batch"] + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body.get("to_device", {}).get("events", []), []) + + @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) + def test_local_room_key_request(self): + """m.room_key_request has special-casing; test from local user""" + user1 = self.register_user("u1", "pass") + user1_tok = self.login("u1", "pass", "d1") + + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + # send three messages + for i in range(3): + chan = self.make_request( + "PUT", + f"/_matrix/client/r0/sendToDevice/m.room_key_request/{i}", + content={"messages": {user2: {"d2": {"idx": i}}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # now sync: we should get two of the three + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 2) + for i in range(2): + self.assertEqual( + msgs[i], + {"sender": user1, "type": "m.room_key_request", "content": {"idx": i}}, + ) + sync_token = channel.json_body["next_batch"] + + # ... time passes + self.reactor.advance(1) + + # and we can send more messages + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/3", + content={"messages": {user2: {"d2": {"idx": 3}}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # ... 
which should arrive + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 1) + self.assertEqual( + msgs[0], + {"sender": user1, "type": "m.room_key_request", "content": {"idx": 3}}, + ) + + @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) + def test_remote_room_key_request(self): + """m.room_key_request has special-casing; test from remote user""" + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + federation_registry = self.hs.get_federation_registry() + + # send three messages + for i in range(3): + self.get_success( + federation_registry.on_edu( + "m.direct_to_device", + "remote_server", + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "messages": {user2: {"d2": {"idx": i}}}, + "message_id": f"{i}", + }, + ) + ) + + # now sync: we should get two of the three + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 2) + for i in range(2): + self.assertEqual( + msgs[i], + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "content": {"idx": i}, + }, + ) + sync_token = channel.json_body["next_batch"] + + # ... time passes + self.reactor.advance(1) + + # and we can send more messages + self.get_success( + federation_registry.on_edu( + "m.direct_to_device", + "remote_server", + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "messages": {user2: {"d2": {"idx": 3}}}, + "message_id": "3", + }, + ) + ) + + # ... which should arrive + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 1) + self.assertEqual( + msgs[0], + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "content": {"idx": 3}, + }, + ) From affaffb0abc3993501ec024e00c286da85e121e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 May 2021 13:17:11 +0100 Subject: [PATCH 32/40] Run cache_joined_hosts_for_event in background (#9951) --- changelog.d/9951.feature | 1 + synapse/handlers/message.py | 45 ++++++++++++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9951.feature diff --git a/changelog.d/9951.feature b/changelog.d/9951.feature new file mode 100644 index 000000000..96a0e7f09 --- /dev/null +++ b/changelog.d/9951.feature @@ -0,0 +1 @@ +Improve performance of sending events for worker-based deployments using Redis. 
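The diff to `synapse/handlers/message.py` that follows moves `cache_joined_hosts_for_event` off the critical path by running it concurrently with event persistence. A minimal sketch of the concurrency pattern it adopts is below; the coroutine names are placeholders rather than Synapse APIs, while `defer.gatherResults`, `run_in_background` and `make_deferred_yieldable` are the Twisted/Synapse helpers the patch itself imports.

    from twisted.internet import defer

    from synapse.logging.context import make_deferred_yieldable, run_in_background

    async def do_main_work():  # placeholder for _persist_event
        return "event"

    async def warm_cache():  # placeholder for cache_joined_hosts_for_event
        pass

    async def handle():
        results = await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(do_main_work),
                    run_in_background(warm_cache),
                ],
                consumeErrors=True,
            )
        )
        # Only the first task's result matters to the caller; the second runs
        # purely for its side effects (the patch additionally attaches an
        # errback so its failures are only logged).
        return results[0]

The key design point, mirrored in the patch, is that the caller still awaits both tasks in a logcontext-safe way via `make_deferred_yieldable`, but no longer waits for the cache write before starting persistence.
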
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5afb7fc26..9f365eb5a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -19,6 +19,7 @@ from canonicaljson import encode_canonical_json +from twisted.internet import defer from twisted.internet.interfaces import IDelayedCall from synapse import event_auth @@ -43,14 +44,14 @@ from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator -from synapse.logging.context import run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester -from synapse.util import json_decoder, json_encoder -from synapse.util.async_helpers import Linearizer +from synapse.util import json_decoder, json_encoder, log_failure +from synapse.util.async_helpers import Linearizer, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client @@ -979,9 +980,43 @@ async def handle_new_client_event( logger.exception("Failed to encode content: %r", event.content) raise - await self.action_generator.handle_push_actions_for_event(event, context) + # We now persist the event (and update the cache in parallel, since we + # don't want to block on it). + result = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._persist_event, + requester=requester, + event=event, + context=context, + ratelimit=ratelimit, + extra_users=extra_users, + ), + run_in_background( + self.cache_joined_hosts_for_event, event, context + ).addErrback(log_failure, "cache_joined_hosts_for_event failed"), + ], + consumeErrors=True, + ) + ).addErrback(unwrapFirstError) + + return result[0] + + async def _persist_event( + self, + requester: Requester, + event: EventBase, + context: EventContext, + ratelimit: bool = True, + extra_users: Optional[List[UserID]] = None, + ) -> EventBase: + """Actually persists the event. Should only be called by + `handle_new_client_event`, and see its docstring for documentation of + the arguments. + """ - await self.cache_joined_hosts_for_event(event, context) + await self.action_generator.handle_push_actions_for_event(event, context) try: # If we're a worker we need to hit out to the master. 
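The next patch replaces macaroon-based access tokens with a shorter opaque format produced by `generate_access_token`: an unpadded-base64 localpart plus a random string, followed by a base62-encoded CRC32 of everything before it. The checker below is an invented illustration (it is not a Synapse API) of how a token of that shape can be sanity-checked; `base62_encode` is copied from the helper the patch adds to `synapse/util/stringutils.py`.

    from binascii import crc32

    _BASE62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

    def base62_encode(num: int, minwidth: int = 1) -> str:
        res = ""
        while num:
            num, rem = divmod(num, 62)
            res = _BASE62[rem] + res
        return "0" * (minwidth - len(res)) + res

    def looks_like_synapse_access_token(token: str) -> bool:
        """Cheap format check: prefix and self-consistent CRC, nothing more."""
        if not token.startswith("syt_") or "_" not in token[4:]:
            return False
        base, _, crc = token.rpartition("_")
        try:
            return crc == base62_encode(crc32(base.encode("ascii")), minwidth=6)
        except UnicodeEncodeError:
            return False

Note that this only checks that a token is well-formed; deciding whether it is actually valid still requires the usual database lookup.
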
From 7562d887e159f404c8d752271310f4432f246656 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 12 May 2021 15:04:51 +0100 Subject: [PATCH 33/40] Change the format of access tokens away from macaroons (#5588) --- changelog.d/5588.misc | 1 + scripts-dev/dump_macaroon.py | 2 +- synapse/handlers/auth.py | 28 +++++++++++---- synapse/handlers/register.py | 4 +-- synapse/util/stringutils.py | 20 +++++++++++ tests/api/test_auth.py | 63 --------------------------------- tests/handlers/test_auth.py | 43 +++++++++++----------- tests/handlers/test_register.py | 12 +++---- tests/util/test_stringutils.py | 8 ++++- 9 files changed, 78 insertions(+), 103 deletions(-) create mode 100644 changelog.d/5588.misc diff --git a/changelog.d/5588.misc b/changelog.d/5588.misc new file mode 100644 index 000000000..b8f52a212 --- /dev/null +++ b/changelog.d/5588.misc @@ -0,0 +1 @@ +Reduce the length of Synapse's access tokens. diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py index 980b5e709..0ca75d3fe 100755 --- a/scripts-dev/dump_macaroon.py +++ b/scripts-dev/dump_macaroon.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python import sys diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 36f2450e2..8a6666a4a 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -17,6 +17,7 @@ import time import unicodedata import urllib.parse +from binascii import crc32 from typing import ( TYPE_CHECKING, Any, @@ -34,6 +35,7 @@ import attr import bcrypt import pymacaroons +import unpaddedbase64 from twisted.web.server import Request @@ -66,6 +68,7 @@ from synapse.util.async_helpers import maybe_awaitable from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email if TYPE_CHECKING: @@ -808,10 +811,12 @@ async def get_access_token_for_user_id( logger.info( "Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry ) + target_user_id_obj = UserID.from_string(puppets_user_id) else: logger.info( "Logging in user %s on device %s%s", user_id, device_id, fmt_expiry ) + target_user_id_obj = UserID.from_string(user_id) if ( not is_appservice_ghost @@ -819,7 +824,7 @@ async def get_access_token_for_user_id( ): await self.auth.check_auth_blocking(user_id) - access_token = self.macaroon_gen.generate_access_token(user_id) + access_token = self.generate_access_token(target_user_id_obj) await self.store.add_access_token_to_user( user_id=user_id, token=access_token, @@ -1192,6 +1197,19 @@ async def _check_local_password(self, user_id: str, password: str) -> Optional[s return None return user_id + def generate_access_token(self, for_user: UserID) -> str: + """Generates an opaque string, for use as an access token""" + + # we use the following format for access tokens: + # syt___ + + b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8")) + random_string = stringutils.random_string(20) + base = f"syt_{b64local}_{random_string}" + + crc = base62_encode(crc32(base.encode("ascii")), minwidth=6) + return f"{base}_{crc}" + async def validate_short_term_login_token( self, login_token: str ) -> LoginTokenAttributes: @@ -1585,10 +1603,7 @@ class MacaroonGenerator: hs = attr.ib() - def generate_access_token( - self, user_id: str, extra_caveats: Optional[List[str]] = None - ) -> str: - extra_caveats = extra_caveats or [] + def 
generate_guest_access_token(self, user_id: str) -> str: macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = access") # Include a nonce, to make sure that each login gets a different @@ -1596,8 +1611,7 @@ def generate_access_token( macaroon.add_first_party_caveat( "nonce = %s" % (stringutils.random_string_with_symbols(16),) ) - for caveat in extra_caveats: - macaroon.add_first_party_caveat(caveat) + macaroon.add_first_party_caveat("guest = true") return macaroon.serialize() def generate_short_term_login_token( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 007fb1284..4ceef3fab 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -722,9 +722,7 @@ class and RegisterDeviceReplicationServlet. ) if is_guest: assert valid_until_ms is None - access_token = self.macaroon_gen.generate_access_token( - user_id, ["guest = true"] - ) + access_token = self.macaroon_gen.generate_guest_access_token(user_id) else: access_token = await self._auth_handler.get_access_token_for_user_id( user_id, diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index cd82777f8..4f25cd1d2 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -220,3 +220,23 @@ def strtobool(val: str) -> bool: return False else: raise ValueError("invalid truth value %r" % (val,)) + + +_BASE62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + + +def base62_encode(num: int, minwidth: int = 1) -> str: + """Encode a number using base62 + + Args: + num: number to be encoded + minwidth: width to pad to, if the number is small + """ + res = "" + while num: + num, rem = divmod(num, 62) + res = _BASE62[rem] + res + + # pad to minimum width + pad = "0" * (minwidth - len(res)) + return pad + res diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index c0ed64f78..1b0a81575 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -21,13 +21,11 @@ from synapse.api.errors import ( AuthError, Codes, - InvalidClientCredentialsError, InvalidClientTokenError, MissingClientTokenError, ResourceLimitError, ) from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import UserID from tests import unittest from tests.test_utils import simple_async_mock @@ -253,67 +251,6 @@ def test_get_guest_user_from_macaroon(self): self.assertTrue(user_info.is_guest) self.store.get_user_by_id.assert_called_with(user_id) - def test_cannot_use_regular_token_as_guest(self): - USER_ID = "@percy:matrix.org" - self.store.add_access_token_to_user = simple_async_mock(None) - self.store.get_device = simple_async_mock(None) - - token = self.get_success( - self.hs.get_auth_handler().get_access_token_for_user_id( - USER_ID, "DEVICE", valid_until_ms=None - ) - ) - self.store.add_access_token_to_user.assert_called_with( - user_id=USER_ID, - token=token, - device_id="DEVICE", - valid_until_ms=None, - puppets_user_id=None, - ) - - async def get_user(tok): - if token != tok: - return None - return TokenLookupResult( - user_id=USER_ID, - is_guest=False, - token_id=1234, - device_id="DEVICE", - ) - - self.store.get_user_by_access_token = get_user - self.store.get_user_by_id = simple_async_mock({"is_guest": False}) - - # check the token works - request = Mock(args={}) - request.args[b"access_token"] = [token.encode("ascii")] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - requester = self.get_success( - self.auth.get_user_by_req(request, allow_guest=True) - ) - 
self.assertEqual(UserID.from_string(USER_ID), requester.user) - self.assertFalse(requester.is_guest) - - # add an is_guest caveat - mac = pymacaroons.Macaroon.deserialize(token) - mac.add_first_party_caveat("guest = true") - guest_tok = mac.serialize() - - # the token should *not* work now - request = Mock(args={}) - request.args[b"access_token"] = [guest_tok.encode("ascii")] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - - cm = self.get_failure( - self.auth.get_user_by_req(request, allow_guest=True), - InvalidClientCredentialsError, - ) - - self.assertEqual(401, cm.value.code) - self.assertEqual("Guest access token used for regular user", cm.value.msg) - - self.store.get_user_by_id.assert_called_with(USER_ID) - def test_blocking_mau(self): self.auth_blocking._limit_usage_by_mau = False self.auth_blocking._max_mau_value = 50 diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index fe7e9484f..5f3350e49 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -16,12 +16,17 @@ import pymacaroons from synapse.api.errors import AuthError, ResourceLimitError +from synapse.rest import admin from tests import unittest from tests.test_utils import make_awaitable class AuthTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + ] + def prepare(self, reactor, clock, hs): self.auth_handler = hs.get_auth_handler() self.macaroon_generator = hs.get_macaroon_generator() @@ -35,16 +40,10 @@ def prepare(self, reactor, clock, hs): self.small_number_of_users = 1 self.large_number_of_users = 100 - def test_token_is_a_macaroon(self): - token = self.macaroon_generator.generate_access_token("some_user") - # Check that we can parse the thing with pymacaroons - macaroon = pymacaroons.Macaroon.deserialize(token) - # The most basic of sanity checks - if "some_user" not in macaroon.inspect(): - self.fail("some_user was not in %s" % macaroon.inspect()) + self.user1 = self.register_user("a_user", "pass") def test_macaroon_caveats(self): - token = self.macaroon_generator.generate_access_token("a_user") + token = self.macaroon_generator.generate_guest_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) def verify_gen(caveat): @@ -59,19 +58,23 @@ def verify_type(caveat): def verify_nonce(caveat): return caveat.startswith("nonce =") + def verify_guest(caveat): + return caveat == "guest = true" + v = pymacaroons.Verifier() v.satisfy_general(verify_gen) v.satisfy_general(verify_user) v.satisfy_general(verify_type) v.satisfy_general(verify_nonce) + v.satisfy_general(verify_guest) v.verify(macaroon, self.hs.config.macaroon_secret_key) def test_short_term_login_token_gives_user_id(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", "", 5000 + self.user1, "", 5000 ) res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) self.assertEqual("", res.auth_provider_id) # when we advance the clock, the token should be rejected @@ -83,22 +86,22 @@ def test_short_term_login_token_gives_user_id(self): def test_short_term_login_token_gives_auth_provider(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", auth_provider_id="my_idp" + self.user1, auth_provider_id="my_idp" ) res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) self.assertEqual("my_idp", 
res.auth_provider_id) def test_short_term_login_token_cannot_replace_user_id(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", "", 5000 + self.user1, "", 5000 ) macaroon = pymacaroons.Macaroon.deserialize(token) res = self.get_success( self.auth_handler.validate_short_term_login_token(macaroon.serialize()) ) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) # add another "user_id" caveat, which might allow us to override the # user_id. @@ -114,7 +117,7 @@ def test_mau_limits_disabled(self): # Ensure does not throw exception self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) @@ -132,7 +135,7 @@ def test_mau_limits_exceeded_large(self): self.get_failure( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ), ResourceLimitError, ) @@ -160,7 +163,7 @@ def test_mau_limits_parity(self): # If not in monthly active cohort self.get_failure( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ), ResourceLimitError, ) @@ -177,7 +180,7 @@ def test_mau_limits_parity(self): ) self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) self.get_success( @@ -195,7 +198,7 @@ def test_mau_limits_not_exceeded(self): # Ensure does not raise exception self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) @@ -210,6 +213,6 @@ def test_mau_limits_not_exceeded(self): def _get_macaroon(self): token = self.macaroon_generator.generate_short_term_login_token( - "user_a", "", 5000 + self.user1, "", 5000 ) return pymacaroons.Macaroon.deserialize(token) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 608f8f3d3..bd4319052 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -48,10 +48,6 @@ def prepare(self, reactor, clock, hs): self.mock_distributor = Mock() self.mock_distributor.declare("registered_user") self.mock_captcha_client = Mock() - self.macaroon_generator = Mock( - generate_access_token=Mock(return_value="secret") - ) - self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) self.handler = self.hs.get_registration_handler() self.store = self.hs.get_datastore() self.lots_of_users = 100 @@ -67,8 +63,8 @@ def test_user_is_created_and_logged_in_if_doesnt_exist(self): self.get_or_create_user(requester, frank.localpart, "Frankie") ) self.assertEquals(result_user_id, user_id) - self.assertTrue(result_token is not None) - self.assertEquals(result_token, "secret") + self.assertIsInstance(result_token, str) + self.assertGreater(len(result_token), 20) def test_if_user_exists(self): store = self.hs.get_datastore() @@ -500,7 +496,7 @@ def check_registration_for_spam( user_id = self.get_success(self.handler.register_user(localpart="user")) # Get an access token. 
- token = self.macaroon_generator.generate_access_token(user_id) + token = "testtok" self.get_success( self.store.add_access_token_to_user( user_id=user_id, token=token, device_id=None, valid_until_ms=None @@ -577,7 +573,7 @@ async def get_or_create_user( user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - token = self.macaroon_generator.generate_access_token(user_id) + token = self.hs.get_auth_handler().generate_access_token(user) if need_register: await self.handler.register_with_store( diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py index f7fecd9cf..ad4dd7f00 100644 --- a/tests/util/test_stringutils.py +++ b/tests/util/test_stringutils.py @@ -13,7 +13,7 @@ # limitations under the License. from synapse.api.errors import SynapseError -from synapse.util.stringutils import assert_valid_client_secret +from synapse.util.stringutils import assert_valid_client_secret, base62_encode from .. import unittest @@ -45,3 +45,9 @@ def test_client_secret_regex(self): for client_secret in bad: with self.assertRaises(SynapseError): assert_valid_client_secret(client_secret) + + def test_base62_encode(self): + self.assertEqual("0", base62_encode(0)) + self.assertEqual("10", base62_encode(62)) + self.assertEqual("1c", base62_encode(100)) + self.assertEqual("001c", base62_encode(100, minwidth=4)) From a683028d81606708f686b890c0a44f5a20b54798 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 16:05:28 +0200 Subject: [PATCH 34/40] Correctly ratelimit invites when creating a room (#9968) * Correctly ratelimit invites when creating a room Also allow ratelimiting for more than one action at a time. --- changelog.d/9968.bugfix | 1 + synapse/api/ratelimiting.py | 22 +++++++++--- synapse/handlers/room.py | 27 ++++++++++---- synapse/handlers/room_member.py | 25 +++++++++++++ tests/api/test_ratelimiting.py | 57 ++++++++++++++++++++++++++++++ tests/rest/client/v1/test_rooms.py | 37 +++++++++++++++++++ 6 files changed, 157 insertions(+), 12 deletions(-) create mode 100644 changelog.d/9968.bugfix diff --git a/changelog.d/9968.bugfix b/changelog.d/9968.bugfix new file mode 100644 index 000000000..39e75f995 --- /dev/null +++ b/changelog.d/9968.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 2244b8a34..b9a10283f 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -57,6 +57,7 @@ async def can_do_action( rate_hz: Optional[float] = None, burst_count: Optional[int] = None, update: bool = True, + n_actions: int = 1, _time_now_s: Optional[int] = None, ) -> Tuple[bool, float]: """Can the entity (e.g. user or IP address) perform the action? @@ -76,6 +77,9 @@ async def can_do_action( burst_count: How many actions that can be performed before being limited. Overrides the value set during instantiation if set. update: Whether to count this check as performing the action + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests. 
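# (Illustration only, not part of this patch.) The n_actions handling that the
# next hunk adds boils down to the following simplified check; per-user
# overrides, the `update` flag and the time_start reset are omitted here.
def can_do_actions(action_count, time_start, now, rate_hz, burst_count, n_actions=1):
    # Replenish: previously-counted actions "leak away" at rate_hz per second.
    performed = max(action_count - (now - time_start) * rate_hz, 0)
    # Written as `performed > burst_count - n_actions` (rather than adding the
    # two) to match the patch's note about float rounding.
    if performed > burst_count - n_actions:
        # Not enough headroom for all n_actions: deny, and leave the stored
        # count untouched so a partial batch is never charged.
        return False, performed
    return True, performed + n_actions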
@@ -124,17 +128,20 @@ async def can_do_action( time_delta = time_now_s - time_start performed_count = action_count - time_delta * rate_hz if performed_count < 0: - # Allow, reset back to count 1 - allowed = True + performed_count = 0 time_start = time_now_s - action_count = 1.0 - elif performed_count > burst_count - 1.0: + + # This check would be easier read as performed_count + n_actions > burst_count, + # but performed_count might be a very precise float (with lots of numbers + # following the point) in which case Python might round it up when adding it to + # n_actions. Writing it this way ensures it doesn't happen. + if performed_count > burst_count - n_actions: # Deny, we have exceeded our burst count allowed = False else: # We haven't reached our limit yet allowed = True - action_count += 1.0 + action_count = performed_count + n_actions if update: self.actions[key] = (action_count, time_start, rate_hz) @@ -182,6 +189,7 @@ async def ratelimit( rate_hz: Optional[float] = None, burst_count: Optional[int] = None, update: bool = True, + n_actions: int = 1, _time_now_s: Optional[int] = None, ): """Checks if an action can be performed. If not, raises a LimitExceededError @@ -201,6 +209,9 @@ async def ratelimit( burst_count: How many actions that can be performed before being limited. Overrides the value set during instantiation if set. update: Whether to count this check as performing the action + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests. @@ -216,6 +227,7 @@ async def ratelimit( rate_hz=rate_hz, burst_count=burst_count, update=update, + n_actions=n_actions, _time_now_s=time_now_s, ) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index fb4823a5c..835d874ce 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -32,7 +32,14 @@ RoomCreationPreset, RoomEncryptionAlgorithms, ) -from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + LimitExceededError, + NotFoundError, + StoreError, + SynapseError, +) from synapse.api.filtering import Filter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.events import EventBase @@ -126,10 +133,6 @@ def __init__(self, hs: "HomeServer"): self.third_party_event_rules = hs.get_third_party_event_rules() - self._invite_burst_count = ( - hs.config.ratelimiting.rc_invites_per_room.burst_count - ) - async def upgrade_room( self, requester: Requester, old_room_id: str, new_version: RoomVersion ) -> str: @@ -676,8 +679,18 @@ async def create_room( invite_3pid_list = [] invite_list = [] - if len(invite_list) + len(invite_3pid_list) > self._invite_burst_count: - raise SynapseError(400, "Cannot invite so many users at once") + if invite_list or invite_3pid_list: + try: + # If there are invites in the request, see if the ratelimiting settings + # allow that number of invites to be sent from the current user. 
+ await self.room_member_handler.ratelimit_multiple_invites( + requester, + room_id=None, + n_invites=len(invite_list) + len(invite_3pid_list), + update=False, + ) + except LimitExceededError: + raise SynapseError(400, "Cannot invite so many users at once") await self.event_creation_handler.assert_accepted_privacy_policy(requester) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 20700fc5a..9a092da71 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -163,6 +163,31 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None: async def forget(self, user: UserID, room_id: str) -> None: raise NotImplementedError() + async def ratelimit_multiple_invites( + self, + requester: Optional[Requester], + room_id: Optional[str], + n_invites: int, + update: bool = True, + ): + """Ratelimit more than one invite sent by the given requester in the given room. + + Args: + requester: The requester sending the invites. + room_id: The room the invites are being sent in. + n_invites: The amount of invites to ratelimit for. + update: Whether to update the ratelimiter's cache. + + Raises: + LimitExceededError: The requester can't send that many invites in the room. + """ + await self._invites_per_room_limiter.ratelimit( + requester, + room_id, + update=update, + n_actions=n_invites, + ) + async def ratelimit_invite( self, requester: Optional[Requester], diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index fa96ba07a..dcf0110c1 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -230,3 +230,60 @@ def test_db_user_override(self): # Shouldn't raise for _ in range(20): self.get_success_or_raise(limiter.ratelimit(requester, _time_now_s=0)) + + def test_multiple_actions(self): + limiter = Ratelimiter( + store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=3 + ) + # Test that 4 actions aren't allowed with a maximum burst of 3. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=4, _time_now_s=0) + ) + self.assertFalse(allowed) + + # Test that 3 actions are allowed with a maximum burst of 3. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=3, _time_now_s=0) + ) + self.assertTrue(allowed) + self.assertEquals(10.0, time_allowed) + + # Test that, after doing these 3 actions, we can't do any more action without + # waiting. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=1, _time_now_s=0) + ) + self.assertFalse(allowed) + self.assertEquals(10.0, time_allowed) + + # Test that after waiting we can do only 1 action. + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action( + None, + key="test_id", + update=False, + n_actions=1, + _time_now_s=10, + ) + ) + self.assertTrue(allowed) + # The time allowed is the current time because we could still repeat the action + # once. + self.assertEquals(10.0, time_allowed) + + allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=10) + ) + self.assertFalse(allowed) + # The time allowed doesn't change despite allowed being False because, while we + # don't allow 2 actions, we could still do 1. + self.assertEquals(10.0, time_allowed) + + # Test that after waiting a bit more we can do 2 actions. 
+ allowed, time_allowed = self.get_success_or_raise( + limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=20) + ) + self.assertTrue(allowed) + # The time allowed is the current time because we could still repeat the action + # once. + self.assertEquals(20.0, time_allowed) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index a3694f3d0..7c4bdcdfd 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -463,6 +463,43 @@ def test_post_room_invitees_invalid_mxid(self): ) self.assertEquals(400, channel.code) + @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}}) + def test_post_room_invitees_ratelimit(self): + """Test that invites sent when creating a room are ratelimited by a RateLimiter, + which ratelimits them correctly, including by not limiting when the requester is + exempt from ratelimiting. + """ + + # Build the request's content. We use local MXIDs because invites over federation + # are more difficult to mock. + content = json.dumps( + { + "invite": [ + "@alice1:red", + "@alice2:red", + "@alice3:red", + "@alice4:red", + ] + } + ).encode("utf8") + + # Test that the invites are correctly ratelimited. + channel = self.make_request("POST", "/createRoom", content) + self.assertEqual(400, channel.code) + self.assertEqual( + "Cannot invite so many users at once", + channel.json_body["error"], + ) + + # Add the current user to the ratelimit overrides, allowing them no ratelimiting. + self.get_success( + self.hs.get_datastore().set_ratelimit_for_user(self.user_id, 0, 0) + ) + + # Test that the invites aren't ratelimited anymore. + channel = self.make_request("POST", "/createRoom", content) + self.assertEqual(200, channel.code) + class RoomTopicTestCase(RoomBase): """ Tests /rooms/$room_id/topic REST events. 
""" From 47806b0869da4adf84a978e4898ec1b4f5985af5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 16:59:46 +0100 Subject: [PATCH 35/40] 1.34.0rc1 --- CHANGES.md | 61 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5588.misc | 1 - changelog.d/9881.feature | 1 - changelog.d/9882.misc | 1 - changelog.d/9885.misc | 1 - changelog.d/9886.misc | 1 - changelog.d/9889.feature | 1 - changelog.d/9889.removal | 1 - changelog.d/9895.bugfix | 1 - changelog.d/9896.bugfix | 1 - changelog.d/9896.misc | 1 - changelog.d/9902.feature | 1 - changelog.d/9904.misc | 1 - changelog.d/9905.feature | 1 - changelog.d/9910.bugfix | 1 - changelog.d/9910.feature | 1 - changelog.d/9911.doc | 1 - changelog.d/9913.docker | 1 - changelog.d/9915.feature | 1 - changelog.d/9916.feature | 1 - changelog.d/9928.bugfix | 1 - changelog.d/9930.bugfix | 1 - changelog.d/9931.misc | 1 - changelog.d/9932.misc | 1 - changelog.d/9935.feature | 1 - changelog.d/9945.feature | 1 - changelog.d/9947.feature | 1 - changelog.d/9950.feature | 1 - changelog.d/9951.feature | 1 - changelog.d/9954.feature | 1 - changelog.d/9959.misc | 1 - changelog.d/9961.bugfix | 1 - changelog.d/9965.bugfix | 1 - changelog.d/9966.feature | 1 - changelog.d/9968.bugfix | 1 - synapse/__init__.py | 2 +- 36 files changed, 62 insertions(+), 35 deletions(-) delete mode 100644 changelog.d/5588.misc delete mode 100644 changelog.d/9881.feature delete mode 100644 changelog.d/9882.misc delete mode 100644 changelog.d/9885.misc delete mode 100644 changelog.d/9886.misc delete mode 100644 changelog.d/9889.feature delete mode 100644 changelog.d/9889.removal delete mode 100644 changelog.d/9895.bugfix delete mode 100644 changelog.d/9896.bugfix delete mode 100644 changelog.d/9896.misc delete mode 100644 changelog.d/9902.feature delete mode 100644 changelog.d/9904.misc delete mode 100644 changelog.d/9905.feature delete mode 100644 changelog.d/9910.bugfix delete mode 100644 changelog.d/9910.feature delete mode 100644 changelog.d/9911.doc delete mode 100644 changelog.d/9913.docker delete mode 100644 changelog.d/9915.feature delete mode 100644 changelog.d/9916.feature delete mode 100644 changelog.d/9928.bugfix delete mode 100644 changelog.d/9930.bugfix delete mode 100644 changelog.d/9931.misc delete mode 100644 changelog.d/9932.misc delete mode 100644 changelog.d/9935.feature delete mode 100644 changelog.d/9945.feature delete mode 100644 changelog.d/9947.feature delete mode 100644 changelog.d/9950.feature delete mode 100644 changelog.d/9951.feature delete mode 100644 changelog.d/9954.feature delete mode 100644 changelog.d/9959.misc delete mode 100644 changelog.d/9961.bugfix delete mode 100644 changelog.d/9965.bugfix delete mode 100644 changelog.d/9966.feature delete mode 100644 changelog.d/9968.bugfix diff --git a/CHANGES.md b/CHANGES.md index 93efa3ce5..ddc1f13a3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +Synapse 1.34.0rc1 (2021-05-12) +============================== + +Features +-------- + +- Add experimental option to track memory usage of the caches. ([\#9881](https://github.com/matrix-org/synapse/issues/9881)) +- Add support for `DELETE /_synapse/admin/v1/rooms/`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889)) +- Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902)) +- Improve performance of sending events for worker-based deployments using Redis. 
([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951)) +- Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916)) +- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) +- Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935)) +- Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945)) +- Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. ([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954)) + + +Bugfixes +-------- + +- Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. ([\#9895](https://github.com/matrix-org/synapse/issues/9895)) +- Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. ([\#9896](https://github.com/matrix-org/synapse/issues/9896)) +- Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. ([\#9910](https://github.com/matrix-org/synapse/issues/9910)) +- Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. ([\#9928](https://github.com/matrix-org/synapse/issues/9928)) +- Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. ([\#9930](https://github.com/matrix-org/synapse/issues/9930)) +- Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. ([\#9961](https://github.com/matrix-org/synapse/issues/9961), [\#9965](https://github.com/matrix-org/synapse/issues/9965)) +- Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. ([\#9968](https://github.com/matrix-org/synapse/issues/9968)) + + +Updates to the Docker image +--------------------------- + +- Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913)) + + +Improved Documentation +---------------------- + +- Add `port` argument to the Postgres database sample config section. ([\#9911](https://github.com/matrix-org/synapse/issues/9911)) + + +Deprecations and Removals +------------------------- + +- Mark as deprecated `POST /_synapse/admin/v1/rooms//delete`. 
([\#9889](https://github.com/matrix-org/synapse/issues/9889)) + + +Internal Changes +---------------- + +- Reduce the length of Synapse's access tokens. ([\#5588](https://github.com/matrix-org/synapse/issues/5588)) +- Export jemalloc stats to Prometheus if it is being used. ([\#9882](https://github.com/matrix-org/synapse/issues/9882)) +- Add type hints to presence handler. ([\#9885](https://github.com/matrix-org/synapse/issues/9885)) +- Reduce memory usage of the LRU caches. ([\#9886](https://github.com/matrix-org/synapse/issues/9886)) +- Add type hints to the `synapse.handlers` module. ([\#9896](https://github.com/matrix-org/synapse/issues/9896)) +- Time response time for external cache requests. ([\#9904](https://github.com/matrix-org/synapse/issues/9904)) +- Minor fixes to the `make_full_schema.sh` script. ([\#9931](https://github.com/matrix-org/synapse/issues/9931)) +- Move database schema files into a common directory. ([\#9932](https://github.com/matrix-org/synapse/issues/9932)) +- Add debug logging for lost/delayed to-device messages. ([\#9959](https://github.com/matrix-org/synapse/issues/9959)) + + Synapse 1.33.2 (2021-05-11) =========================== diff --git a/changelog.d/5588.misc b/changelog.d/5588.misc deleted file mode 100644 index b8f52a212..000000000 --- a/changelog.d/5588.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the length of Synapse's access tokens. diff --git a/changelog.d/9881.feature b/changelog.d/9881.feature deleted file mode 100644 index 088a517e0..000000000 --- a/changelog.d/9881.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental option to track memory usage of the caches. diff --git a/changelog.d/9882.misc b/changelog.d/9882.misc deleted file mode 100644 index facfa31f3..000000000 --- a/changelog.d/9882.misc +++ /dev/null @@ -1 +0,0 @@ -Export jemalloc stats to Prometheus if it is being used. diff --git a/changelog.d/9885.misc b/changelog.d/9885.misc deleted file mode 100644 index 492fccea4..000000000 --- a/changelog.d/9885.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to presence handler. diff --git a/changelog.d/9886.misc b/changelog.d/9886.misc deleted file mode 100644 index 8ff869e65..000000000 --- a/changelog.d/9886.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory usage of the LRU caches. diff --git a/changelog.d/9889.feature b/changelog.d/9889.feature deleted file mode 100644 index 74d46f222..000000000 --- a/changelog.d/9889.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for `DELETE /_synapse/admin/v1/rooms/`. \ No newline at end of file diff --git a/changelog.d/9889.removal b/changelog.d/9889.removal deleted file mode 100644 index 398b9e129..000000000 --- a/changelog.d/9889.removal +++ /dev/null @@ -1 +0,0 @@ -Mark as deprecated `POST /_synapse/admin/v1/rooms//delete`. \ No newline at end of file diff --git a/changelog.d/9895.bugfix b/changelog.d/9895.bugfix deleted file mode 100644 index 1053f975b..000000000 --- a/changelog.d/9895.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. diff --git a/changelog.d/9896.bugfix b/changelog.d/9896.bugfix deleted file mode 100644 index 07a8e87f9..000000000 --- a/changelog.d/9896.bugfix +++ /dev/null @@ -1 +0,0 @@ -Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. 
diff --git a/changelog.d/9896.misc b/changelog.d/9896.misc deleted file mode 100644 index e41c7d1f0..000000000 --- a/changelog.d/9896.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to the `synapse.handlers` module. diff --git a/changelog.d/9902.feature b/changelog.d/9902.feature deleted file mode 100644 index 4d9f324d4..000000000 --- a/changelog.d/9902.feature +++ /dev/null @@ -1 +0,0 @@ -Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. diff --git a/changelog.d/9904.misc b/changelog.d/9904.misc deleted file mode 100644 index 3db1e625a..000000000 --- a/changelog.d/9904.misc +++ /dev/null @@ -1 +0,0 @@ -Time response time for external cache requests. diff --git a/changelog.d/9905.feature b/changelog.d/9905.feature deleted file mode 100644 index 96a0e7f09..000000000 --- a/changelog.d/9905.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending events for worker-based deployments using Redis. diff --git a/changelog.d/9910.bugfix b/changelog.d/9910.bugfix deleted file mode 100644 index 06d523fd4..000000000 --- a/changelog.d/9910.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. diff --git a/changelog.d/9910.feature b/changelog.d/9910.feature deleted file mode 100644 index 54165cce1..000000000 --- a/changelog.d/9910.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance after joining a large room when presence is enabled. diff --git a/changelog.d/9911.doc b/changelog.d/9911.doc deleted file mode 100644 index f7fd9f1ba..000000000 --- a/changelog.d/9911.doc +++ /dev/null @@ -1 +0,0 @@ -Add `port` argument to the Postgres database sample config section. \ No newline at end of file diff --git a/changelog.d/9913.docker b/changelog.d/9913.docker deleted file mode 100644 index 93835e14c..000000000 --- a/changelog.d/9913.docker +++ /dev/null @@ -1 +0,0 @@ -Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. diff --git a/changelog.d/9915.feature b/changelog.d/9915.feature deleted file mode 100644 index 7b81faabe..000000000 --- a/changelog.d/9915.feature +++ /dev/null @@ -1 +0,0 @@ -Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. \ No newline at end of file diff --git a/changelog.d/9916.feature b/changelog.d/9916.feature deleted file mode 100644 index 54165cce1..000000000 --- a/changelog.d/9916.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance after joining a large room when presence is enabled. diff --git a/changelog.d/9928.bugfix b/changelog.d/9928.bugfix deleted file mode 100644 index 7b74cd9fb..000000000 --- a/changelog.d/9928.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. diff --git a/changelog.d/9930.bugfix b/changelog.d/9930.bugfix deleted file mode 100644 index 9b22ed445..000000000 --- a/changelog.d/9930.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. 
diff --git a/changelog.d/9931.misc b/changelog.d/9931.misc deleted file mode 100644 index 326adc7f3..000000000 --- a/changelog.d/9931.misc +++ /dev/null @@ -1 +0,0 @@ -Minor fixes to the `make_full_schema.sh` script. diff --git a/changelog.d/9932.misc b/changelog.d/9932.misc deleted file mode 100644 index 9e16a3617..000000000 --- a/changelog.d/9932.misc +++ /dev/null @@ -1 +0,0 @@ -Move database schema files into a common directory. diff --git a/changelog.d/9935.feature b/changelog.d/9935.feature deleted file mode 100644 index eeda5bf50..000000000 --- a/changelog.d/9935.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of backfilling in large rooms. diff --git a/changelog.d/9945.feature b/changelog.d/9945.feature deleted file mode 100644 index 84308e8cc..000000000 --- a/changelog.d/9945.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. diff --git a/changelog.d/9947.feature b/changelog.d/9947.feature deleted file mode 100644 index ce8874f81..000000000 --- a/changelog.d/9947.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/changelog.d/9950.feature b/changelog.d/9950.feature deleted file mode 100644 index 96a0e7f09..000000000 --- a/changelog.d/9950.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending events for worker-based deployments using Redis. diff --git a/changelog.d/9951.feature b/changelog.d/9951.feature deleted file mode 100644 index 96a0e7f09..000000000 --- a/changelog.d/9951.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending events for worker-based deployments using Redis. diff --git a/changelog.d/9954.feature b/changelog.d/9954.feature deleted file mode 100644 index ce8874f81..000000000 --- a/changelog.d/9954.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/changelog.d/9959.misc b/changelog.d/9959.misc deleted file mode 100644 index 7231f29d7..000000000 --- a/changelog.d/9959.misc +++ /dev/null @@ -1 +0,0 @@ -Add debug logging for lost/delayed to-device messages. diff --git a/changelog.d/9961.bugfix b/changelog.d/9961.bugfix deleted file mode 100644 index e26d141a5..000000000 --- a/changelog.d/9961.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/changelog.d/9965.bugfix b/changelog.d/9965.bugfix deleted file mode 100644 index e26d141a5..000000000 --- a/changelog.d/9965.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/changelog.d/9966.feature b/changelog.d/9966.feature deleted file mode 100644 index 7b81faabe..000000000 --- a/changelog.d/9966.feature +++ /dev/null @@ -1 +0,0 @@ -Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. 
\ No newline at end of file diff --git a/changelog.d/9968.bugfix b/changelog.d/9968.bugfix deleted file mode 100644 index 39e75f995..000000000 --- a/changelog.d/9968.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. diff --git a/synapse/__init__.py b/synapse/__init__.py index ce822ccb0..15d54a1ce 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.33.2" +__version__ = "1.34.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 91143bb24ee69df71f935fc8062b11508f6c4d76 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 17:04:00 +0100 Subject: [PATCH 36/40] Refer and link to the upgrade notes rather than to the file name --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ddc1f13a3..e6c455033 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features - Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902)) - Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951)) - Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916)) -- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) +- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/master/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) - Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935)) - Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945)) - Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. 
([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954)) From 451f25172afc0ce46e416c73fa703c5edf279d54 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 17:10:42 +0100 Subject: [PATCH 37/40] Incorporate changes from review --- CHANGES.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e6c455033..2ceae0ac8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,10 @@ Synapse 1.34.0rc1 (2021-05-12) ============================== +This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting. + +This release also deprecates the `POST /_synapse/admin/v1/rooms//delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/` route instead. + Features -------- @@ -9,7 +13,7 @@ Features - Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902)) - Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951)) - Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916)) -- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/master/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) +- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) - Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935)) - Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945)) - Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. 
([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954)) @@ -30,7 +34,7 @@ Bugfixes Updates to the Docker image --------------------------- -- Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913)) +- Add `startup_delay` to docker healthcheck to reduce waiting time for coming online and update the documentation with extra options. Contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913)) Improved Documentation From afb6dcf806d5a290d8cbd2c911c6a712ae3cf391 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 17 May 2021 11:34:39 +0100 Subject: [PATCH 38/40] 1.34.0 --- CHANGES.md | 11 +++++++++-- debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2ceae0ac8..1e3fd130f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,17 @@ -Synapse 1.34.0rc1 (2021-05-12) -============================== +Synapse 1.34.0 (2021-05-17) +=========================== This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting. This release also deprecates the `POST /_synapse/admin/v1/rooms//delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/` route instead. + +No significant changes. + + +Synapse 1.34.0rc1 (2021-05-12) +============================== + Features -------- diff --git a/debian/changelog b/debian/changelog index 76b82c172..bf99ae772 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.34.0) stable; urgency=medium + + * New synapse release 1.34.0. + + -- Synapse Packaging team Mon, 17 May 2021 11:34:18 +0100 + matrix-synapse-py3 (1.33.2) stable; urgency=medium * New synapse release 1.33.2. diff --git a/synapse/__init__.py b/synapse/__init__.py index 15d54a1ce..7498a6016 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.34.0rc1" +__version__ = "1.34.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 8dde0bf8b3faa75763d6b0f0fb9413f3b8691067 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 17 May 2021 11:50:08 +0100 Subject: [PATCH 39/40] Update UPGRADE.rst --- UPGRADE.rst | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index 606e357b6..9f61aad41 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -88,7 +88,7 @@ for example: Upgrading to v1.34.0 ==================== -`room_invite_state_types` configuration setting +``room_invite_state_types`` configuration setting ----------------------------------------------- The ``room_invite_state_types`` configuration setting has been deprecated and @@ -106,13 +106,10 @@ remove it from your configuration file. 
The default value used to be: - "m.room.encryption" - "m.room.name" -If you have customised this value by adding addition state types, you should -remove ``room_invite_state_types`` and configure ``additional_event_types`` with -your customisations. +If you have customised this value, you should remove ``room_invite_state_types`` and +configure ``room_prejoin_state`` instead. + -If you have customised this value by removing state types, you should rename -``room_invite_state_types`` to ``additional_event_types``, and set -``disable_default_event_types`` to ``true``. Upgrading to v1.33.0 ==================== From 13b0673b5a0bceafbcfce1407544c2421fd69210 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 17 May 2021 12:00:28 +0100 Subject: [PATCH 40/40] Changelog --- CHANGES.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1e3fd130f..709436da9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ This release deprecates the `room_invite_state_types` configuration setting. See This release also deprecates the `POST /_synapse/admin/v1/rooms//delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/` route instead. -No significant changes. +No significant changes since v1.34.0rc1. Synapse 1.34.0rc1 (2021-05-12) @@ -181,7 +181,7 @@ Synapse 1.32.1 (2021-04-21) =========================== This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853) -in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. +in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. However, as this release is still subject to the `LoggingContext` change in 1.32.0, it is recommended to remain on or downgrade to 1.31.0. @@ -197,11 +197,11 @@ Synapse 1.32.0 (2021-04-20) **Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853) that can overwhelm connected Prometheus instances. This issue was not present in -1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and +1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183) to clean up any excess writeahead logs. -**Note:** This release also mistakenly included a change that may affected Synapse +**Note:** This release also mistakenly included a change that may affected Synapse modules that import `synapse.logging.context.LoggingContext`, such as [synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider). This will be fixed in a later Synapse version. @@ -212,8 +212,8 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/` adm This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. -If you are using the `packages.matrix.org` Debian repository for Synapse packages, -note that we have recently updated the expiry date on the gpg signing key. If you see an +If you are using the `packages.matrix.org` Debian repository for Synapse packages, +note that we have recently updated the expiry date on the gpg signing key. 
If you see an error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you will need to get a fresh copy of the keys. You can do so with: