From 4676e53e659aea9736fc901610c4154cb1b2fece Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 May 2023 18:54:22 -0500 Subject: [PATCH 01/11] Start of idea to re-use work of getting state for a given state_group --- synapse/storage/databases/state/bg_updates.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 097dea51828c..c26860e0d600 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -89,6 +89,11 @@ def _get_state_groups_from_groups_txn( groups: List[int], state_filter: Optional[StateFilter] = None, ) -> Mapping[int, StateMap[str]]: + """ + We can sort from smallest to largest state_group and re-use the work from the + small state_group for a larger one if we see that the edge chain links up. + """ + state_filter = state_filter or StateFilter.all() results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups} From 6a19afcdad24a7cf46e5b31ac84f0b9ccb5c3940 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 May 2023 20:03:28 -0500 Subject: [PATCH 02/11] Re-use work from previous state_groups --- synapse/storage/databases/state/bg_updates.py | 78 +++++++++++++++---- 1 file changed, 65 insertions(+), 13 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index c26860e0d600..73bcc5e6139d 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Union +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, Tuple, Union from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( @@ -90,8 +90,7 @@ def _get_state_groups_from_groups_txn( state_filter: Optional[StateFilter] = None, ) -> Mapping[int, StateMap[str]]: """ - We can sort from smallest to largest state_group and re-use the work from the - small state_group for a larger one if we see that the edge chain links up. + TODO """ state_filter = state_filter or StateFilter.all() @@ -111,11 +110,22 @@ def _get_state_groups_from_groups_txn( # This may return multiple rows per (type, state_key), but last_value # should be the same. sql = """ - WITH RECURSIVE sgs(state_group) AS ( - VALUES(?::bigint) + WITH RECURSIVE sgs(state_group, state_group_reached) AS ( + VALUES(?::bigint, NULL::bigint) UNION ALL - SELECT prev_state_group FROM state_group_edges e, sgs s - WHERE s.state_group = e.state_group + SELECT + prev_state_group, + CASE + /* Specify state_groups we have already done the work for */ + WHEN @prev_state_group IN (%s) THEN prev_state_group + ELSE NULL + END AS state_group_reached + FROM + state_group_edges e, sgs s + WHERE + s.state_group = e.state_group + /* Stop when we connect up to another state_group that we already did the work for */ + AND s.state_group_reached IS NULL ) %s """ @@ -159,7 +169,7 @@ def _get_state_groups_from_groups_txn( f""" ( SELECT DISTINCT ON (type, state_key) - type, state_key, event_id + type, state_key, event_id, state_group FROM state_groups_state INNER JOIN sgs USING (state_group) WHERE {where_clause} @@ -180,7 +190,7 @@ def _get_state_groups_from_groups_txn( overall_select_clause = f""" SELECT DISTINCT ON (type, state_key) - type, state_key, event_id + type, state_key, event_id, state_group FROM state_groups_state WHERE state_group IN ( SELECT state_group FROM sgs @@ -188,15 +198,57 @@ def 
_get_state_groups_from_groups_txn( ORDER BY type, state_key, state_group DESC """ - for group in groups: + # We can sort from smallest to largest state_group and re-use the work from + # the small state_group for a larger one if we see that the edge chain links + # up. + sorted_groups = sorted(groups) + state_groups_we_have_already_fetched: Set[int] = set() + for group in sorted_groups: args: List[Union[int, str]] = [group] args.extend(overall_select_query_args) - txn.execute(sql % (overall_select_clause,), args) + state_groups_we_have_already_fetched_string = [ + f"{state_group}::bigint" + for state_group in state_groups_we_have_already_fetched + ].join(", ") + + txn.execute( + sql + % ( + state_groups_we_have_already_fetched_string, + overall_select_clause, + ), + args, + ) + + min_state_group: Optional[int] = None + partial_state_map_for_state_group: MutableStateMap[str] = {} for row in txn: - typ, state_key, event_id = row + typ, state_key, event_id, state_group = row key = (intern_string(typ), intern_string(state_key)) - results[group][key] = event_id + partial_state_map_for_state_group[key] = event_id + + if state_group < min_state_group or min_state_group is None: + min_state_group = state_group + + # If we see a state group edge link to a previous state_group that we + # already fetched from the database, link up the base state to the + # partial state we retrieved from the database to build on top of. + if results[min_state_group] is not None: + base_state_map = results[min_state_group].copy() + + results[group] = base_state_map.update( + partial_state_map_for_state_group + ) + else: + # It's also completely normal for us not to have a previous + # state_group to build on top of if this is the first group being + # processes or we are processing a bunch of groups from different + # rooms which of course will never link together. 
+ results[group] = partial_state_map_for_state_group + + state_groups_we_have_already_fetched.add(group) + else: max_entries_returned = state_filter.max_entries_returned() From 02a9959a6ffe2ac7142ac7da3e38c9efc8093c53 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 May 2023 20:24:51 -0500 Subject: [PATCH 03/11] Add changelog --- changelog.d/15617.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/15617.feature diff --git a/changelog.d/15617.feature b/changelog.d/15617.feature new file mode 100644 index 000000000000..092d5f483147 --- /dev/null +++ b/changelog.d/15617.feature @@ -0,0 +1 @@ +Make `/messages` faster by efficiently grabbing state out of database whenever we have to backfill and process new events. From 5704e3b0fd0854e44cc05c1b158625400f387fd3 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 May 2023 20:37:19 -0500 Subject: [PATCH 04/11] Fix lints --- synapse/storage/databases/state/bg_updates.py | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 73bcc5e6139d..9be343a60f39 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -207,10 +207,12 @@ def _get_state_groups_from_groups_txn( args: List[Union[int, str]] = [group] args.extend(overall_select_query_args) - state_groups_we_have_already_fetched_string = [ - f"{state_group}::bigint" - for state_group in state_groups_we_have_already_fetched - ].join(", ") + state_groups_we_have_already_fetched_string = ", ".join( + [ + f"{state_group}::bigint" + for state_group in state_groups_we_have_already_fetched + ] + ) txn.execute( sql @@ -234,12 +236,14 @@ def _get_state_groups_from_groups_txn( # If we see a state group edge link to a previous state_group that we # already fetched from the database, link up the base state to the # partial state we retrieved from the database 
to build on top of. - if results[min_state_group] is not None: - base_state_map = results[min_state_group].copy() - - results[group] = base_state_map.update( - partial_state_map_for_state_group - ) + if ( + min_state_group is not None + and results.get(min_state_group) is not None + ): + resultant_state_map = dict(results[min_state_group]) + resultant_state_map.update(partial_state_map_for_state_group) + + results[group] = resultant_state_map else: # It's also completely normal for us not to have a previous # state_group to build on top of if this is the first group being @@ -258,8 +262,9 @@ def _get_state_groups_from_groups_txn( if where_clause: where_clause = " AND (%s)" % (where_clause,) - # We don't use WITH RECURSIVE on sqlite3 as there are distributions - # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) + # XXX: We could `WITH RECURSIVE` here since it's supported on SQLite 3.8.3 + # or higher and our minimum supported version is greater than that. We just + # haven't put in the time to refactor this. 
for group in groups: next_group: Optional[int] = group From 3d80449d6b14065384584983ec61687ad922e1bc Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 17 May 2023 21:57:13 -0500 Subject: [PATCH 05/11] Fix empty case --- synapse/storage/databases/state/bg_updates.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 9be343a60f39..05bce339e822 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -210,7 +210,9 @@ def _get_state_groups_from_groups_txn( state_groups_we_have_already_fetched_string = ", ".join( [ f"{state_group}::bigint" - for state_group in state_groups_we_have_already_fetched + # We default to `[-1]` just to fill in the query with something + # that will have no effct + for state_group in state_groups_we_have_already_fetched or [-1] ] ) @@ -230,7 +232,7 @@ def _get_state_groups_from_groups_txn( key = (intern_string(typ), intern_string(state_key)) partial_state_map_for_state_group[key] = event_id - if state_group < min_state_group or min_state_group is None: + if min_state_group is None or state_group < min_state_group: min_state_group = state_group # If we see a state group edge link to a previous state_group that we From ab576b6b6bc14ec28bfabe24b6836f109a9b12b0 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 May 2023 01:59:34 -0500 Subject: [PATCH 06/11] Fix when the state_filter prevented us from returning any rows before --- synapse/storage/databases/state/bg_updates.py | 64 +++++++++++++++---- 1 file changed, 53 insertions(+), 11 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 05bce339e822..131314b1286c 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -131,6 +131,21 @@ def _get_state_groups_from_groups_txn( """ 
overall_select_query_args: List[Union[int, str]] = [] + # Make sure we always have a row that tells us if we linked up to another + # state group that we already processed (`state_group_reached`) regardless + # of whether we find any state according to the state_filter. + # + # We use a `UNION ALL` to make sure it is always the first row returned. + # `UNION` will merge and sort in with the rows from the next query + # otherwise. + overall_select_clause = """ + ( + SELECT NULL, NULL, NULL, state_group_reached + FROM sgs + ORDER BY state_group ASC + LIMIT 1 + ) UNION ALL (%s) + """ # This is an optimization to create a select clause per-condition. This # makes the query planner a lot smarter on what rows should pull out in the @@ -178,7 +193,7 @@ def _get_state_groups_from_groups_txn( """ ) - overall_select_clause = " UNION ".join(select_clause_list) + main_select_clause = " UNION ".join(select_clause_list) else: where_clause, where_args = state_filter.make_sql_filter_clause() # Unless the filter clause is empty, we're going to append it after an @@ -188,7 +203,7 @@ def _get_state_groups_from_groups_txn( overall_select_query_args.extend(where_args) - overall_select_clause = f""" + main_select_clause = f""" SELECT DISTINCT ON (type, state_key) type, state_key, event_id, state_group FROM state_groups_state @@ -209,6 +224,7 @@ def _get_state_groups_from_groups_txn( state_groups_we_have_already_fetched_string = ", ".join( [ + # TODO: Is this string manipulation safe? 
f"{state_group}::bigint" # We default to `[-1]` just to fill in the query with something # that will have no effct @@ -217,6 +233,16 @@ def _get_state_groups_from_groups_txn( ) txn.execute( + sql + % ( + state_groups_we_have_already_fetched_string, + overall_select_clause % (main_select_clause,), + ), + args, + ) + + logger.info( + "sql=%s, args=%s", sql % ( state_groups_we_have_already_fetched_string, @@ -225,24 +251,39 @@ def _get_state_groups_from_groups_txn( args, ) - min_state_group: Optional[int] = None + # The first row is always our special `state_group_reached` row which + # tells us if we linked up to any other existing state_group that we + # already fetched and if so, which one we linked up to (see the `UNION + # ALL` above) + first_row = txn.fetchone() + if first_row: + _, _, _, state_group_reached = first_row + partial_state_map_for_state_group: MutableStateMap[str] = {} for row in txn: - typ, state_key, event_id, state_group = row + typ, state_key, event_id, _state_group = row + logger.info( + "row from db -> group=%s type=%s state_key=%s event_id=%s", + group, + typ, + state_key, + event_id, + ) key = (intern_string(typ), intern_string(state_key)) partial_state_map_for_state_group[key] = event_id - if min_state_group is None or state_group < min_state_group: - min_state_group = state_group + logger.info( + "group=%s state_group_reached=%s, partial_state_map_for_state_group=%s", + group, + state_group_reached, + partial_state_map_for_state_group, + ) # If we see a state group edge link to a previous state_group that we # already fetched from the database, link up the base state to the # partial state we retrieved from the database to build on top of. 
- if ( - min_state_group is not None - and results.get(min_state_group) is not None - ): - resultant_state_map = dict(results[min_state_group]) + if state_group_reached in results: + resultant_state_map = dict(results[state_group_reached]) resultant_state_map.update(partial_state_map_for_state_group) results[group] = resultant_state_map @@ -310,6 +351,7 @@ def _get_state_groups_from_groups_txn( allow_none=True, ) + logger.info("_get_state_groups_from_groups_txn results=%s", results) # The results shouldn't be considered mutable. return results From 1f60fcb1afba14d50ba7bdf064d33cbc839de1f4 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 May 2023 02:06:24 -0500 Subject: [PATCH 07/11] Remove debug logs --- synapse/storage/databases/state/bg_updates.py | 25 ------------------- 1 file changed, 25 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 131314b1286c..5fc562bda5a5 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -241,16 +241,6 @@ def _get_state_groups_from_groups_txn( args, ) - logger.info( - "sql=%s, args=%s", - sql - % ( - state_groups_we_have_already_fetched_string, - overall_select_clause, - ), - args, - ) - # The first row is always our special `state_group_reached` row which # tells us if we linked up to any other existing state_group that we # already fetched and if so, which one we linked up to (see the `UNION @@ -262,23 +252,9 @@ def _get_state_groups_from_groups_txn( partial_state_map_for_state_group: MutableStateMap[str] = {} for row in txn: typ, state_key, event_id, _state_group = row - logger.info( - "row from db -> group=%s type=%s state_key=%s event_id=%s", - group, - typ, - state_key, - event_id, - ) key = (intern_string(typ), intern_string(state_key)) partial_state_map_for_state_group[key] = event_id - logger.info( - "group=%s state_group_reached=%s, partial_state_map_for_state_group=%s", - 
group, - state_group_reached, - partial_state_map_for_state_group, - ) - # If we see a state group edge link to a previous state_group that we # already fetched from the database, link up the base state to the # partial state we retrieved from the database to build on top of. @@ -351,7 +327,6 @@ def _get_state_groups_from_groups_txn( allow_none=True, ) - logger.info("_get_state_groups_from_groups_txn results=%s", results) # The results shouldn't be considered mutable. return results From 333fc51e6cd339abb3eba415b3cd6fcb9189ab55 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 May 2023 02:20:31 -0500 Subject: [PATCH 08/11] Replace sketchy string manip with placeholders --- synapse/storage/databases/state/bg_updates.py | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 5fc562bda5a5..d29caaf9479a 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -117,7 +117,7 @@ def _get_state_groups_from_groups_txn( prev_state_group, CASE /* Specify state_groups we have already done the work for */ - WHEN @prev_state_group IN (%s) THEN prev_state_group + WHEN @prev_state_group IN (%s /* state_groups_we_have_already_fetched_string */) THEN prev_state_group ELSE NULL END AS state_group_reached FROM @@ -127,13 +127,14 @@ def _get_state_groups_from_groups_txn( /* Stop when we connect up to another state_group that we already did the work for */ AND s.state_group_reached IS NULL ) - %s + %s /* overall_select_clause */ """ overall_select_query_args: List[Union[int, str]] = [] # Make sure we always have a row that tells us if we linked up to another - # state group that we already processed (`state_group_reached`) regardless - # of whether we find any state according to the state_filter. 
+ # state_group chain that we already processed (indicated by + # `state_group_reached`) regardless of whether we find any state according + # to the state_filter. # # We use a `UNION ALL` to make sure it is always the first row returned. # `UNION` will merge and sort in with the rows from the next query @@ -144,7 +145,7 @@ def _get_state_groups_from_groups_txn( FROM sgs ORDER BY state_group ASC LIMIT 1 - ) UNION ALL (%s) + ) UNION ALL (%s /* main_select_clause */) """ # This is an optimization to create a select clause per-condition. This @@ -217,19 +218,18 @@ def _get_state_groups_from_groups_txn( # the small state_group for a larger one if we see that the edge chain links # up. sorted_groups = sorted(groups) - state_groups_we_have_already_fetched: Set[int] = set() + state_groups_we_have_already_fetched: Set[int] = set( + # We default to `[-1]` just to fill in the query with something + # that will have no effect but not bork our query when it would be empty otherwise + [-1] + ) for group in sorted_groups: args: List[Union[int, str]] = [group] + args.extend(state_groups_we_have_already_fetched) args.extend(overall_select_query_args) state_groups_we_have_already_fetched_string = ", ".join( - [ - # TODO: Is this string manipulation safe? 
- f"{state_group}::bigint" - # We default to `[-1]` just to fill in the query with something - # that will have no effct - for state_group in state_groups_we_have_already_fetched or [-1] - ] + ["?::bigint"] * len(state_groups_we_have_already_fetched) ) txn.execute( From 17aeee764dcaf92640cd9f379524bd2f468dcedf Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 May 2023 02:31:12 -0500 Subject: [PATCH 09/11] More comments --- synapse/storage/databases/state/bg_updates.py | 32 +++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index d29caaf9479a..6e287f8b27a0 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -90,7 +90,15 @@ def _get_state_groups_from_groups_txn( state_filter: Optional[StateFilter] = None, ) -> Mapping[int, StateMap[str]]: """ - TODO + Given a number of state groups, fetch the latest state for each group. + + Args: + txn: The transaction object. + groups: The given state groups that you want to fetch the latest state for. + state_filter: The state filter to apply the state we fetch state from the database. + + Returns: + Map from state_group to a StateMap at that point. """ state_filter = state_filter or StateFilter.all() @@ -102,13 +110,11 @@ def _get_state_groups_from_groups_txn( # a temporary hack until we can add the right indices in txn.execute("SET LOCAL enable_seqscan=off") - # The below query walks the state_group tree so that the "state" + # The query below walks the state_group tree so that the "state" # table includes all state_groups in the tree. It then joins # against `state_groups_state` to fetch the latest state. # It assumes that previous state groups are always numerically # lesser. - # This may return multiple rows per (type, state_key), but last_value - # should be the same. 
sql = """ WITH RECURSIVE sgs(state_group, state_group_reached) AS ( VALUES(?::bigint, NULL::bigint) @@ -214,13 +220,19 @@ def _get_state_groups_from_groups_txn( ORDER BY type, state_key, state_group DESC """ - # We can sort from smallest to largest state_group and re-use the work from - # the small state_group for a larger one if we see that the edge chain links + # We can sort from least to greatest state_group and re-use the work from a + # lesser state_group for a greater one if we see that the edge chain links # up. + # + # What this means in practice is that if we fetch the latest state for + # `state_group = 20`, and then we want `state_group = 30`, it will traverse + # down the edge chain to `20`, see that we linked up to `20` and bail out + # early and re-use the work we did for `20`. sorted_groups = sorted(groups) state_groups_we_have_already_fetched: Set[int] = set( - # We default to `[-1]` just to fill in the query with something - # that will have no effect but not bork our query when it would be empty otherwise + # We default to `[-1]` just to fill in the query with something that + # will have no effect but not bork our query when it would be empty + # otherwise [-1] ) for group in sorted_groups: @@ -244,7 +256,7 @@ def _get_state_groups_from_groups_txn( # The first row is always our special `state_group_reached` row which # tells us if we linked up to any other existing state_group that we # already fetched and if so, which one we linked up to (see the `UNION - # ALL` above) + # ALL` above which drives this special row) first_row = txn.fetchone() if first_row: _, _, _, state_group_reached = first_row @@ -255,7 +267,7 @@ def _get_state_groups_from_groups_txn( key = (intern_string(typ), intern_string(state_key)) partial_state_map_for_state_group[key] = event_id - # If we see a state group edge link to a previous state_group that we + # If we see a state_group edge link to a previous state_group that we # already fetched from the database, link up the 
base state to the # partial state we retrieved from the database to build on top of. if state_group_reached in results: From 7abb745003dc6680d2c17e6eaeb082a252e7d946 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 May 2023 02:32:05 -0500 Subject: [PATCH 10/11] Fix lint --- synapse/storage/databases/state/bg_updates.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 6e287f8b27a0..1b18a2c4e4cd 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -227,14 +227,16 @@ def _get_state_groups_from_groups_txn( # What this means in practice is that if we fetch the latest state for # `state_group = 20`, and then we want `state_group = 30`, it will traverse # down the edge chain to `20`, see that we linked up to `20` and bail out - # early and re-use the work we did for `20`. + # early and re-use the work we did for `20`. This can have massive savings + # in rooms like Matrix HQ where the edge chain is 88k events long and + # fetching the mostly-same chain over and over isn't very efficient. 
sorted_groups = sorted(groups) - state_groups_we_have_already_fetched: Set[int] = set( + state_groups_we_have_already_fetched: Set[int] = { # We default to `[-1]` just to fill in the query with something that # will have no effect but not bork our query when it would be empty # otherwise - [-1] - ) + -1 + } for group in sorted_groups: args: List[Union[int, str]] = [group] args.extend(state_groups_we_have_already_fetched) From 79e6d61141159b4547a7e9939d71cb38f6810f0e Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 18 May 2023 02:49:39 -0500 Subject: [PATCH 11/11] Fix typo --- synapse/storage/databases/state/bg_updates.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 1b18a2c4e4cd..6826e9676187 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -280,8 +280,9 @@ def _get_state_groups_from_groups_txn( else: # It's also completely normal for us not to have a previous # state_group to build on top of if this is the first group being - # processes or we are processing a bunch of groups from different - # rooms which of course will never link together. + # processed or we are processing a bunch of groups from different - # rooms which of course will never link together. + # rooms which of course will never link together (completely + # different DAGs). results[group] = partial_state_map_for_state_group state_groups_we_have_already_fetched.add(group)