Skip to content
This repository was archived by the owner on Apr 26, 2024. It is now read-only.

Commit f60236b

Browse files
committed
Raw commit from debugging "Current state for room {room_id} is empty" error
1 parent d80a7ab commit f60236b

7 files changed

Lines changed: 114 additions & 6 deletions

File tree

scripts-dev/complement.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,4 +73,4 @@ docker build -t $COMPLEMENT_BASE_IMAGE -f "docker/complement/$COMPLEMENT_DOCKERF
7373
# Run the tests!
7474
echo "Images built; running complement"
7575
cd "$COMPLEMENT_DIR"
76-
go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/...
76+
go test -v -tags synapse_blacklist,msc2716,msc3030,faster_joins -count=1 "${extra_test_args[@]}" "$@" ./tests/

synapse/handlers/federation.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -461,6 +461,8 @@ async def do_invite_join(
461461
# room stuff after join currently doesn't work on workers.
462462
assert self.config.worker.worker_app is None
463463

464+
logger.info("traceFrom(do_invite_join) for _update_current_state_txn")
465+
464466
logger.debug("Joining %s to %s", joinee, room_id)
465467

466468
origin, event, room_version_obj = await self._make_and_verify_event(

synapse/handlers/federation_event.py

Lines changed: 37 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -454,6 +454,10 @@ async def process_remote_join(
454454
room_id, itertools.chain(auth_events, state)
455455
)
456456

457+
logger.info(
458+
"process_remote_join state=%s partial_state=%s", state, partial_state
459+
)
460+
457461
# and now persist the join event itself.
458462
logger.info(
459463
"Peristing join-via-remote %s (partial_state: %s)", event, partial_state
@@ -475,7 +479,19 @@ async def process_remote_join(
475479
# and discover that we do not have it.
476480
event.internal_metadata.proactively_send = False
477481

478-
return await self.persist_events_and_notify(room_id, [(event, context)])
482+
stream_id_after_persist = await self.persist_events_and_notify(
483+
room_id, [(event, context)]
484+
)
485+
486+
# Do this after the state from the remote join was persisted (via
487+
# `persist_events_and_notify`). Otherwise we can run into a
488+
# situation where the create event doesn't exist yet in the
489+
# `current_state_events`
490+
for e in state:
491+
await self._handle_marker_event(origin, e)
492+
# TODO: Loop through previous state to find other markers
493+
494+
return stream_id_after_persist
479495

480496
async def update_state_for_partial_state_event(
481497
self, destination: str, event: EventBase
@@ -1200,25 +1216,40 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No
12001216
"""
12011217

12021218
if marker_event.type != EventTypes.MSC2716_MARKER:
1219+
# logger.info(
1220+
# "_handle_marker_event not a marker event marker_event.type=%s",
1221+
# marker_event.type,
1222+
# )
12031223
# Not a marker event
12041224
return
12051225

1226+
logger.info("_handle_marker_event next 0000000000000000000000000000000000")
1227+
12061228
if marker_event.rejected_reason is not None:
1229+
logger.info(
1230+
"_handle_marker_event rejected %s", marker_event.rejected_reason
1231+
)
12071232
# Rejected event
12081233
return
12091234

1235+
logger.info("_handle_marker_event next 1111111111111111111111111111111111")
1236+
12101237
# Skip processing a marker event if the room version doesn't
12111238
# support it or the event is not from the room creator.
12121239
room_version = await self._store.get_room_version(marker_event.room_id)
1240+
logger.info("_handle_marker_event next 2222222222222222222222222222222222")
12131241
create_event = await self._store.get_create_event_for_room(marker_event.room_id)
1242+
logger.info("_handle_marker_event next 3333333333333333333333333333333333")
12141243
room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
1244+
logger.info("_handle_marker_event next 4444444444444444444444444444444444")
12151245
if not room_version.msc2716_historical and (
12161246
not self._config.experimental.msc2716_enabled
12171247
or marker_event.sender != room_creator
12181248
):
1249+
logger.info("_handle_marker_event skipping room_version=%s", room_version)
12191250
return
12201251

1221-
logger.debug("_handle_marker_event: received %s", marker_event)
1252+
logger.info("_handle_marker_event: received %s", marker_event)
12221253

12231254
insertion_event_id = marker_event.content.get(
12241255
EventContentFields.MSC2716_MARKER_INSERTION
@@ -1228,7 +1259,7 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No
12281259
# Nothing to retrieve then (invalid marker)
12291260
return
12301261

1231-
logger.debug(
1262+
logger.info(
12321263
"_handle_marker_event: backfilling insertion event %s", insertion_event_id
12331264
)
12341265

@@ -1260,7 +1291,7 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No
12601291
insertion_event_id, marker_event.room_id
12611292
)
12621293

1263-
logger.debug(
1294+
logger.info(
12641295
"_handle_marker_event: insertion extremity added for %s from marker event %s",
12651296
insertion_event,
12661297
marker_event,
@@ -1947,6 +1978,8 @@ async def persist_events_and_notify(
19471978
Returns:
19481979
The stream ID after which all events have been persisted.
19491980
"""
1981+
# logger.info("persist_events_and_notify event_and_contexts(%d)=%s", len(event_and_contexts), event_and_contexts)
1982+
19501983
if not event_and_contexts:
19511984
return self._store.get_room_max_stream_ordering()
19521985

synapse/rest/client/room.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -330,6 +330,14 @@ async def on_POST(
330330
remote_room_hosts,
331331
)
332332

333+
logger.info("/join/:aliasOrId target_user=%s", requester.user)
334+
logger.info(
335+
"++++---------------------------------------------------------------++++"
336+
)
337+
logger.info(
338+
"++++---------------------------------------------------------------++++"
339+
)
340+
333341
await self.room_member_handler.update_membership(
334342
requester=requester,
335343
target=requester.user,
@@ -840,6 +848,14 @@ async def on_POST(
840848
) -> Tuple[int, JsonDict]:
841849
requester = await self.auth.get_user_by_req(request, allow_guest=True)
842850

851+
logger.info("/%s target_user=%s", membership_action, requester.user)
852+
logger.info(
853+
"-----------------------------------------------------------------------"
854+
)
855+
logger.info(
856+
"-----------------------------------------------------------------------"
857+
)
858+
843859
if requester.is_guest and membership_action not in {
844860
Membership.JOIN,
845861
Membership.LEAVE,

synapse/storage/databases/main/events.py

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,13 @@ async def _persist_events_and_state_updates(
159159
Resolves when the events have been persisted
160160
"""
161161

162+
logger.info(
163+
"_persist_events_and_state_updates events_and_contexts(%d) current_state_for_room(%d) state_delta_for_room(%d)",
164+
len(events_and_contexts),
165+
len(current_state_for_room),
166+
len(state_delta_for_room),
167+
)
168+
162169
# We want to calculate the stream orderings as late as possible, as
163170
# we only notify after all events with a lesser stream ordering have
164171
# been persisted. I.e. if we spend 10s inside the with block then
@@ -999,7 +1006,17 @@ def _update_current_state_txn(
9991006
state_delta_by_room: Dict[str, DeltaState],
10001007
stream_id: int,
10011008
):
1009+
logger.info(
1010+
"_update_current_state_txn state_delta_by_room=%s", state_delta_by_room
1011+
)
10021012
for room_id, delta_state in state_delta_by_room.items():
1013+
logger.info(
1014+
"_update_current_state_txn room_id=%s delta_state=%s",
1015+
room_id,
1016+
delta_state,
1017+
exc_info=True,
1018+
)
1019+
10031020
to_delete = delta_state.to_delete
10041021
to_insert = delta_state.to_insert
10051022

@@ -1037,11 +1054,21 @@ def _update_current_state_txn(
10371054
users_in_room = self.store.get_users_in_room_txn(txn, room_id)
10381055
members_changed.update(users_in_room)
10391056

1057+
logger.info(
1058+
"_update_current_state_txn no_longer_in_room deleting all state for room_id=%s (before)",
1059+
room_id,
1060+
)
1061+
10401062
self.db_pool.simple_delete_txn(
10411063
txn,
10421064
table="current_state_events",
10431065
keyvalues={"room_id": room_id},
10441066
)
1067+
1068+
logger.info(
1069+
"_update_current_state_txn no_longer_in_room deleting all state for room_id=%s (after)",
1070+
room_id,
1071+
)
10451072
else:
10461073
# We're still in the room, so we update the current state as normal.
10471074

@@ -1092,6 +1119,12 @@ def _update_current_state_txn(
10921119
),
10931120
)
10941121

1122+
logger.info(
1123+
"_update_current_state_txn inserting current_state_events to_insert=%s to_delete=%s",
1124+
to_insert,
1125+
to_delete,
1126+
)
1127+
10951128
# We include the membership in the current state table, hence we do
10961129
# a lookup when we insert. This assumes that all events have already
10971130
# been inserted into room_memberships.

synapse/storage/databases/main/state.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -179,6 +179,10 @@ async def get_create_event_for_room(self, room_id: str) -> EventBase:
179179
"""
180180
state_ids = await self.get_current_state_ids(room_id)
181181

182+
logger.info(
183+
"get_create_event_for_room room_id=%s state_ids=%s", room_id, state_ids
184+
)
185+
182186
if not state_ids:
183187
raise NotFoundError(f"Current state for room {room_id} is empty")
184188

synapse/storage/persist_events.py

Lines changed: 21 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -313,11 +313,15 @@ async def persist_events(
313313
matched the transaction ID; the existing event is returned in such
314314
a case.
315315
"""
316+
317+
# logger.info("persist_events events_and_contexts(%d)", len(events_and_contexts))
318+
316319
partitioned: Dict[str, List[Tuple[EventBase, EventContext]]] = {}
317320
for event, ctx in events_and_contexts:
318321
partitioned.setdefault(event.room_id, []).append((event, ctx))
319322

320323
async def enqueue(item):
324+
# logger.info("persist_events enqueue=%s", item)
321325
room_id, evs_ctxs = item
322326
return await self._event_persist_queue.add_to_queue(
323327
room_id, evs_ctxs, backfilled=backfilled
@@ -450,6 +454,12 @@ async def _persist_event_batch(
450454
if not events_and_contexts:
451455
return replaced_events
452456

457+
logger.info(
458+
"traceFrom(_persist_event_batch) for _update_current_state_txn events_and_contexts(%d) backfilled=%s",
459+
len(events_and_contexts),
460+
backfilled,
461+
)
462+
453463
# Check if any of the events have a transaction ID that has already been
454464
# persisted, and if so we don't persist it again.
455465
#
@@ -515,14 +525,18 @@ async def _persist_event_batch(
515525
(event, context)
516526
)
517527

518-
for room_id, ev_ctx_rm in events_by_room.items():
528+
events_by_room_items = events_by_room.items()
529+
530+
for room_id, ev_ctx_rm in events_by_room_items:
519531
latest_event_ids = set(
520532
await self.main_store.get_latest_event_ids_in_room(room_id)
521533
)
522534
new_latest_event_ids = await self._calculate_new_extremities(
523535
room_id, ev_ctx_rm, latest_event_ids
524536
)
525537

538+
logger.info("persist_event_batch new_latest_event_ids=%s latest_event_ids=%s", new_latest_event_ids, latest_event_ids)
539+
526540
if new_latest_event_ids == latest_event_ids:
527541
# No change in extremities, so no change in state
528542
continue
@@ -590,6 +604,12 @@ async def _persist_event_batch(
590604

591605
new_forward_extremities[room_id] = new_latest_event_ids
592606

607+
# TODO: Left off here. need to see why
608+
# `state_delta_for_room` is empty in the case where it's
609+
# not working. Need to check how the delta is being
610+
# calculated
611+
logger.info("persist_event_batch delta_ids=%s", delta_ids)
612+
593613
# If either are not None then there has been a change,
594614
# and we need to work out the delta (or use that
595615
# given)

0 commit comments

Comments (0)