From 8e0f74851d7cb76579b83fa597dc904e3c339720 Mon Sep 17 00:00:00 2001 From: ch4r10t33r Date: Fri, 24 Apr 2026 13:17:59 +0100 Subject: [PATCH] forkchoice: use only the "new" pool for update_safe_target Treat safe target as an availability signal: compute it strictly from latest_new_aggregated_payloads and ignore latest_known_aggregated_payloads. update_safe_target runs at interval 3, before the migration step at interval 4. That ordering is intentional: votes still in "new" at interval 3 reflect the current slot's online view, while anything already in "known" reflects historical knowledge (block-included attestations, previously migrated gossip, self-attestations stored locally). Counting "known" would let the safe target keep advancing on stale evidence even when live participation has collapsed. This brings the spec in line with the zeam client (see blockblaz/zeam#779) and the Ream reference implementation. The merged-pool test is rewritten to pin the new-only behaviour: with "known" at 2 votes and "new" at 2 votes (threshold 4), the safe target now stays at genesis instead of advancing to block_2. --- src/lean_spec/subspecs/forkchoice/store.py | 75 +++++++------------ tests/consensus/devnet/fc/test_safe_target.py | 26 +++---- 2 files changed, 38 insertions(+), 63 deletions(-) diff --git a/src/lean_spec/subspecs/forkchoice/store.py b/src/lean_spec/subspecs/forkchoice/store.py index 5b333553..17810b2f 100644 --- a/src/lean_spec/subspecs/forkchoice/store.py +++ b/src/lean_spec/subspecs/forkchoice/store.py @@ -815,18 +815,23 @@ def update_safe_target(self) -> "Store": - Interval 3: Safe target update (HERE) - Interval 4: New attestations migrate to "known" pool - Because interval 4 has not yet run, attestations live in two pools: - - - "new": freshly received from gossipsub aggregation this slot - - "known": from block attestations and previously accepted gossip - - Both pools must be merged to get the full attestation picture. 
- Using only one pool undercounts support. See inline comments for - concrete scenarios where this matters. - - Note: the Ream reference implementation uses only the "new" pool. - Our merge approach is more conservative. It ensures the safe target - reflects every attestation the node knows about. + Only the "new" pool is considered. This is a deliberate design choice + rooted in the ordering above: migration into "known" happens at + interval 4, strictly after safe-target computation. 3sf-mini is free + to run the migration before this step but does not, precisely so + that the safe target sees only freshly received votes from the current + slot and ignores what was carried over from earlier slots. + + That ordering encodes a specific semantic: the safe target is an + *availability* signal, not a durable-knowledge signal. A block is + "safe" to attest to when a 2/3 supermajority of validators + currently online — as seen by this node right now — vote for a + descendant of it. Votes aggregated into "known" from earlier + slots (block-included attestations, previously migrated gossip, + self-attestations stored locally) reflect historical knowledge, + not current online behaviour. Counting them would let a node keep + advancing its safe target on stale evidence even when live + participation has collapsed. Returns: New Store with updated safe_target. @@ -845,44 +850,16 @@ def update_safe_target(self) -> "Store": # For example, 100 validators => threshold is 67, not 66. min_target_score = -(-num_validators * 2 // 3) - # Merge both attestation pools into a single unified view. - # - # Why merge? At interval 3, the migration step (interval 4) has not - # run yet. Attestations can enter the "known" pool through paths that - # bypass gossipsub entirely: - # - # 1. Proposer's own attestation: the block proposer bundles their - # attestation directly in the block body. When the block is - # processed, this attestation lands in "known" immediately. 
- # It never appears in "new" because it was never gossipped. - # - # 2. Self-attestation: a node's own gossip attestation does not - # loop back through gossipsub to itself. The node records it - # locally in "known" without going through the "new" pipeline. - # - # Without this merge, those attestations would be invisible to the - # safe target calculation, causing it to undercount support. - # - # The technique: start with a shallow copy of "known", then overlay - # every entry from "new" on top. When both pools contain proofs for - # the same attestation data, merge the proof sets. - all_payloads: dict[AttestationData, set[AggregatedSignatureProof]] = { - attestation_data: set(proofs) - for attestation_data, proofs in self.latest_known_aggregated_payloads.items() - } - for attestation_data, proofs in self.latest_new_aggregated_payloads.items(): - if attestation_data in all_payloads: - # Both pools have proofs for this attestation. Combine them. - all_payloads[attestation_data].update(proofs) - else: - # Only "new" has proofs for this attestation. Add them directly. - all_payloads[attestation_data] = set(proofs) - - # Convert the merged aggregated payloads into per-validator votes. + # Convert the "new" aggregated payloads into per-validator votes. # - # Each proof encodes which validators participated. - # This step unpacks those bitfields into a flat mapping of validator -> vote. - attestations = self.extract_attestations_from_aggregated_payloads(all_payloads) + # Each proof encodes which validators participated. This step + # unpacks those bitfields into a flat mapping of validator -> vote. + # We deliberately do not merge in "known": see the docstring for + # the availability rationale tied to the interval-3/interval-4 + # ordering. + attestations = self.extract_attestations_from_aggregated_payloads( + self.latest_new_aggregated_payloads, + ) # Run LMD GHOST with the supermajority threshold. 
# diff --git a/tests/consensus/devnet/fc/test_safe_target.py b/tests/consensus/devnet/fc/test_safe_target.py index 2bdfe472..dc291e78 100644 --- a/tests/consensus/devnet/fc/test_safe_target.py +++ b/tests/consensus/devnet/fc/test_safe_target.py @@ -417,10 +417,10 @@ def test_safe_target_is_conservative_relative_to_lmd_ghost_head( ) -def test_safe_target_uses_merged_pools_at_interval_3( +def test_safe_target_ignores_known_pool_at_interval_3( fork_choice_test: ForkChoiceTestFiller, ) -> None: - """Safe target merges both attestation pools before computing weight. + """Safe target only uses the "new" pool at interval 3. 6 validators, threshold = 4. @@ -429,18 +429,17 @@ def test_safe_target_uses_merged_pools_at_interval_3( - "known" pool (from block body): validators 0, 1 -> block_2 - "new" pool (from gossip): validators 2, 3 -> block_2 - Neither pool alone meets threshold (2 < 4). - Merged: 4 >= 4. + Safe target is an availability signal tied to the current slot. + Migration into "known" runs at interval 4, strictly after safe-target + computation, so votes already living in "known" at interval 3 are + historical and are intentionally excluded. Walk (min_score=4): - justified -> block_1 (4) -> block_2 (4) -> block_3 (0, stop) - safe_target = block_2 - - Without the merge, the walk would stop at genesis. + justified -> block_1 (weight 2 < 4, pruned) -> stop - Attestations for block_2 appear in block_3's body because - validators can only attest to blocks they have already seen. + Result: safe_target stays at genesis even though the merged view + would have reached block_2. """ fork_choice_test( anchor_state=generate_pre_state(num_validators=6), @@ -520,15 +519,14 @@ def test_safe_target_uses_merged_pools_at_interval_3( ], ), ), - # Interval 3: merge yields weight 4 at block_1 and block_2. - # Walk reaches block_2, stops before block_3. + # Interval 3: only the "new" pool is considered. + # Weight at block_1 = 2 < 4, so the walk cannot leave genesis. 
TickStep( time=15, checks=StoreChecks( head_slot=Slot(3), head_root_label="block_3", - safe_target_slot=Slot(2), - safe_target_root_label="block_2", + safe_target_slot=Slot(0), ), ), ],