diff --git a/src/lean_spec/subspecs/networking/client/reqresp_client.py b/src/lean_spec/subspecs/networking/client/reqresp_client.py index 971dc5562..8966c6946 100644 --- a/src/lean_spec/subspecs/networking/client/reqresp_client.py +++ b/src/lean_spec/subspecs/networking/client/reqresp_client.py @@ -33,14 +33,17 @@ from dataclasses import dataclass, field from lean_spec.subspecs.containers import SignedBlock +from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.networking.reqresp.codec import ( CodecError, ResponseCode, encode_request, ) from lean_spec.subspecs.networking.reqresp.message import ( + BLOCKS_BY_RANGE_PROTOCOL_V1, BLOCKS_BY_ROOT_PROTOCOL_V1, STATUS_PROTOCOL_V1, + BlocksByRangeRequest, BlocksByRootRequest, RequestedBlockRoots, Status, @@ -50,7 +53,8 @@ QuicConnection, QuicConnectionManager, ) -from lean_spec.types import Bytes32 +from lean_spec.subspecs.ssz.hash import hash_tree_root +from lean_spec.types import Bytes32, Uint64 logger = logging.getLogger(__name__) @@ -205,6 +209,154 @@ async def _do_blocks_by_root_request( finally: await stream.close() + async def request_blocks_by_range( + self, + peer_id: PeerId, + start_slot: Slot, + count: Uint64, + ) -> list[SignedBlock]: + """ + Request blocks by range from a peer. + + Implements the NetworkRequester protocol method. + + Args: + peer_id: Peer to request from. + start_slot: Start slot of the range. + count: Number of blocks to request. + + Returns: + List of blocks received. May be fewer than requested if peer + doesn't have all blocks. Empty on error. 
+ """ + if count == 0: + return [] + + conn = self._connections.get(peer_id) + if conn is None: + logger.debug("No connection to peer %s for blocks_by_range", peer_id) + return [] + + try: + return await asyncio.wait_for( + self._do_blocks_by_range_request(conn, start_slot, count), + timeout=self.timeout, + ) + except asyncio.TimeoutError: + logger.warning("Timeout requesting blocks from %s", peer_id) + return [] + except Exception as e: + logger.warning("Error requesting blocks from %s: %s", peer_id, e) + return [] + + async def _do_blocks_by_range_request( + self, + conn: QuicConnection, + start_slot: Slot, + count: Uint64, + ) -> list[SignedBlock]: + """ + Execute a BlocksByRange request. + + Opens a stream, negotiates the protocol, sends the request, + and reads all response chunks. + + Args: + conn: QuicConnection to use. + start_slot: Start slot of the range. + count: Number of blocks to request. + + Returns: + List of blocks received. + """ + # Open a new stream and negotiate the protocol. + stream = await conn.open_stream(BLOCKS_BY_RANGE_PROTOCOL_V1) + + try: + # Build and send the request. + request = BlocksByRangeRequest(start_slot=start_slot, count=count) + request_bytes = encode_request(request.encode_bytes()) + await stream.write(request_bytes) + + # Half-close to signal we're done sending. + finish_write = getattr(stream, "finish_write", None) + if finish_write is not None: + await finish_write() + + # Read response chunks. + # + # Each block is sent as a separate response chunk. + # We read until the stream closes or we get all blocks. + blocks: list[SignedBlock] = [] + prev_slot: Slot | None = None + prev_root: Bytes32 | None = None + + for _ in range(int(count)): + try: + response_data = await stream.read() + if not response_data: + # Stream closed, no more blocks. 
+ break + + code, ssz_bytes = ResponseCode.decode(response_data) + + if code == ResponseCode.SUCCESS: + block = SignedBlock.decode_bytes(ssz_bytes) + + # Step 1: Verify slot strictly increasing. + # + # Peers MUST return blocks in increasing order. + if prev_slot is not None and block.block.slot <= prev_slot: + raise CodecError( + f"Non-monotonic slot: {block.block.slot} <= {prev_slot}" + ) + + # Step 2: Verify block is within requested range. + if block.block.slot < start_slot or block.block.slot >= start_slot + count: + raise CodecError( + f"Block slot {block.block.slot} outside requested range" + ) + + # Step 3: Verify parent_root continuity. + # + # If the slots are consecutive, the parent_root MUST match the + # previous root. + # If there are skips, we can't verify continuity here but we still + # check monotonicity. + if prev_root is not None and block.block.slot == prev_slot + 1: + if block.block.parent_root != prev_root: + raise CodecError( + f"Parent root mismatch at slot {block.block.slot}: " + f"expected {prev_root.hex()}, " + f"got {block.block.parent_root.hex()}" + ) + + blocks.append(block) + prev_slot = block.block.slot + prev_root = hash_tree_root(block.block) + + elif code == ResponseCode.RESOURCE_UNAVAILABLE: + # Peer doesn't have this block, continue. + continue + else: + # Other error, stop reading. + logger.debug("BlocksByRange error response: %s", code) + break + + except CodecError as e: + # Protocol violation: Log and re-raise to trigger downscoring. + logger.warning("Protocol violation from %s: %s", conn, e) + raise + + return blocks + + finally: + # Always close the stream. 
+ try: + await stream.close() + except Exception as e: + logger.debug("Error closing stream: %s", e) + async def send_status( self, peer_id: PeerId, diff --git a/src/lean_spec/subspecs/networking/config.py b/src/lean_spec/subspecs/networking/config.py index 258865cb7..c2bb1fdcb 100644 --- a/src/lean_spec/subspecs/networking/config.py +++ b/src/lean_spec/subspecs/networking/config.py @@ -72,3 +72,9 @@ "libp2p" is the Application-Layer Protocol Negotiation (ALPN) value used during the TLS 1.3 handshake to identify libp2p connections. """ + +MIN_BLOCK_REQUESTS_HISTORY_SLOT: Final[int] = 3600 +"""Minimum block requests responder should serve.""" + +MAX_CONCURRENT_REQUESTS: Final[int] = 2 +"""Maximum concurrent block requests receiver can make.""" diff --git a/src/lean_spec/subspecs/networking/reqresp/__init__.py b/src/lean_spec/subspecs/networking/reqresp/__init__.py index 49f0a6c97..ede4aa261 100644 --- a/src/lean_spec/subspecs/networking/reqresp/__init__.py +++ b/src/lean_spec/subspecs/networking/reqresp/__init__.py @@ -8,14 +8,17 @@ ) from .handler import ( REQRESP_PROTOCOL_IDS, + AsyncBlockBySlotLookup, AsyncBlockLookup, ReqRespServer, RequestHandler, StreamResponseAdapter, ) from .message import ( + BLOCKS_BY_RANGE_PROTOCOL_V1, BLOCKS_BY_ROOT_PROTOCOL_V1, STATUS_PROTOCOL_V1, + BlocksByRangeRequest, BlocksByRootRequest, RequestedBlockRoots, Status, @@ -23,10 +26,12 @@ __all__ = [ # Protocol IDs + "BLOCKS_BY_RANGE_PROTOCOL_V1", "BLOCKS_BY_ROOT_PROTOCOL_V1", "STATUS_PROTOCOL_V1", "REQRESP_PROTOCOL_IDS", # Message types + "BlocksByRangeRequest", "BlocksByRootRequest", "RequestedBlockRoots", "Status", @@ -36,6 +41,7 @@ "encode_request", "decode_request", # Inbound handlers + "AsyncBlockBySlotLookup", "AsyncBlockLookup", "RequestHandler", "ReqRespServer", diff --git a/src/lean_spec/subspecs/networking/reqresp/handler.py b/src/lean_spec/subspecs/networking/reqresp/handler.py index 24df08280..358d11397 100644 --- a/src/lean_spec/subspecs/networking/reqresp/handler.py +++ 
b/src/lean_spec/subspecs/networking/reqresp/handler.py @@ -66,16 +66,23 @@ from lean_spec.snappy import SnappyDecompressionError, frame_decompress from lean_spec.subspecs.containers import SignedBlock -from lean_spec.subspecs.networking.config import MAX_ERROR_MESSAGE_SIZE +from lean_spec.subspecs.containers.slot import Slot +from lean_spec.subspecs.networking.config import ( + MAX_ERROR_MESSAGE_SIZE, + MAX_REQUEST_BLOCKS, + MIN_BLOCK_REQUESTS_HISTORY_SLOT, +) from lean_spec.subspecs.networking.transport.protocols import InboundStreamProtocol from lean_spec.subspecs.networking.types import ProtocolId from lean_spec.subspecs.networking.varint import VarintError, decode_varint -from lean_spec.types import Bytes32 +from lean_spec.types import Bytes32, Uint64 from .codec import ResponseCode from .message import ( + BLOCKS_BY_RANGE_PROTOCOL_V1, BLOCKS_BY_ROOT_PROTOCOL_V1, STATUS_PROTOCOL_V1, + BlocksByRangeRequest, BlocksByRootRequest, Status, ) @@ -124,6 +131,12 @@ async def finish(self) -> None: Takes a block root and returns the block if available, None otherwise. """ +type AsyncBlockBySlotLookup = Callable[[Slot], Awaitable[SignedBlock | None]] +"""Type alias for block lookup by slot function. + +Takes a slot and returns the canonical block if available, None otherwise. +""" + @dataclass(slots=True) class RequestHandler: @@ -152,6 +165,9 @@ class RequestHandler: block_lookup: AsyncBlockLookup | None = None """Callback to look up blocks by root.""" + block_by_slot_lookup: AsyncBlockBySlotLookup | None = None + """Callback to look up canonical blocks by slot.""" + async def handle_status(self, response: StreamResponseAdapter) -> None: """ Handle incoming Status request. @@ -221,11 +237,66 @@ async def handle_blocks_by_root( # The peer can retry or ask another peer for this specific block. 
logger.warning("Error looking up block %s: %s", root.hex()[:8], e) + async def handle_blocks_by_range( + self, + request: BlocksByRangeRequest, + response: StreamResponseAdapter, + ) -> None: + """ + Handle incoming BlocksByRange request. + + Looks up and sends each requested block in the range. + + Args: + request: Block range to look up. + response: Stream for sending blocks. + """ + # Guard: Ensure we have a block lookup configured. + if self.block_by_slot_lookup is None: + logger.warning("BlocksByRange request received but no block_by_slot_lookup configured") + await response.send_error(ResponseCode.SERVER_ERROR, "Block lookup not available") + return + + # Step 1: Validate request parameters. + # + # count == 0 is INVALID_REQUEST per spec. + if request.count == Uint64(0) or request.count > Uint64(MAX_REQUEST_BLOCKS): + await response.send_error(ResponseCode.INVALID_REQUEST, "Invalid count") + return + + # Step 2: Check history window. + # + # We only serve blocks within the configured history window. + # This allows nodes to prune old state. + if request.start_slot < Slot(MIN_BLOCK_REQUESTS_HISTORY_SLOT): + await response.send_error( + ResponseCode.RESOURCE_UNAVAILABLE, "Requested slot predates history window" + ) + return + + # Step 3: Serve blocks in the range. + # + # Rules: + # - Only canonical blocks (handled by the callback). + # - Skip empty slots. + # - Order must be preserved. + for i in range(int(request.count)): + slot = request.start_slot + Slot(i) + try: + block = await self.block_by_slot_lookup(slot) + if block is not None: + await response.send_success(block.encode_bytes()) + + # Missing/skipped slot: Skip silently. 
+ except Exception as e: + logger.warning("Error looking up block at slot %s: %s", slot, e) + REQRESP_PROTOCOL_IDS: Final[frozenset[ProtocolId]] = frozenset( { STATUS_PROTOCOL_V1, BLOCKS_BY_ROOT_PROTOCOL_V1, + BLOCKS_BY_RANGE_PROTOCOL_V1, } ) """Protocol IDs handled by ReqRespServer.""" @@ -450,6 +521,21 @@ async def _dispatch( return await self.handler.handle_blocks_by_root(request, response) + elif protocol_id == BLOCKS_BY_RANGE_PROTOCOL_V1: + # BlocksByRange request: Peer wants blocks by range. + # + # The request is an SSZ object with start_slot and count. + try: + request = BlocksByRangeRequest.decode_bytes(ssz_bytes) + except Exception as e: + # SSZ decode failure: wrong size, malformed offsets, etc. + logger.debug("BlocksByRangeRequest decode error: %s", e) + await response.send_error( + ResponseCode.INVALID_REQUEST, "Invalid BlocksByRangeRequest message" + ) + return + await self.handler.handle_blocks_by_range(request, response) + else: # Unknown protocol ID. # diff --git a/src/lean_spec/subspecs/networking/reqresp/message.py b/src/lean_spec/subspecs/networking/reqresp/message.py index 52b71ac44..57f5eb57f 100644 --- a/src/lean_spec/subspecs/networking/reqresp/message.py +++ b/src/lean_spec/subspecs/networking/reqresp/message.py @@ -8,7 +8,8 @@ from typing import ClassVar, Final from lean_spec.subspecs.containers import Checkpoint -from lean_spec.types import Bytes32, SSZList +from lean_spec.subspecs.containers.slot import Slot +from lean_spec.types import Bytes32, SSZList, Uint64 from lean_spec.types.container import Container from ..config import MAX_REQUEST_BLOCKS @@ -43,6 +44,9 @@ class Status(Container): BLOCKS_BY_ROOT_PROTOCOL_V1: Final = ProtocolId("/leanconsensus/req/blocks_by_root/1/ssz_snappy") """The protocol ID for the BlocksByRoot v1 request/response message.""" +BLOCKS_BY_RANGE_PROTOCOL_V1: Final = ProtocolId("/leanconsensus/req/blocks_by_range/1/ssz_snappy") +"""The protocol ID for the BlocksByRange v1 request/response message.""" + class 
RequestedBlockRoots(SSZList[Bytes32]): """List of block roots requested from a peer.""" @@ -59,3 +63,17 @@ class BlocksByRootRequest(Container): roots: RequestedBlockRoots """List of block roots requested from a peer.""" + + +class BlocksByRangeRequest(Container): + """ + A request for one or more blocks by their slot numbers. + + This is primarily used to recover recent or missing blocks from a peer. + """ + + start_slot: Slot + """The starting slot of the range (inclusive).""" + + count: Uint64 + """The number of blocks to request (at most `MAX_REQUEST_BLOCKS`).""" diff --git a/src/lean_spec/subspecs/sync/backfill_sync.py b/src/lean_spec/subspecs/sync/backfill_sync.py index ed7ea1199..9d6d1e0bd 100644 --- a/src/lean_spec/subspecs/sync/backfill_sync.py +++ b/src/lean_spec/subspecs/sync/backfill_sync.py @@ -38,17 +38,22 @@ from __future__ import annotations +import logging from dataclasses import dataclass, field -from typing import Protocol +from typing import Callable, Protocol from lean_spec.subspecs.containers import SignedBlock +from lean_spec.subspecs.containers.slot import Slot +from lean_spec.subspecs.networking.config import MAX_REQUEST_BLOCKS from lean_spec.subspecs.networking.transport.peer_id import PeerId -from lean_spec.types import Bytes32 +from lean_spec.types import Bytes32, Uint64 from .block_cache import BlockCache from .config import MAX_BACKFILL_DEPTH, MAX_BLOCKS_PER_REQUEST from .peer_manager import PeerManager +logger = logging.getLogger(__name__) + class NetworkRequester(Protocol): """ @@ -82,6 +87,28 @@ async def request_blocks_by_root( """ ... + async def request_blocks_by_range( + self, + peer_id: PeerId, + start_slot: Slot, + count: Uint64, + ) -> list[SignedBlock]: + """ + Request blocks by range from a peer. + + Implements the NetworkRequester protocol method. + + Args: + peer_id: Peer to request from. + start_slot: Start slot of the range. + count: Number of blocks to request. + + Returns: + List of blocks received. 
May be fewer than requested if peer + doesn't have all blocks. Empty on error. + """ + ... + @dataclass(slots=True) class BackfillSync: @@ -122,9 +149,18 @@ class BackfillSync: network: NetworkRequester """Network interface for block requests.""" + is_known_root: Callable[[Bytes32], bool] | None = field(default=None) + """Optional callback to check if a block root is already in the Store.""" + + get_finalized_slot: Callable[[], Slot] | None = field(default=None) + """Optional callback to get the current finalized slot.""" + _pending: set[Bytes32] = field(default_factory=set) """Roots currently being fetched (to avoid duplicate requests).""" + _max_range_slot: Slot = field(default_factory=lambda: Slot(0)) + """Highest slot covered by an in-flight range request.""" + async def fill_missing( self, roots: list[Bytes32], @@ -172,6 +208,92 @@ async def fill_missing( # Always clear pending status, even on error. self._pending.difference_update(roots_to_fetch) + async def fill_range( + self, + start_slot: Slot, + count: Uint64, + depth: int = 0, + ) -> None: + """ + Fetch missing blocks by slot range. + + This is a more efficient alternative to fill_missing when a large + contiguous gap is detected. + + Args: + start_slot: Start slot of the range. + count: Number of blocks to request. + depth: Current backfill depth. + """ + if depth >= MAX_BACKFILL_DEPTH: + return + + if count == Uint64(0): + return + + # Fetch in batches. + # + # Range requests are already batched by the network client, but we + # also batch here to allow interrupting or spreading load across peers. + # Optimization: only fetch what we haven't asked for yet. 
+ actual_start = max(int(start_slot), int(self._max_range_slot) + 1) + end_slot = Slot(int(start_slot) + int(count) - 1) + + if int(end_slot) < actual_start: + logger.debug( + "Skipping range fetch [%s, %s]: already covered by pending request (up to %s)", + start_slot, + end_slot, + self._max_range_slot, + ) + return + + self._max_range_slot = max(self._max_range_slot, end_slot) + + current_slot = actual_start + remaining = int(end_slot) - actual_start + 1 + + while remaining > 0: + batch_count = min(remaining, MAX_REQUEST_BLOCKS) + await self._fetch_range(Slot(current_slot), Uint64(batch_count), depth) + current_slot += batch_count + remaining -= batch_count + + async def _fetch_range( + self, + start_slot: Slot, + count: Uint64, + depth: int, + ) -> None: + """Fetch a range of blocks from a peer.""" + peer = self.peer_manager.select_peer_for_request( + min_slot=Slot(int(start_slot) + int(count) - 1) + ) + if peer is None: + # Fallback to any peer if no one reports having the whole range. + peer = self.peer_manager.select_peer_for_request() + + if peer is None: + return + + peer.on_request_start() + try: + blocks = await self.network.request_blocks_by_range( + peer_id=peer.peer_id, + start_slot=start_slot, + count=count, + ) + + if blocks: + self.peer_manager.on_request_success(peer.peer_id) + await self._process_received_blocks(blocks, peer.peer_id, depth) + else: + self.peer_manager.on_request_success(peer.peer_id) + + except Exception as e: + logger.warning("Error in _fetch_range from %s: %s", peer.peer_id, e) + self.peer_manager.on_request_failure(peer.peer_id) + async def _fetch_batch( self, roots: list[Bytes32], @@ -249,19 +371,44 @@ async def _process_received_blocks( backfill_depth=depth + 1, ) - # Check if this block's parent is known. - # - # A block is orphan if its parent is not in the cache. + # A block is an orphan if its parent is not in the cache. # (We cannot check the Store here; that is the SyncService's job.) 
parent_root = pending.parent_root - if parent_root not in self.block_cache: + parent_known = parent_root in self.block_cache or ( + self.is_known_root(parent_root) if self.is_known_root else False + ) + + if not parent_known: # Parent unknown. Mark as orphan and queue for fetch. self.block_cache.mark_orphan(pending.root) if parent_root not in self._pending: new_orphan_parents.append(parent_root) # Recursively fetch orphan parents. + # + # If we have multiple missing parents, we can try to resolve them + # using range sync if they appear to follow a gap. if new_orphan_parents: + # If the oldest block we just received has a missing parent, + # check if there is a gap we can fill with a range request. + if self.get_finalized_slot and blocks: + # Find the earliest block in this batch. + earliest_block = min(blocks, key=lambda b: b.block.slot) + finalized_slot = self.get_finalized_slot() + gap = int(earliest_block.block.slot) - int(finalized_slot) + + if gap > 1: + logger.debug( + "Backfill detected gap (%d slots) at slot %s. 
Triggering range fetch.", + gap, + earliest_block.block.slot, + ) + await self.fill_range( + start_slot=Slot(int(finalized_slot) + 1), + count=Uint64(gap - 1), + depth=depth + 1, + ) + await self.fill_missing(new_orphan_parents, depth=depth + 1) def reset(self) -> None: diff --git a/src/lean_spec/subspecs/sync/head_sync.py b/src/lean_spec/subspecs/sync/head_sync.py index 0b397219d..d4f717a3c 100644 --- a/src/lean_spec/subspecs/sync/head_sync.py +++ b/src/lean_spec/subspecs/sync/head_sync.py @@ -49,10 +49,11 @@ from dataclasses import dataclass, field from lean_spec.subspecs.containers import SignedBlock +from lean_spec.subspecs.containers.slot import Slot from lean_spec.subspecs.forkchoice import Store from lean_spec.subspecs.networking.transport.peer_id import PeerId from lean_spec.subspecs.ssz.hash import hash_tree_root -from lean_spec.types import Bytes32 +from lean_spec.types import Bytes32, Uint64 from .backfill_sync import BackfillSync from .block_cache import BlockCache @@ -375,8 +376,20 @@ async def _cache_and_backfill( # Mark as orphan. self.block_cache.mark_orphan(pending.root) - # Trigger backfill for the missing parent. - await self.backfill.fill_missing([parent_root]) + # Trigger backfill for the missing parent(s). + # + # If there is a gap between our finalized slot and this block, + # use range-based fetching to fill it efficiently. + gap = int(block_inner.slot - store.latest_finalized.slot) + if gap > 1: + logger.debug("Large gap detected (%d slots). Triggering range backfill.", gap) + await self.backfill.fill_range( + start_slot=Slot(int(store.latest_finalized.slot) + 1), + count=Uint64(gap - 1), + ) + else: + # Direct parent missing. 
+ await self.backfill.fill_missing([parent_root]) return HeadSyncResult( processed=False, diff --git a/src/lean_spec/subspecs/sync/service.py b/src/lean_spec/subspecs/sync/service.py index ead10c99a..c824dced7 100644 --- a/src/lean_spec/subspecs/sync/service.py +++ b/src/lean_spec/subspecs/sync/service.py @@ -242,6 +242,8 @@ def _init_components(self) -> None: peer_manager=self.peer_manager, block_cache=self.block_cache, network=self.network, + is_known_root=lambda root: root in self.store.blocks, + get_finalized_slot=lambda: self.store.latest_finalized.slot, ) # HeadSync processes incoming gossip blocks and coordinates backfill. diff --git a/tests/lean_spec/helpers/mocks.py b/tests/lean_spec/helpers/mocks.py index 8b30fa22a..ccd26a791 100644 --- a/tests/lean_spec/helpers/mocks.py +++ b/tests/lean_spec/helpers/mocks.py @@ -18,7 +18,7 @@ from lean_spec.subspecs.networking import PeerId from lean_spec.subspecs.networking.service.events import NetworkEvent from lean_spec.subspecs.ssz.hash import hash_tree_root -from lean_spec.types import Bytes32 +from lean_spec.types import Bytes32, Uint64 class MockNetworkRequester: @@ -27,7 +27,8 @@ class MockNetworkRequester: def __init__(self) -> None: """Initialize with empty block store and request log.""" self.blocks_by_root: dict[Bytes32, SignedBlock] = {} - self.request_log: list[tuple[PeerId, list[Bytes32]]] = [] + self.blocks_by_slot: dict[Slot, SignedBlock] = {} + self.request_log: list[tuple[PeerId, list[Bytes32] | tuple[Slot, Uint64]]] = [] self.should_fail: bool = False async def request_blocks_by_root( @@ -49,10 +50,30 @@ async def request_block_by_root( """Return a single block by root.""" return self.blocks_by_root.get(root) - def add_block(self, block: SignedBlock) -> Bytes32: + async def request_blocks_by_range( + self, + peer_id: PeerId, + start_slot: Slot, + count: Uint64, + ) -> list[SignedBlock]: + """Return blocks for requested slot range.""" + self.request_log.append((peer_id, (start_slot, count))) + if 
self.should_fail: + raise ConnectionError("Network failed") + + blocks: list[SignedBlock] = [] + for i in range(int(count)): + slot = Slot(int(start_slot) + i) + if slot in self.blocks_by_slot: + blocks.append(self.blocks_by_slot[slot]) + return blocks + + def add_block(self, block: SignedBlock, root: Bytes32 | None = None) -> Bytes32: """Add a block to the mock network. Returns its root.""" - root = hash_tree_root(block.block) + if root is None: + root = hash_tree_root(block.block) self.blocks_by_root[root] = block + self.blocks_by_slot[block.block.slot] = block return root diff --git a/tests/lean_spec/subspecs/networking/reqresp/test_handler.py b/tests/lean_spec/subspecs/networking/reqresp/test_handler.py index 538e42e87..046794325 100644 --- a/tests/lean_spec/subspecs/networking/reqresp/test_handler.py +++ b/tests/lean_spec/subspecs/networking/reqresp/test_handler.py @@ -5,9 +5,11 @@ import asyncio from dataclasses import dataclass, field +import pytest + from lean_spec.subspecs.containers import Checkpoint, SignedBlock from lean_spec.subspecs.containers.slot import Slot -from lean_spec.subspecs.networking.config import MAX_ERROR_MESSAGE_SIZE +from lean_spec.subspecs.networking.config import MAX_ERROR_MESSAGE_SIZE, MAX_REQUEST_BLOCKS from lean_spec.subspecs.networking.reqresp.codec import ( ResponseCode, encode_request, @@ -19,15 +21,19 @@ StreamResponseAdapter, ) from lean_spec.subspecs.networking.reqresp.message import ( + BLOCKS_BY_RANGE_PROTOCOL_V1, BLOCKS_BY_ROOT_PROTOCOL_V1, STATUS_PROTOCOL_V1, + BlocksByRangeRequest, BlocksByRootRequest, RequestedBlockRoots, Status, ) from lean_spec.subspecs.networking.types import ProtocolId from lean_spec.subspecs.networking.varint import encode_varint -from lean_spec.types import Bytes32 +from lean_spec.subspecs.ssz import hash_tree_root +from lean_spec.types import Bytes32, Uint64 +from lean_spec.types.exceptions import SSZSerializationError from tests.lean_spec.helpers import make_test_block, make_test_status @@ 
-1193,3 +1199,443 @@ async def test_read_request_rejects_oversized_compressed_data(self) -> None: code, _ = ResponseCode.decode(stream.written[0]) assert code == ResponseCode.INVALID_REQUEST + + +class TestBlocksByRangeRequestRoundTrip: + """SSZ encode/decode round-trip tests.""" + + def test_basic_roundtrip(self) -> None: + """Encode then decode yields identical container.""" + req = BlocksByRangeRequest(start_slot=Slot(100), count=Uint64(10)) + encoded = req.encode_bytes() + assert len(encoded) == 16 # 8 (Slot) + 8 (Uint64) + + decoded = BlocksByRangeRequest.decode_bytes(encoded) + assert decoded.start_slot == Slot(100) + assert decoded.count == Uint64(10) + + def test_roundtrip_preserves_equality(self) -> None: + """Decoded request equals original.""" + original = BlocksByRangeRequest(start_slot=Slot(42), count=Uint64(1024)) + decoded = BlocksByRangeRequest.decode_bytes(original.encode_bytes()) + assert original == decoded + + def test_roundtrip_various_values(self) -> None: + """Round-trip works for diverse value combinations.""" + cases = [ + (Slot(0), Uint64(1)), + (Slot(1), Uint64(1)), + (Slot(0), Uint64(MAX_REQUEST_BLOCKS)), + (Slot(999_999), Uint64(512)), + ] + for start_slot, count in cases: + req = BlocksByRangeRequest(start_slot=start_slot, count=count) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.start_slot == start_slot + assert decoded.count == count + + +class TestBlocksByRangeRequestHashTreeRoot: + """hash_tree_root stability tests.""" + + def test_hash_tree_root_stable_across_reencodings(self) -> None: + """hash_tree_root is identical for equal requests re-encoded.""" + req1 = BlocksByRangeRequest(start_slot=Slot(100), count=Uint64(10)) + req2 = BlocksByRangeRequest(start_slot=Slot(100), count=Uint64(10)) + + root1 = hash_tree_root(req1) + root2 = hash_tree_root(req2) + assert root1 == root2 + + def test_hash_tree_root_after_decode(self) -> None: + """hash_tree_root matches between original and decoded 
copy.""" + original = BlocksByRangeRequest(start_slot=Slot(500), count=Uint64(64)) + decoded = BlocksByRangeRequest.decode_bytes(original.encode_bytes()) + + assert hash_tree_root(original) == hash_tree_root(decoded) + + def test_hash_tree_root_differs_for_different_values(self) -> None: + """Different requests produce different roots.""" + req_a = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(10)) + req_b = BlocksByRangeRequest(start_slot=Slot(1), count=Uint64(10)) + req_c = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(11)) + + assert hash_tree_root(req_a) != hash_tree_root(req_b) + assert hash_tree_root(req_a) != hash_tree_root(req_c) + + +class TestBlocksByRangeRequestBoundaryValues: + """Boundary and edge-case value tests.""" + + def test_start_slot_zero_decodes_cleanly(self) -> None: + """start_slot at Slot(0) encodes and decodes correctly.""" + req = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(1)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.start_slot == Slot(0) + + def test_large_start_slot_decodes_cleanly(self) -> None: + """Large start_slot values encode and decode correctly.""" + large_slot = Slot(2**63 - 1) # Very large but valid + req = BlocksByRangeRequest(start_slot=large_slot, count=Uint64(1)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.start_slot == large_slot + + def test_max_uint64_start_slot_decodes_cleanly(self) -> None: + """start_slot at Uint64 max boundary decodes cleanly.""" + max_slot = Slot(2**64 - 1) + req = BlocksByRangeRequest(start_slot=max_slot, count=Uint64(1)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.start_slot == max_slot + + def test_count_one(self) -> None: + """Minimum meaningful count (1) round-trips correctly.""" + req = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(1)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.count == Uint64(1) + + 
def test_count_max_request_blocks(self) -> None: + """count == MAX_REQUEST_BLOCKS round-trips correctly.""" + req = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(MAX_REQUEST_BLOCKS)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.count == Uint64(MAX_REQUEST_BLOCKS) + + def test_count_zero_encodes_and_decodes(self) -> None: + """count == 0 encodes/decodes at SSZ layer (validation is handler-level).""" + req = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(0)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.count == Uint64(0) + + def test_count_above_max_encodes_and_decodes(self) -> None: + """count > MAX_REQUEST_BLOCKS encodes/decodes at SSZ layer. + + Enforcement of MAX_REQUEST_BLOCKS is the handler's job, not SSZ's. + """ + req = BlocksByRangeRequest(start_slot=Slot(0), count=Uint64(MAX_REQUEST_BLOCKS + 1)) + decoded = BlocksByRangeRequest.decode_bytes(req.encode_bytes()) + assert decoded.count == Uint64(MAX_REQUEST_BLOCKS + 1) + + +class TestBlocksByRangeRequestMalformedPayloads: + """Truncated and malformed payload rejection tests.""" + + def test_truncated_payload_rejected(self) -> None: + """Payload shorter than 16 bytes is rejected.""" + truncated = b"\x01" * 15 # 15 bytes, need 16 + with pytest.raises(SSZSerializationError): + BlocksByRangeRequest.decode_bytes(truncated) + + def test_empty_payload_rejected(self) -> None: + """Zero-length payload is rejected.""" + with pytest.raises(SSZSerializationError): + BlocksByRangeRequest.decode_bytes(b"") + + def test_single_byte_rejected(self) -> None: + """Single byte payload is rejected.""" + with pytest.raises(SSZSerializationError): + BlocksByRangeRequest.decode_bytes(b"\x00") + + def test_eight_byte_payload_rejected(self) -> None: + """8 bytes (half-payload, single field) is rejected.""" + partial = (100).to_bytes(8, "little") + with pytest.raises(SSZSerializationError): + BlocksByRangeRequest.decode_bytes(partial) + + +class 
TestBlocksByRangeProtocolId: + """Protocol ID format tests.""" + + def test_protocol_id_format(self) -> None: + """Protocol ID follows lean namespace convention.""" + assert BLOCKS_BY_RANGE_PROTOCOL_V1.startswith("/leanconsensus/req/") + assert BLOCKS_BY_RANGE_PROTOCOL_V1.endswith("/ssz_snappy") + assert "blocks_by_range" in BLOCKS_BY_RANGE_PROTOCOL_V1 + + def test_protocol_id_version_is_v1(self) -> None: + """Protocol ID is version 1.""" + assert "/1/" in BLOCKS_BY_RANGE_PROTOCOL_V1 + + def test_protocol_id_distinct_from_blocks_by_root(self) -> None: + """BlocksByRange and BlocksByRoot have distinct protocol IDs.""" + from lean_spec.subspecs.networking.reqresp.message import ( + BLOCKS_BY_ROOT_PROTOCOL_V1, + ) + + assert BLOCKS_BY_RANGE_PROTOCOL_V1 != BLOCKS_BY_ROOT_PROTOCOL_V1 + + +class TestRequestHandlerBlocksByRange: + """Tests for RequestHandler.handle_blocks_by_range.""" + + async def test_returns_exactly_count_consecutive_blocks(self) -> None: + """Returns exactly `count` consecutive blocks from start_slot when all retained.""" + blocks_db: dict[int, SignedBlock] = {} + for i in range(5): + blocks_db[4000 + i] = make_test_block(slot=4000 + i, seed=40 + i) + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return blocks_db.get(int(slot)) + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(5)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.errors) == 0 + assert len(response.successes) == 5 + + for i, ssz_data in enumerate(response.successes): + decoded = SignedBlock.decode_bytes(ssz_data) + assert decoded.block.slot == Slot(4000 + i) + + async def test_returns_fewer_when_range_overruns_head(self) -> None: + """Returns fewer than count when range overruns head, no error.""" + blocks_db: dict[int, SignedBlock] = {} + for i in range(3): + blocks_db[4000 + i] = 
make_test_block(slot=4000 + i, seed=40 + i) + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return blocks_db.get(int(slot)) + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + # Request 10 blocks but only 3 exist + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(10)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.errors) == 0 + assert len(response.successes) == 3 + + async def test_skips_empty_slots_preserves_monotonicity(self) -> None: + """Skips empty slots, preserves slot monotonicity.""" + # Blocks at slots 4000, 4002, 4004 (4001 and 4003 are empty) + blocks_db: dict[int, SignedBlock] = { + 4000: make_test_block(slot=4000, seed=40), + 4002: make_test_block(slot=4002, seed=42), + 4004: make_test_block(slot=4004, seed=44), + } + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return blocks_db.get(int(slot)) + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(5)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.errors) == 0 + assert len(response.successes) == 3 + + slots = [SignedBlock.decode_bytes(s).block.slot for s in response.successes] + assert slots == [Slot(4000), Slot(4002), Slot(4004)] + # Verify monotonicity + for i in range(1, len(slots)): + assert slots[i] > slots[i - 1] + + async def test_resource_unavailable_when_start_slot_predates_history(self) -> None: + """RESOURCE_UNAVAILABLE when start_slot predates retained history window.""" + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + # MIN_BLOCK_REQUESTS_HISTORY_SLOT is 3600, request below that + request = BlocksByRangeRequest(start_slot=Slot(0), 
count=Uint64(10)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.successes) == 0 + assert len(response.errors) == 1 + assert response.errors[0][0] == ResponseCode.RESOURCE_UNAVAILABLE + + async def test_invalid_request_on_count_zero(self) -> None: + """INVALID_REQUEST when count == 0.""" + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(0)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.successes) == 0 + assert len(response.errors) == 1 + assert response.errors[0][0] == ResponseCode.INVALID_REQUEST + + async def test_invalid_request_on_count_exceeds_max(self) -> None: + """INVALID_REQUEST when count > MAX_REQUEST_BLOCKS.""" + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest( + start_slot=Slot(4000), + count=Uint64(MAX_REQUEST_BLOCKS + 1), + ) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.successes) == 0 + assert len(response.errors) == 1 + assert response.errors[0][0] == ResponseCode.INVALID_REQUEST + + async def test_count_at_max_boundary_succeeds(self) -> None: + """count == MAX_REQUEST_BLOCKS is valid (boundary case).""" + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return None # No blocks, but request is valid + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest( + start_slot=Slot(4000), + count=Uint64(MAX_REQUEST_BLOCKS), + ) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + # No INVALID_REQUEST error; empty 
response is fine (no blocks) + assert len(response.errors) == 0 + assert len(response.successes) == 0 + + async def test_no_block_by_slot_lookup_returns_error(self) -> None: + """SERVER_ERROR when no block_by_slot_lookup callback is configured.""" + handler = RequestHandler() # No lookup set + response = MockResponseStream() + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(5)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert len(response.successes) == 0 + assert len(response.errors) == 1 + assert response.errors[0][0] == ResponseCode.SERVER_ERROR + assert "not available" in response.errors[0][1] + + async def test_lookup_error_continues(self) -> None: + """Lookup exceptions are caught and processing continues.""" + block_at_4001 = make_test_block(slot=4001, seed=41) + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + if int(slot) == 4000: + raise RuntimeError("Database error") + if int(slot) == 4001: + return block_at_4001 + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(3)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + # Slot 4000 errors, slot 4001 succeeds, slot 4002 returns None + assert len(response.errors) == 0 + assert len(response.successes) == 1 + decoded = SignedBlock.decode_bytes(response.successes[0]) + assert decoded.block.slot == Slot(4001) + + async def test_empty_range_returns_no_blocks(self) -> None: + """Range with no blocks at all returns empty (no error).""" + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + response = MockResponseStream() + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(5)) + await handler.handle_blocks_by_range(request, response) # type: ignore[arg-type] + + assert 
len(response.errors) == 0 + assert len(response.successes) == 0 + + +class TestReqRespServerBlocksByRange: + """Full ReqRespServer integration tests for BlocksByRange.""" + + async def test_handle_blocks_by_range_request(self) -> None: + """Full BlocksByRange request/response flow through ReqRespServer.""" + block = make_test_block(slot=4000, seed=40) + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + if int(slot) == 4000: + return block + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + server = ReqRespServer(handler=handler) + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(1)) + request_bytes = encode_request(request.encode_bytes()) + + stream = MockStream(request_data=request_bytes) + await server.handle_stream(stream, BLOCKS_BY_RANGE_PROTOCOL_V1) + + assert stream.closed is True + assert len(stream.written) >= 1 + + code, ssz_data = ResponseCode.decode(stream.written[0]) + assert code == ResponseCode.SUCCESS + + returned_block = SignedBlock.decode_bytes(ssz_data) + assert returned_block.block.slot == Slot(4000) + + async def test_invalid_ssz_returns_invalid_request(self) -> None: + """Invalid SSZ for BlocksByRange returns INVALID_REQUEST.""" + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return None + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + server = ReqRespServer(handler=handler) + + invalid_ssz = b"\xff" * 10 + request_bytes = encode_request(invalid_ssz) + stream = MockStream(request_data=request_bytes) + + await server.handle_stream(stream, BLOCKS_BY_RANGE_PROTOCOL_V1) + + assert stream.closed is True + assert len(stream.written) >= 1 + + code, _ = ResponseCode.decode(stream.written[0]) + assert code == ResponseCode.INVALID_REQUEST + + async def test_protocol_id_in_reqresp_set(self) -> None: + """BlocksByRange protocol ID is in REQRESP_PROTOCOL_IDS.""" + assert BLOCKS_BY_RANGE_PROTOCOL_V1 in REQRESP_PROTOCOL_IDS + + async def 
test_roundtrip_blocks_by_range_multiple(self) -> None: + """Full encode -> server -> decode roundtrip for multiple blocks.""" + blocks_db: dict[int, SignedBlock] = {} + for i in range(3): + blocks_db[4000 + i] = make_test_block(slot=4000 + i, seed=40 + i) + + async def slot_lookup(slot: Slot) -> SignedBlock | None: + return blocks_db.get(int(slot)) + + handler = RequestHandler(block_by_slot_lookup=slot_lookup) + server = ReqRespServer(handler=handler) + + request = BlocksByRangeRequest(start_slot=Slot(4000), count=Uint64(3)) + request_wire = encode_request(request.encode_bytes()) + stream = MockStream(request_data=request_wire) + + await server.handle_stream(stream, BLOCKS_BY_RANGE_PROTOCOL_V1) + + blocks = [] + for response_wire in stream.written: + code, ssz_bytes = ResponseCode.decode(response_wire) + if code == ResponseCode.SUCCESS: + blocks.append(SignedBlock.decode_bytes(ssz_bytes)) + + assert len(blocks) == 3 + for i, block in enumerate(blocks): + assert block.block.slot == Slot(4000 + i) diff --git a/tests/lean_spec/subspecs/sync/test_backfill_sync.py b/tests/lean_spec/subspecs/sync/test_backfill_sync.py index aec7fcb66..9826448c9 100644 --- a/tests/lean_spec/subspecs/sync/test_backfill_sync.py +++ b/tests/lean_spec/subspecs/sync/test_backfill_sync.py @@ -2,6 +2,8 @@ from __future__ import annotations +from unittest.mock import MagicMock + import pytest from lean_spec.subspecs.containers.slot import Slot @@ -18,7 +20,7 @@ PeerManager, SyncPeer, ) -from lean_spec.types import Bytes32 +from lean_spec.types import Bytes32, Uint64 from tests.lean_spec.helpers import MockNetworkRequester, make_signed_block @@ -29,7 +31,20 @@ def network() -> MockNetworkRequester: @pytest.fixture -def backfill_system(peer_id: PeerId, network: MockNetworkRequester) -> BackfillSync: +def store() -> MagicMock: + """Provide a mock store.""" + store = MagicMock() + store.blocks = {} + store.latest_finalized = MagicMock() + store.latest_finalized.slot = Slot(0) + 
store.latest_finalized.root = Bytes32.zero() + return store + + +@pytest.fixture +def backfill_system( + peer_id: PeerId, network: MockNetworkRequester, store: MagicMock +) -> BackfillSync: """Provide a complete BackfillSync with connected peer.""" manager = PeerManager() manager.add_peer(PeerInfo(peer_id=peer_id, state=ConnectionState.CONNECTED)) @@ -37,6 +52,8 @@ def backfill_system(peer_id: PeerId, network: MockNetworkRequester) -> BackfillS peer_manager=manager, block_cache=BlockCache(), network=network, + is_known_root=lambda root: root in store.blocks, + get_finalized_slot=lambda: store.latest_finalized.slot, ) @@ -79,6 +96,9 @@ async def test_recursive_parent_chain_resolution( peer_id: PeerId, ) -> None: """Backfill recursively fetches missing parents up the chain.""" + # Disable gap detection to test pure recursion. + backfill_system.get_finalized_slot = None + grandparent = make_signed_block( slot=Slot(1), proposer_index=ValidatorIndex(0), @@ -388,3 +408,114 @@ async def test_retry_after_failure_clears_pending( network.should_fail = False await backfill.fill_missing([root]) assert root in backfill.block_cache + + +class TestBackfillOptimizations: + """Tests for range sync and store awareness in BackfillSync.""" + + async def test_store_awareness_skips_known_parents( + self, + backfill_system: BackfillSync, + network: MockNetworkRequester, + store: MagicMock, + peer_id: PeerId, + ) -> None: + """Backfill does not request parents that are already in the Store.""" + # Parent is in the store. + parent_root = Bytes32(b"\x01" * 32) + store.blocks[parent_root] = MagicMock() + + # Child is received. + child = make_signed_block( + slot=Slot(10), + parent_root=parent_root, + proposer_index=ValidatorIndex(0), + state_root=Bytes32.zero(), + ) + child_root = network.add_block(child) + + await backfill_system.fill_missing([child_root]) + + # Verify child was added to cache. 
+ assert child_root in backfill_system.block_cache + + # Verify NO request was made for parent (since it's in Store). + # The request_log should only contain the initial request for the child. + assert len(network.request_log) == 1 + assert network.request_log[0][1] == [child_root] + + async def test_range_sync_triggered_by_large_gap_during_backfill( + self, + backfill_system: BackfillSync, + network: MockNetworkRequester, + store: MagicMock, + peer_id: PeerId, + ) -> None: + """Backfill triggers range sync when a large gap is detected.""" + # Store is at slot 0. + store.latest_finalized.slot = Slot(0) + + # Pre-fill the parent in the network at slot 50. + block_50 = make_signed_block( + slot=Slot(50), + parent_root=Bytes32.zero(), + proposer_index=ValidatorIndex(0), + state_root=Bytes32.zero(), + ) + parent_root = network.add_block(block_50) + + # Receive a block at slot 100 via fill_missing. + block_100 = make_signed_block( + slot=Slot(100), + parent_root=parent_root, + proposer_index=ValidatorIndex(0), + state_root=Bytes32.zero(), + ) + root_100 = network.add_block(block_100) + + # Parent of block_50 is in store. + store.blocks[Bytes32.zero()] = MagicMock() + + await backfill_system.fill_missing([root_100]) + + # Log should contain: + # 1. BlocksByRoot(root_100) + # 2. BlocksByRange(1, 99) + assert len(network.request_log) == 2 + assert network.request_log[0][1] == [root_100] + assert network.request_log[1][1] == (Slot(1), Uint64(99)) + + async def test_range_deduplication( + self, + backfill_system: BackfillSync, + network: MockNetworkRequester, + ) -> None: + """Multiple overlapping range requests are deduplicated.""" + # Request range 1-10. + await backfill_system.fill_range(start_slot=Slot(1), count=Uint64(10)) + assert backfill_system._max_range_slot == Slot(10) + assert len(network.request_log) == 1 + assert network.request_log[0][1] == (Slot(1), Uint64(10)) + + # Request range 5-15. 
+ await backfill_system.fill_range(start_slot=Slot(5), count=Uint64(11)) + + # Should only request 11-15 (count=5). + assert len(network.request_log) == 2 + assert network.request_log[1][1] == (Slot(11), Uint64(5)) + assert backfill_system._max_range_slot == Slot(15) + + async def test_full_range_skip_if_already_covered( + self, + backfill_system: BackfillSync, + network: MockNetworkRequester, + ) -> None: + """Range requests fully covered by previous ones are skipped entirely.""" + await backfill_system.fill_range(start_slot=Slot(1), count=Uint64(100)) + assert len(network.request_log) == 1 + + # Request a sub-range. + await backfill_system.fill_range(start_slot=Slot(10), count=Uint64(20)) + + # No new request should be made. + assert len(network.request_log) == 1