diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a39e4d6..de7f8ec 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,23 +1,39 @@
name: CI
-on:
+
+# NOTE: keep UV_VERSION in sync with the "uv-version" field in the s2-lite-integration-tests sdks JSON below.
+env:
+ UV_VERSION: "0.11.3"
+
+on:
pull_request:
- types: [opened, edited, synchronize, labeled, unlabeled, ready_for_review, reopened]
+ types:
+ [
+ opened,
+ edited,
+ synchronize,
+ labeled,
+ unlabeled,
+ ready_for_review,
+ reopened,
+ ]
jobs:
- ci:
- name: CI
+ local-checks:
+ name: Local Checks (code quality, unit tests, docs build, PR title)
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install uv
- uses: astral-sh/setup-uv@v6
+ uses: astral-sh/setup-uv@v7
with:
- version: "0.8.2"
+ version: ${{ env.UV_VERSION }}
- name: Sync dependencies
run: |
uv sync --all-groups
- name: Static code check
run: uv run poe ci_checker
+ - name: Unit tests
+ run: uv run pytest tests/ -v -m 'not (account or basin or stream or metrics)'
- name: Check docs build
working-directory: ./docs
run: |
@@ -26,20 +42,61 @@ jobs:
uses: actions/github-script@v7
with:
script: |
- const title = context.payload.pull_request.title;
- const labels = context.payload.pull_request.labels.map(l => l.name);
- if (labels.includes('dev')) {
- const regex = /^(?!feat|fix|refactor|docs|perf|style|test|chore|revert)[a-z].*$/;
- if (!regex.test(title)) {
- core.setFailed(
- `PR title "${title}" does not match the commit format for non-user-facing changes`
- );
- }
- } else {
- const regex = /^(feat|fix|refactor|docs|perf|style|test|chore|revert)!?:[ ][a-z].*$/;
- if (!regex.test(title)) {
- core.setFailed(
- `PR title "${title}" does not match the expected conventional commit format for user-facing changes`
- );
- }
+ const title = context.payload.pull_request.title;
+ const labels = context.payload.pull_request.labels.map(l => l.name);
+ if (labels.includes('dev')) {
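+            // e.g. "bump uv to 0.11.3" (no conventional-commit type prefix, starts lowercase)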
+ const regex = /^(?!feat|fix|refactor|docs|perf|style|test|chore|revert)[a-z].*$/;
+ if (!regex.test(title)) {
+ core.setFailed(
+ `PR title "${title}" does not match the commit format for non-user-facing changes`
+ );
}
+ } else {
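+            // e.g. "feat: add producer api" or "fix!: handle tail offset correctly"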
+ const regex = /^(feat|fix|refactor|docs|perf|style|test|chore|revert)!?:[ ][a-z].*$/;
+ if (!regex.test(title)) {
+ core.setFailed(
+ `PR title "${title}" does not match the expected conventional commit format for user-facing changes`
+ );
+ }
+ }
+
+ s2-cloud-integration-tests:
+ name: s2-cloud integration tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ - name: Install uv
+ uses: astral-sh/setup-uv@v7
+ with:
+ version: ${{ env.UV_VERSION }}
+ - name: Sync dependencies
+ run: uv sync --group test
+ - name: Run integration tests
+ env:
+ S2_ACCESS_TOKEN: ${{ secrets.S2_ACCESS_TOKEN }}
+ run: uv run pytest tests/ -v -s -m 'account or basin or stream or metrics'
+
+ build-s2-lite:
+ name: Build s2-lite
+ uses: s2-streamstore/s2/.github/workflows/build-s2-lite.yml@main
+
+ s2-lite-integration-tests:
+ name: s2-lite integration tests
+ needs: build-s2-lite
+ uses: s2-streamstore/s2/.github/workflows/sdk-tests.yml@main
+ with:
+ server-binary: server
+ server-args: "--port 8080"
+ server-port: 8080
+ sdks: |
+ [
+ {
+ "name": "python",
+ "repo": "${{ github.repository }}",
+ "ref": "${{ github.ref }}",
+ "lang": "python",
+ "uv-version": "0.11.3",
+ "test_cmd": "uv run pytest tests/ -v -s -m '(account or basin or stream) and not cloud_only'"
+ }
+ ]
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 8f39790..4cf0c3b 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -1,4 +1,4 @@
-name: Release streamstore package
+name: Release s2-sdk package
on:
push:
tags: ["[0-9]+.[0-9]+.[0-9]+*"]
diff --git a/README.md b/README.md
index 57eff36..83ee342 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,10 @@
-# streamstore
+# s2-sdk
-`streamstore` is the Python package that provides an async client for interacting with [s2.dev](https://s2.dev/).
+`s2-sdk` is a Python package that provides an async client for interacting with [s2.dev](https://s2.dev/). The import name is `s2_sdk`.
## Project links
-- [PyPI](https://pypi.org/project/streamstore/)
-- [Documentation](https://streamstore.readthedocs.io/)
+- [PyPI](https://pypi.org/project/s2-sdk/)
+- [Documentation](https://s2-sdk.readthedocs.io/)
- [GitHub](https://github.com/s2-streamstore/s2-sdk-python)
## Requirements
@@ -26,10 +26,10 @@ Python >= 3.11
## Installation
-You can install the package from the [Python Package Index](https://pypi.org/project/streamstore) using the package manager of your choice. E.g., with `pip`:
+You can install the package from the [Python Package Index](https://pypi.org/project/s2-sdk) using the package manager of your choice. E.g., with `pip`:
```bash
-pip install streamstore
+pip install s2-sdk
```
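+
+Or, if you manage your project with [uv](https://docs.astral.sh/uv/):
+
+```bash
+uv add s2-sdk
+```
+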
## Examples
diff --git a/docs/source/api-reference.md b/docs/source/api-reference.md
index a81775c..569a06c 100644
--- a/docs/source/api-reference.md
+++ b/docs/source/api-reference.md
@@ -1,36 +1,183 @@
# API Reference
```{eval-rst}
-.. module:: streamstore
+.. module:: s2_sdk
.. autoclass:: S2
:members:
- :member-order: bysource
-.. autoclass:: Basin()
+
+.. autoclass:: S2Basin()
+ :members:
+
+
+.. autoclass:: S2Stream()
+ :members:
+
+
+.. autoclass:: AppendSession()
+ :members:
+
+
+.. autoclass:: BatchSubmitTicket()
+ :members:
+
+.. autoclass:: Producer()
+ :members:
+
+
+.. autoclass:: RecordSubmitTicket()
+ :members:
+
+.. autoclass:: Endpoints
+ :members:
+
+
+.. autoclass:: Timeout(request: timedelta = timedelta(seconds=5), connection: timedelta = timedelta(seconds=3))
+ :members:
+
+.. autoclass:: Retry(max_attempts: int = 3, min_base_delay: timedelta = timedelta(milliseconds=100), max_base_delay: timedelta = timedelta(seconds=1), append_retry_policy: AppendRetryPolicy = AppendRetryPolicy.ALL)
+ :members:
+
+.. autoclass:: Batching(max_records: int = 1000, max_bytes: int = 1048576, linger: timedelta = timedelta(milliseconds=5))
+ :members:
+
+.. autoclass:: Record(body: bytes, headers: list[tuple[bytes, bytes]] = [], timestamp: int | None = None)
+ :members:
+
+.. autoclass:: AppendInput
+ :members:
+
+.. autoclass:: AppendAck()
+ :members:
+
+.. autoclass:: IndexedAppendAck()
+ :members:
+
+.. autoclass:: StreamPosition()
+ :members:
+
+.. autoclass:: ReadLimit
+ :members:
+
+.. autoclass:: ReadBatch()
+ :members:
+
+.. autoclass:: SequencedRecord()
+ :members:
+
+.. autoclass:: SeqNum
+ :members:
+
+.. autoclass:: Timestamp
+ :members:
+
+.. autoclass:: TailOffset
+ :members:
+
+.. autoclass:: Page()
+ :members:
+
+.. autoclass:: CommandRecord()
+ :members:
+
+
+.. autofunction:: metered_bytes
+
+.. autofunction:: append_record_batches
+
+.. autofunction:: append_inputs
+
+.. autoenum:: Compression
+
+.. autoenum:: AppendRetryPolicy
+
+.. autoenum:: StorageClass
+
+.. autoenum:: TimestampingMode
+
+.. autoclass:: Timestamping
+ :members:
+
+.. autoclass:: StreamConfig
+ :members:
+
+.. autoclass:: BasinConfig
+ :members:
+
+.. autoenum:: BasinScope
+
+.. autoclass:: BasinInfo()
+ :members:
+
+.. autoclass:: StreamInfo()
+ :members:
+
+.. autoclass:: ExactMatch
+ :members:
+
+.. autoclass:: PrefixMatch
+ :members:
+
+.. autoenum:: Permission
+
+.. autoenum:: Operation
+
+.. autoclass:: OperationGroupPermissions
+ :members:
+
+.. autoclass:: AccessTokenScope(basins: ExactMatch | PrefixMatch | None = None, streams: ExactMatch | PrefixMatch | None = None, access_tokens: ExactMatch | PrefixMatch | None = None, op_groups: OperationGroupPermissions | None = None, ops: list[Operation] = [])
+ :members:
+
+.. autoclass:: AccessTokenInfo()
+ :members:
+
+.. autoenum:: MetricUnit
+
+.. autoenum:: TimeseriesInterval
+
+.. autoenum:: AccountMetricSet
+
+.. autoenum:: BasinMetricSet
+
+.. autoenum:: StreamMetricSet
+
+.. autoclass:: Scalar()
+ :members:
+
+.. autoclass:: Accumulation()
+ :members:
+
+.. autoclass:: Gauge()
+ :members:
+
+.. autoclass:: Label()
+ :members:
+
+.. autoclass:: S2Error()
+ :members:
+
+.. autoclass:: S2ClientError()
:members:
- :member-order: bysource
+ :show-inheritance:
-.. autoclass:: Stream()
+.. autoclass:: S2ServerError()
:members:
- :member-order: bysource
+ :show-inheritance:
-.. module:: streamstore.schemas
- :no-index:
-.. autoclass:: Record(body: bytes, headers: list[tuple[bytes, bytes]] = [])
+.. autoclass:: AppendConditionError()
:members:
+ :show-inheritance:
-.. automodule:: streamstore.schemas
+.. autoclass:: FencingTokenMismatchError()
:members:
- :exclude-members: Record, Endpoints
- :member-order: bysource
+ :show-inheritance:
-.. module:: streamstore.schemas
- :no-index:
-.. autoclass:: Endpoints()
+.. autoclass:: SeqNumMismatchError()
:members:
+ :show-inheritance:
-.. automodule:: streamstore.utils
+.. autoclass:: ReadUnwrittenError()
:members:
- :member-order: bysource
+ :show-inheritance:
```
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 7bb8cbb..112a68b 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -8,9 +8,9 @@
from datetime import date
-project = "streamstore"
+project = "s2-sdk"
copyright = f"{date.today().year}, Bandar Systems Inc"
-release = "5.0.0"
+release = "0.1.0"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
@@ -28,6 +28,11 @@
templates_path = ["_templates"]
exclude_patterns = []
+
+autodoc_member_order = "bysource"
+autodoc_typehints_format = "short"
+python_use_unqualified_type_names = True
+
intersphinx_mapping = {"python": ("https://docs.python.org/3", None)}
# -- Options for HTML output -------------------------------------------------
diff --git a/examples/append_session.py b/examples/append_session.py
index 4ce878a..37451bf 100644
--- a/examples/append_session.py
+++ b/examples/append_session.py
@@ -1,36 +1,31 @@
import asyncio
import os
import random
-from typing import AsyncIterable
-from streamstore import S2
-from streamstore.schemas import AppendInput, Record
+from s2_sdk import S2, AppendInput, Record
ACCESS_TOKEN = os.getenv("S2_ACCESS_TOKEN")
MY_BASIN = os.getenv("MY_BASIN")
MY_STREAM = os.getenv("MY_STREAM")
-async def append_inputs_gen() -> AsyncIterable[AppendInput]:
- num_inputs = random.randint(1, 100)
- for _ in range(num_inputs):
- num_records = random.randint(1, 100)
- records = []
- for _ in range(num_records):
- body_size = random.randint(1, 1024)
- records.append(Record(body=os.urandom(body_size)))
- input = AppendInput(records)
- if random.random() < 0.5:
- await asyncio.sleep(random.random() * 2.5)
- yield input
-
-
async def producer():
- async with S2(access_token=ACCESS_TOKEN) as s2:
+ async with S2(ACCESS_TOKEN) as s2:
stream = s2[MY_BASIN][MY_STREAM]
- async for output in stream.append_session(append_inputs_gen()):
- num_appended_records = output.end_seq_num - output.start_seq_num
- print(f"appended {num_appended_records} records")
+ async with stream.append_session() as session:
+ num_inputs = random.randint(1, 100)
+ for _ in range(num_inputs):
+ num_records = random.randint(1, 100)
+ records = []
+ for _ in range(num_records):
+ body_size = random.randint(1, 1024)
+ records.append(Record(body=os.urandom(body_size)))
+ ticket = await session.submit(AppendInput(records))
+ ack = await ticket
+ num_appended_records = ack.end.seq_num - ack.start.seq_num
+ print(f"appended {num_appended_records} records")
+ if random.random() < 0.5:
+ await asyncio.sleep(random.random() * 2.5)
if __name__ == "__main__":
diff --git a/examples/append_session_with_auto_batching.py b/examples/append_session_with_auto_batching.py
index cc0f501..d3988d7 100644
--- a/examples/append_session_with_auto_batching.py
+++ b/examples/append_session_with_auto_batching.py
@@ -4,9 +4,7 @@
from datetime import timedelta
from typing import AsyncIterable
-from streamstore import S2
-from streamstore.schemas import Record
-from streamstore.utils import append_inputs_gen
+from s2_sdk import S2, Batching, Record, append_inputs
ACCESS_TOKEN = os.getenv("S2_ACCESS_TOKEN")
MY_BASIN = os.getenv("MY_BASIN")
@@ -23,17 +21,20 @@ async def records_gen() -> AsyncIterable[Record]:
async def producer():
- async with S2(access_token=ACCESS_TOKEN) as s2:
+ async with S2(ACCESS_TOKEN) as s2:
stream = s2[MY_BASIN][MY_STREAM]
- async for output in stream.append_session(
- append_inputs_gen(
+ async with stream.append_session() as session:
+ async for batch in append_inputs(
records=records_gen(),
- max_records_per_batch=10,
- max_linger_per_batch=timedelta(milliseconds=5),
- )
- ):
- num_appended_records = output.end_seq_num - output.start_seq_num
- print(f"appended {num_appended_records} records")
+ batching=Batching(
+ max_records=10,
+ linger=timedelta(milliseconds=5),
+ ),
+ ):
+ ticket = await session.submit(batch)
+ ack = await ticket
+ num_appended_records = ack.end.seq_num - ack.start.seq_num
+ print(f"appended {num_appended_records} records")
if __name__ == "__main__":
diff --git a/examples/producer.py b/examples/producer.py
new file mode 100644
index 0000000..aeefe4b
--- /dev/null
+++ b/examples/producer.py
@@ -0,0 +1,24 @@
+import asyncio
+import os
+import random
+
+from s2_sdk import S2, Record
+
+ACCESS_TOKEN = os.getenv("S2_ACCESS_TOKEN")
+MY_BASIN = os.getenv("MY_BASIN")
+MY_STREAM = os.getenv("MY_STREAM")
+
+
+async def main():
+ async with S2(ACCESS_TOKEN) as s2:
+ stream = s2[MY_BASIN][MY_STREAM]
+ async with stream.producer() as producer:
+ for i in range(100):
+ body_size = random.randint(1, 1024)
+ ticket = await producer.submit(Record(body=os.urandom(body_size)))
+ ack = await ticket
+ print(f"seq_num={ack.seq_num}")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/examples/read_session.py b/examples/read_session.py
index 981b3c9..c03c1ab 100644
--- a/examples/read_session.py
+++ b/examples/read_session.py
@@ -1,8 +1,7 @@
import asyncio
import os
-from streamstore import S2
-from streamstore.schemas import SeqNum
+from s2_sdk import S2, SeqNum
ACCESS_TOKEN = os.getenv("S2_ACCESS_TOKEN")
MY_BASIN = os.getenv("MY_BASIN")
@@ -10,20 +9,14 @@
async def consumer():
- async with S2(access_token=ACCESS_TOKEN) as s2:
+ async with S2(ACCESS_TOKEN) as s2:
stream = s2[MY_BASIN][MY_STREAM]
tail = await stream.check_tail()
print(f"reading from tail: {tail}")
total_num_records = 0
- async for output in stream.read_session(start=SeqNum(tail.next_seq_num)):
- match output:
- case list(records):
- total_num_records += len(records)
- print(f"read {len(records)} now, {total_num_records} so far")
- case _:
- raise RuntimeError(
- "Records not received, which is unexpected as we start from the tail of the stream"
- )
+ async for batch in stream.read_session(start=SeqNum(tail.seq_num)):
+ total_num_records += len(batch.records)
+ print(f"read {len(batch.records)} now, {total_num_records} so far")
if __name__ == "__main__":
diff --git a/pyproject.toml b/pyproject.toml
index 21c4719..0ca3d4f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,26 +1,29 @@
[project]
-name = "streamstore"
-version = "5.0.0"
+name = "s2-sdk"
+version = "0.1.0"
description = "Python SDK for s2.dev"
readme = "README.md"
license = "MIT"
license-files = ["LICENSE"]
requires-python = ">=3.11"
dependencies = [
- "grpcio-tools>=1.69.0",
- "grpcio>=1.69.0",
- "types-protobuf>=5.29.1.20241207",
- "grpc-stubs>=1.53.0.5",
- "anyio>=4.8.0",
+ "h2>=4.1.0",
+ "protobuf>=5.29.0",
+ "zstandard>=0.23.0",
]
[dependency-groups]
-dev = ["mypy>=1.14.1", "poethepoet>=0.36.0", "ruff>=0.9.1"]
+dev = [
+ "grpcio-tools>=1.69.0",
+ "mypy>=1.14.1",
+ "poethepoet>=0.36.0",
+ "ruff>=0.9.1",
+ "types-protobuf>=5.29.1.20241207",
+]
test = [
"pytest>=8.0.0",
"pytest-asyncio>=0.23.0",
"pytest-timeout>=2.3.0",
- "pytest-xdist>=3.5.0",
]
docs = [
"enum-tools[sphinx]>=0.12.0",
@@ -28,14 +31,20 @@ docs = [
"myst-parser>=4.0.0",
"sphinx==8.1.3",
"sphinx-autodoc-typehints>=3.0.0",
+ "sphinx-autobuild>=2024.10.3",
]
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
+[tool.hatch.build.targets.wheel]
+packages = ["src/s2_sdk"]
+
[tool.mypy]
-files = ["src/", "tests/", "examples/"]
+files = ["src/s2_sdk/", "tests/"]
+exclude = ["src/s2_sdk/_generated/"]
+check_untyped_defs = true
[tool.ruff]
exclude = [
@@ -59,7 +68,10 @@ ci_linter = "uv run ruff check"
ci_formatter = "uv run ruff format --check"
checker = ["linter", "formatter", "type_checker"]
ci_checker = ["ci_linter", "ci_formatter", "type_checker"]
-e2e_tests = "uv run pytest tests/ -v -s"
+docs_check = "uv run --group docs sphinx-build -W -b html docs/source docs/build/html"
+docs_clean = "rm -rf docs/build"
+docs_live.sequence = ["docs_clean", "docs_check", {cmd = "uv run --group docs sphinx-autobuild docs/source docs/build/html --watch src/s2_sdk"}]
+e2e_tests = "uv run pytest tests/ -v -s -m 'account or basin or stream'"
e2e_account_tests = "uv run pytest tests/ -v -s -m account"
e2e_basin_tests = "uv run pytest tests/ -v -s -m basin"
e2e_stream_tests = "uv run pytest tests/ -v -s -m stream"
diff --git a/pytest.ini b/pytest.ini
index 5a18750..b998b45 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -16,3 +16,5 @@ markers =
account: tests for account operations
basin: tests for basin operations
stream: tests for stream operations
+ metrics: tests for metrics operations
+ cloud_only: tests that require s2-cloud (not supported on s2-lite)
diff --git a/s2-specs b/s2-specs
index 8babbd6..7d9167d 160000
--- a/s2-specs
+++ b/s2-specs
@@ -1 +1 @@
-Subproject commit 8babbd68f20829b8ec92bbe4864a3f020782bd5c
+Subproject commit 7d9167d55609c98830278bd00c66f188fdd0099d
diff --git a/src/s2_sdk/__init__.py b/src/s2_sdk/__init__.py
new file mode 100644
index 0000000..34eba20
--- /dev/null
+++ b/src/s2_sdk/__init__.py
@@ -0,0 +1,122 @@
+from s2_sdk._append_session import AppendSession, BatchSubmitTicket
+from s2_sdk._batching import append_inputs, append_record_batches
+from s2_sdk._exceptions import (
+ AppendConditionError,
+ FencingTokenMismatchError,
+ ReadUnwrittenError,
+ S2ClientError,
+ S2Error,
+ S2ServerError,
+ SeqNumMismatchError,
+)
+from s2_sdk._ops import S2, S2Basin, S2Stream
+from s2_sdk._producer import Producer, RecordSubmitTicket
+from s2_sdk._types import (
+ AccessTokenInfo,
+ AccessTokenScope,
+ AccountMetricSet,
+ Accumulation,
+ AppendAck,
+ AppendInput,
+ AppendRetryPolicy,
+ BasinConfig,
+ BasinInfo,
+ BasinMetricSet,
+ BasinScope,
+ Batching,
+ CommandRecord,
+ Compression,
+ Endpoints,
+ ExactMatch,
+ Gauge,
+ IndexedAppendAck,
+ Label,
+ MetricUnit,
+ Operation,
+ OperationGroupPermissions,
+ Page,
+ Permission,
+ PrefixMatch,
+ ReadBatch,
+ ReadLimit,
+ Record,
+ Retry,
+ Scalar,
+ SeqNum,
+ SequencedRecord,
+ StorageClass,
+ StreamConfig,
+ StreamInfo,
+ StreamMetricSet,
+ StreamPosition,
+ TailOffset,
+ Timeout,
+ TimeseriesInterval,
+ Timestamp,
+ Timestamping,
+ TimestampingMode,
+ metered_bytes,
+)
+
+__all__ = [
+ "S2",
+ "S2Basin",
+ "S2Stream",
+ "AppendSession",
+ "BatchSubmitTicket",
+ "Producer",
+ "RecordSubmitTicket",
+ "append_record_batches",
+ "append_inputs",
+ "Retry",
+ "Timeout",
+ "Compression",
+ "AppendRetryPolicy",
+ "Batching",
+ "Endpoints",
+ "Record",
+ "AppendInput",
+ "AppendAck",
+ "IndexedAppendAck",
+ "StreamPosition",
+ "SeqNum",
+ "Timestamp",
+ "TailOffset",
+ "ReadBatch",
+ "ReadLimit",
+ "SequencedRecord",
+ "Page",
+ "CommandRecord",
+ "metered_bytes",
+ "StorageClass",
+ "TimestampingMode",
+ "Timestamping",
+ "StreamConfig",
+ "BasinConfig",
+ "BasinInfo",
+ "BasinScope",
+ "StreamInfo",
+ "ExactMatch",
+ "PrefixMatch",
+ "Permission",
+ "Operation",
+ "OperationGroupPermissions",
+ "AccessTokenScope",
+ "AccessTokenInfo",
+ "MetricUnit",
+ "TimeseriesInterval",
+ "AccountMetricSet",
+ "BasinMetricSet",
+ "StreamMetricSet",
+ "Scalar",
+ "Accumulation",
+ "Gauge",
+ "Label",
+ "S2Error",
+ "S2ClientError",
+ "S2ServerError",
+ "AppendConditionError",
+ "FencingTokenMismatchError",
+ "SeqNumMismatchError",
+ "ReadUnwrittenError",
+]
diff --git a/src/s2_sdk/_append_session.py b/src/s2_sdk/_append_session.py
new file mode 100644
index 0000000..bd9cd17
--- /dev/null
+++ b/src/s2_sdk/_append_session.py
@@ -0,0 +1,218 @@
+from __future__ import annotations
+
+import asyncio
+from collections import deque
+from typing import AsyncIterable, NamedTuple, Self
+
+from s2_sdk._client import HttpClient
+from s2_sdk._exceptions import S2ClientError
+from s2_sdk._s2s._append_session import run_append_session
+from s2_sdk._types import (
+ AppendAck,
+ AppendInput,
+ Compression,
+ Retry,
+ metered_bytes,
+)
+from s2_sdk._validators import validate_append_input
+
+
+class _UnackedBatch(NamedTuple):
+ ack_fut: asyncio.Future[AppendAck]
+ metered_bytes: int
+
+
+class AppendSession:
+ """Session for high-throughput appending with backpressure control.
+
+ Supports pipelining multiple :class:`AppendInput`\ s while preserving
+ submission order.
+
+ Caution:
+ Returned by :meth:`S2Stream.append_session`. Do not instantiate directly.
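+
+    Example:
+        A minimal sketch, assuming ``stream`` is an :class:`S2Stream` and
+        ``records`` is a list of :class:`Record`::
+
+            async with stream.append_session() as session:
+                ticket = await session.submit(AppendInput(records))
+                ack = await ticket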
+ """
+
+ __slots__ = (
+ "_closed",
+ "_client",
+ "_compression",
+ "_error",
+ "_permits",
+ "_queue",
+ "_retry",
+ "_stream_name",
+ "_task",
+ "_unacked",
+ )
+
+ def __init__(
+ self,
+ client: HttpClient,
+ stream_name: str,
+ retry: Retry,
+ compression: Compression,
+ max_unacked_bytes: int,
+ max_unacked_batches: int | None,
+ ) -> None:
+ self._client = client
+ self._stream_name = stream_name
+ self._retry = retry
+ self._compression = compression
+ self._permits = _AppendPermits(max_unacked_bytes, max_unacked_batches)
+
+ self._queue: asyncio.Queue[AppendInput | None] = asyncio.Queue()
+ self._unacked: deque[_UnackedBatch] = deque()
+ self._closed = False
+ self._error: BaseException | None = None
+
+ self._task = asyncio.get_running_loop().create_task(self._run())
+
+ async def submit(self, inp: AppendInput) -> BatchSubmitTicket:
+ """Submit a batch of records for appending.
+
+ Waits when backpressure limits are reached.
+ """
+ self._check_ready()
+ batch_bytes = metered_bytes(inp.records)
+ validate_append_input(len(inp.records), batch_bytes)
+
+ await self._permits.acquire(batch_bytes)
+ # Re-check after potentially waiting on backpressure.
+ try:
+ self._check_ready()
+ except BaseException:
+ self._permits.release(batch_bytes)
+ raise
+
+ ack_fut: asyncio.Future[AppendAck] = asyncio.get_running_loop().create_future()
+ self._unacked.append(_UnackedBatch(ack_fut, batch_bytes))
+ await self._queue.put(inp)
+ return BatchSubmitTicket(ack_fut)
+
+ def _check_ready(self) -> None:
+ if self._closed:
+ raise S2ClientError("AppendSession is closed")
+ if self._error is not None:
+ raise self._error
+
+ async def close(self) -> None:
+ """Close the session and wait for all submitted batches to be appended."""
+ if self._closed:
+ return
+ self._closed = True
+ await self._queue.put(None)
+ await self._task
+ if self._error is not None:
+ raise self._error
+
+ async def __aenter__(self) -> Self:
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb) -> bool:
+ await self.close()
+ return False
+
+ async def _run(self) -> None:
+ try:
+ async for ack in run_append_session(
+ self._client,
+ self._stream_name,
+ self._input_iter(),
+ retry=self._retry,
+ compression=self._compression,
+ ack_timeout=self._client._request_timeout,
+ ):
+ self._resolve_next(ack)
+ except BaseException as e:
+ # Unwrap single-exception ExceptionGroups so callers see
+ # the original exception type (e.g. S2ServerError, SeqNumMismatchError).
+ exc = e
+ while isinstance(exc, BaseExceptionGroup) and len(exc.exceptions) == 1:
+ exc = exc.exceptions[0]
+ self._fail_all(exc)
+
+ async def _input_iter(self) -> AsyncIterable[AppendInput]:
+ while True:
+ item = await self._queue.get()
+ if item is None:
+ return
+ yield item
+
+ def _resolve_next(self, ack: AppendAck) -> None:
+ unacked = self._unacked.popleft()
+ self._permits.release(unacked.metered_bytes)
+ unacked.ack_fut.set_result(ack)
+
+ def _fail_all(self, error: BaseException) -> None:
+ self._error = error
+ for unacked in self._unacked:
+ self._permits.release(unacked.metered_bytes)
+ if not unacked.ack_fut.done():
+ unacked.ack_fut.set_exception(error)
+ self._unacked.clear()
+ # Drain queue
+ while not self._queue.empty():
+ try:
+ self._queue.get_nowait()
+ except asyncio.QueueEmpty:
+ break
+
+
+class BatchSubmitTicket:
+ """Awaitable that resolves to an :class:`AppendAck` once the batch is appended."""
+
+ __slots__ = ("_ack_fut",)
+
+ def __init__(self, ack_fut: asyncio.Future[AppendAck]) -> None:
+ self._ack_fut = ack_fut
+
+ def __await__(self):
+ return self._ack_fut.__await__()
+
+
+class _Semaphore:
+ __slots__ = ("_event", "_lock", "_value")
+
+ def __init__(self, value: int) -> None:
+ self._value = value
+ self._event = asyncio.Event()
+ self._event.set()
+ self._lock = asyncio.Lock()
+
+ async def acquire(self, n: int) -> None:
+ while True:
+ async with self._lock:
+ if self._value >= n:
+ self._value -= n
+ return
+ self._event.clear()
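+            # A release() may have run after clear(); re-check before blocking
+            # so we retry immediately instead of waiting on the event.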
+ if self._value >= n:
+ continue
+ await self._event.wait()
+
+ def release(self, n: int) -> None:
+ self._value += n
+ self._event.set()
+
+
+class _AppendPermits:
+ __slots__ = ("_bytes", "_count")
+
+ def __init__(self, max_bytes: int, max_count: int | None = None) -> None:
+ self._bytes = _Semaphore(max_bytes)
+ self._count = _Semaphore(max_count) if max_count is not None else None
+
+ async def acquire(self, n_bytes: int) -> None:
+ if self._count is not None:
+ await self._count.acquire(1)
+ try:
+ await self._bytes.acquire(n_bytes)
+ except BaseException:
+ if self._count is not None:
+ self._count.release(1)
+ raise
+
+ def release(self, n_bytes: int) -> None:
+ self._bytes.release(n_bytes)
+ if self._count is not None:
+ self._count.release(1)
diff --git a/src/s2_sdk/_batching.py b/src/s2_sdk/_batching.py
new file mode 100644
index 0000000..dccad1a
--- /dev/null
+++ b/src/s2_sdk/_batching.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import asyncio
+from typing import AsyncIterable
+
+from s2_sdk._types import AppendInput, Batching, Record, metered_bytes
+from s2_sdk._validators import validate_batching
+
+
+class BatchAccumulator:
+ __slots__ = ("_batching", "_bytes", "_records")
+
+ def __init__(self, batching: Batching) -> None:
+ self._batching = batching
+ self._records: list[Record] = []
+ self._bytes = 0
+
+ def add(self, record: Record) -> None:
+ self._records.append(record)
+ self._bytes += metered_bytes((record,))
+
+ def take(self) -> list[Record]:
+ records = list(self._records)
+ self._records.clear()
+ self._bytes = 0
+ return records
+
+ def is_full(self) -> bool:
+ return (
+ len(self._records) >= self._batching.max_records
+ or self._bytes >= self._batching.max_bytes
+ )
+
+ def is_empty(self) -> bool:
+ return len(self._records) == 0
+
+ @property
+ def linger(self) -> float:
+ return self._batching.linger.total_seconds()
+
+
+async def append_record_batches(
+ records: AsyncIterable[Record],
+ *,
+ batching: Batching | None = None,
+) -> AsyncIterable[list[Record]]:
+ """Group records into batches based on count, bytes, and linger time."""
+ if batching is None:
+ batching = Batching()
+ validate_batching(batching.max_records, batching.max_bytes)
+ acc = BatchAccumulator(batching)
+ linger_secs = batching.linger.total_seconds()
+ aiter = records.__aiter__()
+
+ while True:
+ try:
+ record = await anext(aiter)
+ except StopAsyncIteration:
+ break
+
+ acc.add(record)
+ if acc.is_full():
+ yield acc.take()
+ continue
+
+ try:
+ while not acc.is_full():
+ if linger_secs > 0:
+ record = await asyncio.wait_for(anext(aiter), timeout=linger_secs)
+ else:
+ record = await anext(aiter)
+ acc.add(record)
+ except StopAsyncIteration:
+ pass
+ except TimeoutError:
+ pass
+
+ if not acc.is_empty():
+ yield acc.take()
+
+
+async def append_inputs(
+ records: AsyncIterable[Record],
+ *,
+ match_seq_num: int | None = None,
+ fencing_token: str | None = None,
+ batching: Batching | None = None,
+) -> AsyncIterable[AppendInput]:
+ """Group records into :class:`AppendInput` batches based on count, bytes, and linger time.
+
+ If ``match_seq_num`` is set, it applies to the first input and is auto-incremented for subsequent ones.
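+
+    Example:
+        A minimal sketch, assuming ``records_gen()`` yields :class:`Record` objects
+        and ``session`` is an open :class:`AppendSession`::
+
+            async for inp in append_inputs(records_gen(), batching=Batching(max_records=10)):
+                ticket = await session.submit(inp)
+                ack = await ticket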
+ """
+ if batching is None:
+ batching = Batching()
+ async for batch in append_record_batches(records, batching=batching):
+ if not batch:
+ continue
+ append_input = AppendInput(
+ records=batch,
+ match_seq_num=match_seq_num,
+ fencing_token=fencing_token,
+ )
+ if match_seq_num is not None:
+ match_seq_num += len(batch)
+ yield append_input
diff --git a/src/s2_sdk/_client.py b/src/s2_sdk/_client.py
new file mode 100644
index 0000000..e139a05
--- /dev/null
+++ b/src/s2_sdk/_client.py
@@ -0,0 +1,1070 @@
+from __future__ import annotations
+
+import asyncio
+import json as json_lib
+import ssl
+import time
+from collections.abc import AsyncGenerator, AsyncIterator, Callable
+from contextlib import asynccontextmanager
+from dataclasses import dataclass, field
+from importlib.metadata import version
+from typing import Any, NamedTuple
+from urllib.parse import urlencode, urlsplit
+
+import h2.config
+import h2.connection
+import h2.events
+
+from s2_sdk._compression import compress, decompress
+from s2_sdk._exceptions import (
+ UNKNOWN_CODE,
+ ConnectError,
+ ConnectionClosedError,
+ ProtocolError,
+ ReadTimeoutError,
+ S2ClientError,
+ S2ServerError,
+ TransportError,
+ raise_for_412,
+ raise_for_416,
+)
+from s2_sdk._types import Compression
+
+_VERSION = version("s2-sdk")
+_USER_AGENT = f"s2-sdk-python/{_VERSION}"
+
+DEFAULT_MAX_STREAMS_PER_CONN = 100
+IDLE_TIMEOUT = 90.0
+REAPER_INTERVAL = 30.0
+_DRAIN_BUFFER_THRESHOLD = 65536 # 64 KiB — drain when write buffer exceeds this
+
+
+_COMPRESSION_ENCODING = {
+ Compression.ZSTD: "zstd",
+ Compression.GZIP: "gzip",
+}
+
+_ENCODING_COMPRESSION = {v: k for k, v in _COMPRESSION_ENCODING.items()}
+
+
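+# Thin HTTP/2 request layer bound to a single base URL. Each request checks out
+# a stream on a pooled connection and releases it when the request completes.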
+class HttpClient:
+ __slots__ = (
+ "_base_url",
+ "_pool",
+ "_request_timeout",
+ "_scheme",
+ "_authority",
+ "_headers",
+ "_compression",
+ )
+
+ def __init__(
+ self,
+ pool: ConnectionPool,
+ base_url: str,
+ request_timeout: float,
+ headers: dict[str, str] | None = None,
+ compression: Compression = Compression.NONE,
+ ) -> None:
+ self._pool = pool
+ self._base_url = base_url
+ self._request_timeout = request_timeout
+ origin = _origin(base_url)
+ self._scheme = origin.scheme
+ default_port = 443 if origin.scheme == "https" else 80
+ self._authority = (
+ origin.host
+ if origin.port == default_port
+ else f"{origin.host}:{origin.port}"
+ )
+ self._headers = headers
+ self._compression = compression
+
+ async def unary_request(
+ self,
+ method: str,
+ path: str,
+ *,
+ json: Any = None,
+ params: dict[str, Any] | None = None,
+ headers: dict[str, str] | None = None,
+ content: bytes | None = None,
+ ) -> Response:
+ pc, state = await self._pool.checkout(self._base_url)
+ conn = pc._conn
+ stream_id: int | None = None
+ try:
+ url_path = _build_path(path, params)
+ h2_headers = self._build_headers(method, url_path, headers)
+
+ # Build body
+ body: bytes | None = None
+ if json is not None:
+ body = json_lib.dumps(json).encode("utf-8")
+ h2_headers.append(("content-type", "application/json"))
+ elif content is not None:
+ body = content
+
+ # Compress request body
+ if body is not None and self._compression != Compression.NONE:
+ body = compress(body, self._compression)
+ h2_headers.append(
+ ("content-encoding", _COMPRESSION_ENCODING[self._compression])
+ )
+
+ if body is not None:
+ h2_headers.append(("content-length", str(len(body))))
+
+ end_stream = body is None
+ stream_id = await conn.send_headers(
+ state, h2_headers, end_stream=end_stream
+ )
+
+ if body is not None:
+ assert stream_id is not None
+ await conn.send_data(stream_id, body, end_stream=True)
+
+ # Wait for response headers
+ resp_headers = await asyncio.wait_for(
+ state.response_headers,
+ timeout=self._request_timeout,
+ )
+ status_code = _status_from_headers(resp_headers)
+
+ # Read full body
+ chunks: list[bytes] = []
+ while True:
+ item = await asyncio.wait_for(
+ state.data_queue.get(),
+ timeout=self._request_timeout,
+ )
+ if item is None:
+ break
+ chunk, flow_bytes = _queue_item_parts(item)
+ chunks.append(chunk)
+ if flow_bytes > 0:
+ state.unacked_flow_bytes -= flow_bytes
+ await conn.ack_data(stream_id, flow_bytes)
+
+ if state.error is not None and not state.end_stream_received:
+ raise state.error
+
+ resp_body = b"".join(chunks)
+
+ # Decompress response body
+ content_encoding = _header_value(resp_headers, "content-encoding")
+ if (
+ content_encoding is not None
+ and content_encoding in _ENCODING_COMPRESSION
+ ):
+ resp_body = decompress(
+ resp_body, _ENCODING_COMPRESSION[content_encoding]
+ )
+
+ response = Response(status_code, resp_body)
+ except TransportError:
+ raise
+ except asyncio.TimeoutError:
+ raise ReadTimeoutError("Request timed out")
+ finally:
+ if stream_id is not None:
+ nbytes = _take_all_unacked_flow_bytes(state)
+ if nbytes > 0:
+ try:
+ await conn.ack_data(stream_id, nbytes)
+ except Exception:
+ pass
+ if not state.ended.is_set():
+ await conn.reset_stream(stream_id)
+ conn.release_stream(stream_id, state)
+ pc.touch_idle()
+
+ retry_after_ms = _header_value(resp_headers, "retry-after-ms")
+ _raise_for_status(response, retry_after_ms=retry_after_ms)
+ return response
+
+ @asynccontextmanager
+ async def streaming_request(
+ self,
+ method: str,
+ path: str,
+ *,
+ params: dict[str, Any] | None = None,
+ headers: dict[str, str] | None = None,
+ content: Any = None,
+ frame_signal: Any = None,
+ ) -> AsyncIterator[StreamingResponse]:
+ pc, state = await self._pool.checkout(self._base_url)
+ conn = pc._conn
+ stream_id: int | None = None
+ send_task: asyncio.Task[None] | None = None
+ try:
+ url_path = _build_path(path, params)
+ h2_headers = self._build_headers(method, url_path, headers)
+
+ has_body = content is not None
+ stream_id = await conn.send_headers(
+ state, h2_headers, end_stream=not has_body
+ )
+
+ if has_body:
+ on_write = frame_signal.signal if frame_signal is not None else None
+ send_task = asyncio.get_running_loop().create_task(
+ # stream_id is assigned by send_headers above.
+ _drain_body(conn, stream_id, content, on_write)
+ )
+
+ # Propagate sender errors to stream state so the response
+ # reader doesn't hang forever on data_queue.get().
+ def _on_send_done(
+ task: asyncio.Task[None], _state: Any = state
+ ) -> None:
+ if task.cancelled():
+ return
+ exc = task.exception()
+ if exc is not None:
+ conn._fail_stream(_state, exc)
+
+ assert send_task is not None
+ send_task.add_done_callback(_on_send_done)
+
+ # Wait for response headers
+ resp_headers = await asyncio.wait_for(
+ state.response_headers,
+ timeout=self._request_timeout,
+ )
+ status_code = _status_from_headers(resp_headers)
+
+ async def _ack_stream_data(nbytes: int) -> None:
+ assert stream_id is not None
+ await conn.ack_data(stream_id, nbytes)
+
+ response = StreamingResponse(
+ status_code=status_code,
+ data_queue=state.data_queue,
+ ended=state.ended,
+ stream_state=state,
+ ack=_ack_stream_data,
+ )
+ yield response
+ except TransportError:
+ raise
+ except asyncio.TimeoutError:
+ raise ReadTimeoutError("Streaming request timed out")
+ finally:
+ if send_task is not None and not send_task.done():
+ send_task.cancel()
+ try:
+ await send_task
+ except (asyncio.CancelledError, Exception):
+ pass
+ # Ack remaining flow bytes to keep connection window healthy
+ if stream_id is not None:
+ nbytes = _take_all_unacked_flow_bytes(state)
+ if nbytes > 0:
+ try:
+ await conn.ack_data(stream_id, nbytes)
+ except Exception:
+ pass
+ if not state.ended.is_set():
+ await conn.reset_stream(stream_id)
+ conn.release_stream(stream_id, state)
+ pc.touch_idle()
+
+ def _build_headers(
+ self,
+ method: str,
+ url_path: str,
+ extra_headers: dict[str, str] | None = None,
+ ) -> list[tuple[str, str]]:
+ h = [
+ (":method", method),
+ (":path", url_path),
+ (":scheme", self._scheme),
+ (":authority", self._authority),
+ ("user-agent", _USER_AGENT),
+ ]
+ if self._compression != Compression.NONE:
+ h.append(("accept-encoding", _COMPRESSION_ENCODING[self._compression]))
+ if self._headers:
+ for k, v in self._headers.items():
+ h.append((k.lower(), v))
+ if extra_headers:
+ for k, v in extra_headers.items():
+ h.append((k.lower(), v))
+ return h
+
+
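+# Pools HTTP/2 connections per origin, opening additional connections when the
+# existing ones run out of concurrent-stream capacity and reaping idle ones.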
+class ConnectionPool:
+ __slots__ = (
+ "_closed",
+ "_connect_timeout",
+ "_hosts",
+ "_host_locks",
+ "_reaper_task",
+ "_ssl_context",
+ )
+
+ def __init__(self, connect_timeout: float) -> None:
+ self._connect_timeout = connect_timeout
+ self._hosts: dict[str, list[_PooledConnection]] = {}
+ self._host_locks: dict[str, asyncio.Lock] = {}
+ self._reaper_task: asyncio.Task[None] | None = None
+ self._closed = False
+ self._ssl_context = ssl.create_default_context()
+ self._ssl_context.set_alpn_protocols(["h2"])
+
+ async def checkout(self, base_url: str) -> _Checkout:
+ if self._closed:
+ raise S2ClientError("Pool is closed")
+
+ self._ensure_reaper()
+
+ result = self._try_checkout(base_url)
+ if result is not None:
+ return result
+
+ lock = self._host_locks.get(base_url)
+ if lock is None:
+ lock = asyncio.Lock()
+ self._host_locks[base_url] = lock
+
+ async with lock:
+ # Re-check after acquiring lock — another caller may have
+ # created a connection while we waited.
+ result = self._try_checkout(base_url)
+ if result is not None:
+ return result
+
+ scheme, host, port = _origin(base_url)
+ use_ssl = self._ssl_context if scheme == "https" else None
+
+ conn = Connection(
+ host=host,
+ port=port,
+ ssl_context=use_ssl,
+ connect_timeout=self._connect_timeout,
+ )
+ await conn.connect()
+
+ # Wait briefly for the server's initial SETTINGS frame so
+ # max_concurrent_streams reflects the real limit.
+ try:
+ await asyncio.wait_for(
+ conn._settings_received.wait(),
+ timeout=self._connect_timeout,
+ )
+ except asyncio.TimeoutError:
+ pass # Proceed with h2 defaults
+ if conn._recv_dead:
+ await conn.close()
+ raise ConnectError(
+ f"Connection to {host}:{port} closed before HTTP/2 SETTINGS"
+ )
+
+ pc = _PooledConnection(conn)
+ conns = self._hosts.get(base_url)
+ if conns is None:
+ conns = [pc]
+ self._hosts[base_url] = conns
+ else:
+ conns.append(pc)
+ if conn._settings_received.is_set() and conn.max_concurrent_streams <= 0:
+ await pc.close()
+ conns.remove(pc)
+ raise ProtocolError("Connection has no available stream capacity")
+ state = pc._conn.reserve_stream()
+ return _Checkout(pc, state)
+
+ def _try_checkout(self, base_url: str) -> _Checkout | None:
+ conns = self._hosts.get(base_url)
+ if conns is not None:
+ for pc in conns:
+ if pc.has_capacity:
+ state = pc._conn.reserve_stream()
+ return _Checkout(pc, state)
+ return None
+
+ def _ensure_reaper(self) -> None:
+ if self._reaper_task is None or self._reaper_task.done():
+ self._reaper_task = asyncio.get_running_loop().create_task(
+ self._reap_idle()
+ )
+
+ async def _reap_idle(self) -> None:
+ while not self._closed:
+ await asyncio.sleep(REAPER_INTERVAL)
+ empty_hosts: list[str] = []
+ for base_url, conns in tuple(self._hosts.items()):
+ to_close: list[_PooledConnection] = []
+ for pc in conns:
+ if not pc._conn.is_available:
+ to_close.append(pc)
+ elif (
+ pc.is_idle
+ and pc.idle_for() > IDLE_TIMEOUT
+ and len(conns) - len(to_close) > 1
+ ):
+ to_close.append(pc)
+ for pc in to_close:
+ conns.remove(pc)
+ for pc in to_close:
+ await pc.close()
+ if not conns:
+ empty_hosts.append(base_url)
+ for base_url in empty_hosts:
+ self._hosts.pop(base_url, None)
+ lock = self._host_locks.get(base_url)
+ if lock is not None and not lock.locked():
+ self._host_locks.pop(base_url, None)
+
+ async def close(self) -> None:
+ self._closed = True
+ if self._reaper_task is not None:
+ self._reaper_task.cancel()
+ try:
+ await self._reaper_task
+ except asyncio.CancelledError:
+ pass
+ for conns in self._hosts.values():
+ for pc in conns:
+ await pc.close()
+ self._hosts.clear()
+ self._host_locks.clear()
+
+
+class Response:
+ __slots__ = ("status_code", "_content")
+
+ def __init__(self, status_code: int, content: bytes) -> None:
+ self.status_code = status_code
+ self._content = content
+
+ @property
+ def content(self) -> bytes:
+ return self._content
+
+ @property
+ def text(self) -> str:
+ return self._content.decode("utf-8", errors="replace")
+
+ def json(self) -> Any:
+ return json_lib.loads(self._content)
+
+
+class StreamingResponse:
+ __slots__ = (
+ "status_code",
+ "_data_queue",
+ "_ended",
+ "_stream_state",
+ "_buf",
+ "_ack",
+ )
+
+ def __init__(
+ self,
+ status_code: int,
+ data_queue: asyncio.Queue[tuple[bytes, int] | bytes | None],
+ ended: asyncio.Event,
+ stream_state: Any,
+ ack: Callable[[int], Any] | None = None,
+ ) -> None:
+ self.status_code = status_code
+ self._data_queue = data_queue
+ self._ended = ended
+ self._stream_state = stream_state
+ self._buf = bytearray()
+ self._ack = ack
+
+ async def aread(self) -> bytes:
+ chunks: list[bytes] = []
+ if self._buf:
+ chunks.append(bytes(self._buf))
+ self._buf.clear()
+ while True:
+ item = await self._data_queue.get()
+ if item is None:
+ break
+ chunk, flow_bytes = _queue_item_parts(item)
+ chunks.append(chunk)
+ if self._ack is not None and flow_bytes > 0:
+ self._stream_state.unacked_flow_bytes -= flow_bytes
+ await self._ack(flow_bytes)
+ if (
+ self._stream_state.error is not None
+ and not self._stream_state.end_stream_received
+ ):
+ raise self._stream_state.error
+ return b"".join(chunks)
+
+ async def aiter_bytes(self) -> AsyncGenerator[bytes, None]:
+ if self._buf:
+ yield bytes(self._buf)
+ self._buf.clear()
+ while True:
+ item = await self._data_queue.get()
+ if item is None:
+ if (
+ self._stream_state.error is not None
+ and not self._stream_state.end_stream_received
+ ):
+ raise self._stream_state.error
+ return
+ chunk, flow_bytes = _queue_item_parts(item)
+ try:
+ yield chunk
+ finally:
+ if self._ack is not None and flow_bytes > 0:
+ self._stream_state.unacked_flow_bytes -= flow_bytes
+ await self._ack(flow_bytes)
+
+
+class _PooledConnection:
+ __slots__ = ("_conn", "_idle_since")
+
+ def __init__(self, conn: Connection) -> None:
+ self._conn = conn
+ self._idle_since: float | None = None
+
+ @property
+ def has_capacity(self) -> bool:
+ return (
+ self._conn.is_available
+ and self._conn._settings_received.is_set()
+ and self._conn.open_stream_count < self._conn.max_concurrent_streams
+ )
+
+ @property
+ def is_idle(self) -> bool:
+ return self._conn.open_stream_count == 0
+
+ def touch_idle(self) -> None:
+ if self._conn.open_stream_count == 0:
+ self._idle_since = time.monotonic()
+
+ def idle_for(self) -> float:
+ if self._idle_since is None:
+ return 0.0
+ return time.monotonic() - self._idle_since
+
+ async def close(self) -> None:
+ await self._conn.close()
+
+
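+# Per-stream bookkeeping shared between the request coroutine and the
+# connection's receive loop.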
+@dataclass
+class _StreamState:
+ response_headers: asyncio.Future[list[tuple[str, str]]] = field(
+ default_factory=lambda: asyncio.get_running_loop().create_future()
+ )
+ data_queue: asyncio.Queue[tuple[bytes, int] | bytes | None] = field(
+ default_factory=asyncio.Queue
+ )
+ ended: asyncio.Event = field(default_factory=asyncio.Event)
+ window_updated: asyncio.Event = field(default_factory=asyncio.Event)
+ error: BaseException | None = None
+ end_stream_received: bool = False
+ unacked_flow_bytes: int = 0
+
+
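+# A single HTTP/2 connection multiplexing many request streams. All h2 state
+# mutations happen under _write_lock; a background receive loop reads from the
+# socket and dispatches events to per-stream state.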
+class Connection:
+ __slots__ = (
+ "_host",
+ "_port",
+ "_ssl_context",
+ "_connect_timeout",
+ "_reader",
+ "_writer",
+ "_h2",
+ "_write_lock",
+ "_streams",
+ "_pending_streams",
+ "_recv_task",
+ "_closed",
+ "_goaway_received",
+ "_recv_dead",
+ "_settings_received",
+ )
+
+ def __init__(
+ self,
+ host: str,
+ port: int,
+ ssl_context: ssl.SSLContext | None,
+ connect_timeout: float,
+ ) -> None:
+ self._host = host
+ self._port = port
+ self._ssl_context = ssl_context
+ self._connect_timeout = connect_timeout
+ self._reader: asyncio.StreamReader | None = None
+ self._writer: asyncio.StreamWriter | None = None
+ self._h2: h2.connection.H2Connection | None = None
+ self._write_lock = asyncio.Lock()
+ self._streams: dict[int, _StreamState] = {}
+ self._pending_streams: dict[int, _StreamState] = {}
+ self._recv_task: asyncio.Task[None] | None = None
+ self._closed = False
+ self._goaway_received = False
+ self._recv_dead = False
+ self._settings_received = asyncio.Event()
+
+ async def connect(self) -> None:
+ try:
+ self._reader, self._writer = await asyncio.wait_for(
+ asyncio.open_connection(
+ self._host,
+ self._port,
+ ssl=self._ssl_context,
+ ),
+ timeout=self._connect_timeout,
+ )
+ except asyncio.TimeoutError:
+ raise ConnectError(f"Connection to {self._host}:{self._port} timed out")
+ except OSError as e:
+ raise ConnectError(str(e)) from e
+
+ if self._ssl_context is not None:
+ assert self._writer is not None
+ ssl_object = self._writer.get_extra_info("ssl_object")
+ if ssl_object is not None:
+ alpn = ssl_object.selected_alpn_protocol()
+ if alpn != "h2":
+ self._writer.close()
+ raise ConnectError(
+ f"ALPN negotiation failed: expected 'h2', got {alpn!r}"
+ )
+
+ config = h2.config.H2Configuration(client_side=True, header_encoding="utf-8")
+ self._h2 = h2.connection.H2Connection(config=config)
+ self._h2.initiate_connection()
+ self._flush_h2_data_sync()
+ assert self._writer is not None
+ await self._writer.drain()
+ self._recv_task = asyncio.get_running_loop().create_task(self._recv_loop())
+
+ def reserve_stream(self) -> _StreamState:
+ """Reserve per-connection capacity for a future outbound stream."""
+ state = _StreamState()
+ self._pending_streams[id(state)] = state
+ return state
+
+ async def send_headers(
+ self,
+ state: _StreamState,
+ headers: list[tuple[str, str]],
+ end_stream: bool = False,
+ ) -> int:
+ """Atomically allocate a stream ID and send request headers."""
+ assert self._h2 is not None
+ async with self._write_lock:
+ if state.error is not None:
+ raise state.error
+ pending_state = self._pending_streams.pop(id(state), None)
+ if pending_state is None:
+ raise ProtocolError("Stream reservation missing")
+ stream_id = self._h2.get_next_available_stream_id()
+ self._streams[stream_id] = state
+ try:
+ self._h2.send_headers(stream_id, headers, end_stream=end_stream)
+ await self._flush_h2_data_and_drain()
+ except Exception:
+ self._streams.pop(stream_id, None)
+ raise
+ return stream_id
+
+ async def send_data(
+ self,
+ stream_id: int,
+ data: bytes,
+ end_stream: bool = False,
+ on_write: Callable[[], None] | None = None,
+ ) -> None:
+ """Send data on a stream, respecting flow control windows.
+
+ Chunks by flow control window + max frame size. If ``on_write`` is
+ provided, it is called after each chunk is flushed to the socket.
+ """
+ assert self._h2 is not None
+ offset = 0
+ while offset < len(data):
+ state = self._streams.get(stream_id)
+ if state and state.error:
+ raise state.error
+
+ # Window check + send in a single lock acquisition to avoid TOCTOU.
+ sent = False
+ async with self._write_lock:
+ window = self._h2.local_flow_control_window(stream_id)
+ if window > 0:
+ max_frame = self._h2.max_outbound_frame_size
+ chunk_size = min(len(data) - offset, window, max_frame)
+ chunk = data[offset : offset + chunk_size]
+ is_last_chunk = offset + chunk_size >= len(data)
+
+ self._h2.send_data(
+ stream_id,
+ chunk,
+ end_stream=end_stream and is_last_chunk,
+ )
+ await self._flush_h2_data()
+ offset += chunk_size
+ sent = True
+
+ if sent:
+ if on_write:
+ on_write()
+ continue
+
+ # Window exhausted — wait for update (lock released).
+ # Caller-level timeouts (unary or streaming) will cancel this
+ # if it takes too long.
+ state = self._streams.get(stream_id)
+ if state:
+ state.window_updated.clear()
+ # Re-check under lock to avoid missing an update.
+ async with self._write_lock:
+ window = self._h2.local_flow_control_window(stream_id)
+ if window <= 0:
+ await state.window_updated.wait()
+
+ # Handle empty data with end_stream
+ if not data and end_stream:
+ async with self._write_lock:
+ self._h2.send_data(stream_id, b"", end_stream=True)
+ await self._flush_h2_data_and_drain()
+ if on_write:
+ on_write()
+
+ async def end_stream(self, stream_id: int) -> None:
+ """Send END_STREAM on a stream."""
+ assert self._h2 is not None
+ async with self._write_lock:
+ self._h2.send_data(stream_id, b"", end_stream=True)
+ await self._flush_h2_data_and_drain()
+
+ async def ack_data(self, stream_id: int, nbytes: int) -> None:
+ """Acknowledge received data to update the flow control window."""
+ assert self._h2 is not None
+ async with self._write_lock:
+ self._h2.acknowledge_received_data(nbytes, stream_id)
+ await self._flush_h2_data()
+
+ async def reset_stream(self, stream_id: int) -> None:
+ """Send RST_STREAM to tell the peer to stop sending."""
+ assert self._h2 is not None
+ try:
+ async with self._write_lock:
+ self._h2.reset_stream(stream_id)
+ await self._flush_h2_data_and_drain()
+ except Exception:
+ pass # Best effort
+
+ def release_stream(
+ self, stream_id: int | None, state: _StreamState | None = None
+ ) -> None:
+ """Clean up active or reserved stream state."""
+ if stream_id is not None:
+ self._streams.pop(stream_id, None)
+ if state is not None:
+ self._pending_streams.pop(id(state), None)
+
+ @property
+ def is_available(self) -> bool:
+ """Connection is usable for new streams."""
+ return not self._closed and not self._goaway_received and not self._recv_dead
+
+ @property
+ def max_concurrent_streams(self) -> int:
+ assert self._h2 is not None
+ advertised = self._h2.remote_settings.max_concurrent_streams
+ if advertised is None:
+ return DEFAULT_MAX_STREAMS_PER_CONN
+ return int(advertised)
+
+ @property
+ def open_stream_count(self) -> int:
+ return len(self._streams) + len(self._pending_streams)
+
+ async def close(self) -> None:
+ """Send GOAWAY, cancel recv_loop, close socket."""
+ if self._closed:
+ return
+ self._closed = True
+ self._fail_all_streams(ConnectionClosedError("Connection closed"))
+
+ if self._h2 is not None and self._writer is not None:
+ try:
+ async with self._write_lock:
+ self._h2.close_connection()
+ await self._flush_h2_data_and_drain()
+ except Exception:
+ pass
+
+ if self._recv_task is not None:
+ self._recv_task.cancel()
+ try:
+ await self._recv_task
+ except (asyncio.CancelledError, Exception):
+ pass
+
+ if self._writer is not None:
+ try:
+ self._writer.close()
+ await self._writer.wait_closed()
+ except Exception:
+ pass
+
+ def _flush_h2_data_sync(self) -> None:
+ """Write pending h2 bytes to socket. Must be called under _write_lock or during init."""
+ assert self._h2 is not None
+ assert self._writer is not None
+ data = self._h2.data_to_send()
+ if data:
+ self._writer.write(data)
+
+ async def _flush_h2_data(self) -> None:
+ """Write pending h2 bytes, draining only when the buffer is large.
+
+ This allows small writes to coalesce in the kernel buffer, reducing
+ the number of syscalls and await-points on high-rate paths.
+ """
+ assert self._writer is not None
+ self._flush_h2_data_sync()
+ transport = self._writer.transport
+ if (
+ transport is not None
+ and transport.get_write_buffer_size() >= _DRAIN_BUFFER_THRESHOLD
+ ):
+ await self._writer.drain()
+
+ async def _flush_h2_data_and_drain(self) -> None:
+ """Write pending h2 bytes and unconditionally drain the socket."""
+ assert self._writer is not None
+ self._flush_h2_data_sync()
+ await self._writer.drain()
+
+ async def _recv_loop(self) -> None:
+ """Background task: read from socket, feed to h2, dispatch events."""
+ assert self._reader is not None
+ assert self._h2 is not None
+ try:
+ while not self._closed:
+ data = await self._reader.read(65535)
+ if not data:
+ self._fail_all_streams(
+ ConnectionClosedError("Connection closed by remote")
+ )
+ return
+
+ # All h2 state mutations must be under _write_lock to avoid
+ # concurrent access with send_data/send_headers.
+ async with self._write_lock:
+ events = self._h2.receive_data(data)
+ for event in events:
+ self._handle_event(event)
+ # Flush h2 data generated by event handling (e.g. window update ACKs)
+ await self._flush_h2_data()
+ except Exception as e:
+ if not self._closed:
+ self._fail_all_streams(ConnectionClosedError(f"recv_loop error: {e}"))
+ finally:
+ self._recv_dead = True
+
+ def _handle_event(self, event: h2.events.Event) -> None:
+ if isinstance(event, h2.events.ResponseReceived):
+ state = self._streams.get(event.stream_id)
+ if state and not state.response_headers.done():
+ # h2 returns str tuples when header_encoding is set,
+ # but type stubs declare bytes.
+ headers = [(str(n), str(v)) for n, v in event.headers]
+ state.response_headers.set_result(headers)
+
+ elif isinstance(event, h2.events.DataReceived):
+ state = self._streams.get(event.stream_id)
+ if state:
+ state.data_queue.put_nowait((event.data, event.flow_controlled_length))
+ state.unacked_flow_bytes += event.flow_controlled_length
+
+ elif isinstance(event, h2.events.StreamEnded):
+ state = self._streams.get(event.stream_id)
+ if state:
+ state.end_stream_received = True
+ state.data_queue.put_nowait(None)
+ state.ended.set()
+
+ elif isinstance(event, h2.events.WindowUpdated):
+ if event.stream_id == 0:
+ # Connection-level window update — wake all streams.
+ for state in self._streams.values():
+ state.window_updated.set()
+ else:
+ state = self._streams.get(event.stream_id)
+ if state:
+ state.window_updated.set()
+
+ elif isinstance(event, h2.events.StreamReset):
+ state = self._streams.get(event.stream_id)
+ if state:
+ err = ProtocolError(
+ f"Stream reset with error code {event.error_code}",
+ error_code=event.error_code,
+ )
+ self._fail_stream(state, err)
+
+ elif isinstance(event, h2.events.ConnectionTerminated):
+ self._goaway_received = True
+ err = ProtocolError(
+ f"GOAWAY received: error_code={event.error_code}, "
+ f"last_stream_id={event.last_stream_id}",
+ error_code=event.error_code,
+ )
+ # Only fail streams the server never processed.
+ if event.last_stream_id is not None:
+ for stream_id, state in self._streams.items():
+ if stream_id > event.last_stream_id:
+ self._fail_stream(state, err)
+ else:
+ self._fail_all_streams(err)
+
+ elif isinstance(event, h2.events.RemoteSettingsChanged):
+ self._settings_received.set()
+ for state in self._streams.values():
+ state.window_updated.set()
+
+ def _fail_stream(self, state: _StreamState, error: BaseException) -> None:
+ if state.error is None:
+ state.error = error
+ if not state.response_headers.done():
+ state.response_headers.set_exception(state.error)
+ if not state.ended.is_set():
+ state.data_queue.put_nowait(None)
+ state.ended.set()
+ state.window_updated.set()
+
+ def _fail_all_streams(self, error: BaseException) -> None:
+ for state in self._streams.values():
+ self._fail_stream(state, error)
+ for state in self._pending_streams.values():
+ self._fail_stream(state, error)
+
+
+async def _drain_body(
+ conn: Connection,
+ stream_id: int,
+ content: Any,
+ on_write: Any | None,
+) -> None:
+ """Drain an async generator body into h2 send_data calls."""
+ try:
+ async for chunk in content:
+ await conn.send_data(stream_id, chunk, on_write=on_write)
+ await conn.end_stream(stream_id)
+ except (asyncio.CancelledError, Exception):
+ await conn.reset_stream(stream_id)
+ raise
+
+
+def _queue_item_parts(item: tuple[bytes, int] | bytes) -> tuple[bytes, int]:
+ if isinstance(item, tuple):
+ return item
+ return item, len(item)
+
+
+def _take_all_unacked_flow_bytes(state: _StreamState) -> int:
+ nbytes = state.unacked_flow_bytes
+ state.unacked_flow_bytes = 0
+ return nbytes
+
+
+def _parse_retry_after_ms(raw: str | None) -> float | None:
+ if raw is None:
+ return None
+ try:
+ return int(raw) / 1000.0
+ except (ValueError, TypeError):
+ return None
+
+
+def _raise_for_status(response: Response, *, retry_after_ms: str | None = None) -> None:
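+    # 412 and 416 error bodies are mapped to specific exceptions in _exceptions;
+    # any other non-2xx response is raised as an S2ServerError.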
+ status = response.status_code
+ if 200 <= status < 300:
+ return
+
+ retry_after = _parse_retry_after_ms(retry_after_ms)
+ body: Any | None
+ try:
+ body = response.json()
+ except Exception:
+ body = None
+
+ if status == 412 and isinstance(body, dict):
+ code = body.get("code", UNKNOWN_CODE)
+ raise_for_412(body, code)
+
+ if status == 416 and isinstance(body, dict):
+ code = body.get("code", UNKNOWN_CODE)
+ raise_for_416(body, code)
+
+ if isinstance(body, dict):
+ message = body.get("message", response.text)
+ code = body.get("code", UNKNOWN_CODE)
+ else:
+ message = response.text
+ code = UNKNOWN_CODE
+
+ err = S2ServerError(code, message, status)
+ err._retry_after = retry_after
+ raise err
+
+
+class _Checkout(NamedTuple):
+ connection: _PooledConnection
+ state: _StreamState
+
+
+class _Origin(NamedTuple):
+ scheme: str
+ host: str
+ port: int
+
+
+def _origin(base_url: str) -> _Origin:
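+    # Accepts "host", "host:port", or "http(s)://host[:port]"; the scheme
+    # defaults to https, and paths, query strings, and userinfo are rejected.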
+ url = base_url if "://" in base_url else f"https://{base_url}"
+ parsed = urlsplit(url)
+ scheme = parsed.scheme.lower()
+ if scheme not in {"http", "https"}:
+ raise S2ClientError(f"Unsupported URL scheme: {parsed.scheme!r}")
+ if parsed.path not in {"", "/"} or parsed.query or parsed.fragment:
+ raise S2ClientError("Endpoint URL must be an origin without path or query")
+ if parsed.username is not None or parsed.password is not None:
+ raise S2ClientError("Endpoint URL must not include userinfo")
+ if parsed.hostname is None:
+ raise S2ClientError("Endpoint URL must include a host")
+ default_port = 443 if scheme == "https" else 80
+ try:
+ port = parsed.port if parsed.port is not None else default_port
+ except ValueError as e:
+ raise S2ClientError(f"Invalid endpoint URL: {e}") from e
+ return _Origin(scheme, parsed.hostname, port)
+
+
+def _build_path(path: str, params: dict[str, Any] | None) -> str:
+ if not params:
+ return path
+ qs = urlencode({k: v for k, v in params.items() if v is not None})
+ return f"{path}?{qs}" if qs else path
+
+
+def _status_from_headers(headers: list[tuple[str, str]]) -> int:
+ for name, value in headers:
+ if name == ":status":
+ return int(value)
+ raise TransportError("No :status header in response")
+
+
+def _header_value(headers: list[tuple[str, str]], name: str) -> str | None:
+ for k, v in headers:
+ if k == name:
+ return v
+ return None
diff --git a/src/s2_sdk/_compression.py b/src/s2_sdk/_compression.py
new file mode 100644
index 0000000..c083e2a
--- /dev/null
+++ b/src/s2_sdk/_compression.py
@@ -0,0 +1,32 @@
+"""S2S message-level compression (zstd and gzip)."""
+
+import gzip
+
+import zstandard
+
+from s2_sdk._types import Compression
+
+_zstd_compressor = zstandard.ZstdCompressor()
+_zstd_dctx = zstandard.ZstdDecompressor()
+
+
+def compress(data: bytes, compression: Compression) -> bytes:
+ match compression:
+ case Compression.ZSTD:
+ return _zstd_compressor.compress(data)
+ case Compression.GZIP:
+ return gzip.compress(data)
+ case _:
+ return data
+
+
+def decompress(data: bytes, compression: Compression) -> bytes:
+ match compression:
+ case Compression.ZSTD:
+ # stream_reader handles frames without content size in header.
+ with _zstd_dctx.stream_reader(data) as reader:
+ return reader.read()
+ case Compression.GZIP:
+ return gzip.decompress(data)
+ case _:
+ return data
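+
+
+# Illustrative round trip (not part of the module API): compress and decompress
+# are inverses for a given Compression value, e.g.
+#
+#     payload = compress(b"hello", Compression.ZSTD)
+#     assert decompress(payload, Compression.ZSTD) == b"hello"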
diff --git a/src/s2_sdk/_exceptions.py b/src/s2_sdk/_exceptions.py
new file mode 100644
index 0000000..ca7df3a
--- /dev/null
+++ b/src/s2_sdk/_exceptions.py
@@ -0,0 +1,194 @@
+from __future__ import annotations
+
+from functools import wraps
+from inspect import isasyncgenfunction, iscoroutinefunction
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from s2_sdk._types import StreamPosition
+
+
+class S2Error(Exception):
+ """Base class for all S2 related exceptions."""
+
+
+class S2ClientError(S2Error):
+ """Error originating from the client."""
+
+
+UNKNOWN_CODE = "unknown"
+
+
+class S2ServerError(S2Error):
+ """Error originating from the server.
+
+ Attributes:
+ code: Error code from the server. See `error codes `_ for possible values.
+ message: Human-readable error message.
+ status_code: HTTP status code.
+ """
+
+ def __init__(
+ self,
+ code: str,
+ message: str,
+ status_code: int,
+ ):
+ self.code = code
+ self.message = message
+ self.status_code = status_code
+ self._retry_after: float | None = None
+ super().__init__(message)
+
+
+class AppendConditionError(S2ServerError):
+ """Append condition (fencing token or sequence number match) was not met."""
+
+
+class FencingTokenMismatchError(AppendConditionError):
+ """Fencing token did not match.
+
+ Attributes:
+ expected_fencing_token: The fencing token the server expected.
+ """
+
+ def __init__(
+ self, code: str, message: str, status_code: int, expected_fencing_token: str
+ ):
+ self.expected_fencing_token = expected_fencing_token
+ super().__init__(code, message, status_code)
+
+
+class SeqNumMismatchError(AppendConditionError):
+ """Sequence number did not match.
+
+ Attributes:
+ expected_seq_num: The sequence number the server expected.
+ """
+
+ def __init__(
+ self, code: str, message: str, status_code: int, expected_seq_num: int
+ ):
+ self.expected_seq_num = expected_seq_num
+ super().__init__(code, message, status_code)
+
+
+class ReadUnwrittenError(S2ServerError):
+ """Read from an unwritten position.
+
+ Attributes:
+ tail: The tail position of the stream.
+ """
+
+ def __init__(self, code: str, message: str, status_code: int, tail: StreamPosition):
+ self.tail = tail
+ super().__init__(code, message, status_code)
+
+
+def raise_for_412(body: dict, code: str) -> None:
+ if "fencing_token_mismatch" in body:
+ info = body["fencing_token_mismatch"]
+ expected = (
+ info if isinstance(info, str) else info.get("expected_fencing_token", "")
+ )
+ raise FencingTokenMismatchError(
+ code,
+ f"Fencing token mismatch: {info}",
+ 412,
+ expected_fencing_token=str(expected),
+ )
+ elif "seq_num_mismatch" in body:
+ info = body["seq_num_mismatch"]
+ expected = info if isinstance(info, int) else info.get("expected_seq_num", 0)
+ raise SeqNumMismatchError(
+ code,
+ f"Sequence number mismatch: {info}",
+ 412,
+ expected_seq_num=int(expected),
+ )
+ raise AppendConditionError(code, str(body), 412)
+
+
+def raise_for_416(body: dict, code: str) -> None:
+ from s2_sdk._types import StreamPosition
+
+ tail = body.get("tail", {})
+ raise ReadUnwrittenError(
+ code,
+ "Read from unwritten position",
+ 416,
+ tail=StreamPosition(
+ seq_num=tail.get("seq_num", 0),
+ timestamp=tail.get("timestamp", 0),
+ ),
+ )
+
+
+S2Error.__module__ = "s2_sdk"
+S2ClientError.__module__ = "s2_sdk"
+S2ServerError.__module__ = "s2_sdk"
+AppendConditionError.__module__ = "s2_sdk"
+FencingTokenMismatchError.__module__ = "s2_sdk"
+SeqNumMismatchError.__module__ = "s2_sdk"
+ReadUnwrittenError.__module__ = "s2_sdk"
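+
+
+# Illustrative recovery sketch (``stream`` is an S2Stream and ``inp`` an
+# AppendInput, both defined elsewhere in this SDK): SeqNumMismatchError carries
+# the sequence number the server expected, so a caller can rebuild the append
+# condition and retry.
+#
+#     try:
+#         ack = await stream.append(inp)
+#     except SeqNumMismatchError as e:
+#         inp = AppendInput(records=inp.records, match_seq_num=e.expected_seq_num)
+#         ack = await stream.append(inp)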
+
+
+class TransportError(Exception):
+ """Network-level error (connection reset, timeout, protocol)."""
+
+
+class ConnectError(TransportError):
+ """Failed to establish TCP/TLS connection."""
+
+
+class ReadTimeoutError(TransportError):
+ """Timed out waiting for data."""
+
+
+class ConnectionClosedError(TransportError):
+ """Connection closed unexpectedly."""
+
+
+class ProtocolError(TransportError):
+ """HTTP/2 protocol error (RST_STREAM, GOAWAY)."""
+
+ def __init__(self, message: str, error_code: int | None = None):
+ self.error_code = error_code
+ super().__init__(message)
+
+
+def fallible(f):
+ @wraps(f)
+ def sync_wrapper(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, S2Error):
+ raise e
+ raise S2ClientError(e) from e
+
+ @wraps(f)
+ async def async_gen_wrapper(*args, **kwargs):
+ try:
+ async for val in f(*args, **kwargs):
+ yield val
+ except Exception as e:
+ if isinstance(e, S2Error):
+ raise e
+ raise S2ClientError(e) from e
+
+ @wraps(f)
+ async def coro_wrapper(*args, **kwargs):
+ try:
+ return await f(*args, **kwargs)
+ except Exception as e:
+ if isinstance(e, S2Error):
+ raise e
+ raise S2ClientError(e) from e
+
+ if iscoroutinefunction(f):
+ return coro_wrapper
+ elif isasyncgenfunction(f):
+ return async_gen_wrapper
+ else:
+ return sync_wrapper
diff --git a/src/s2_sdk/_frame_signal.py b/src/s2_sdk/_frame_signal.py
new file mode 100644
index 0000000..cd8d7af
--- /dev/null
+++ b/src/s2_sdk/_frame_signal.py
@@ -0,0 +1,21 @@
+class FrameSignal:
+ """Tracks whether any HTTP/2 DATA frames were written to the socket.
+
+ Signalled by the transport's ``on_write`` callback. An unsignalled
+ state at retry time proves no request data left the process, making
+ the retry safe. Reset at the start of each attempt.
+ """
+
+ __slots__ = ("_signalled",)
+
+ def __init__(self) -> None:
+ self._signalled = False
+
+ def is_signalled(self) -> bool:
+ return self._signalled
+
+ def signal(self) -> None:
+ self._signalled = True
+
+ def reset(self) -> None:
+ self._signalled = False
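+
+
+# Illustrative retry check (sketch; ``attempt`` is a hypothetical callable that
+# issues one request and accepts the transport's on_write callback). If the
+# signal was never set, no DATA frames reached the socket and the attempt can
+# be retried safely:
+#
+#     signal = FrameSignal()
+#     signal.reset()
+#     try:
+#         return await attempt(on_write=signal.signal)
+#     except Exception:
+#         if signal.is_signalled():
+#             raise
+#         # no request data was written; safe to retry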
diff --git a/src/streamstore/_lib/__init__.py b/src/s2_sdk/_generated/__init__.py
similarity index 100%
rename from src/streamstore/_lib/__init__.py
rename to src/s2_sdk/_generated/__init__.py
diff --git a/src/streamstore/_lib/s2/__init__.py b/src/s2_sdk/_generated/s2/__init__.py
similarity index 100%
rename from src/streamstore/_lib/s2/__init__.py
rename to src/s2_sdk/_generated/s2/__init__.py
diff --git a/src/streamstore/py.typed b/src/s2_sdk/_generated/s2/v1/__init__.py
similarity index 100%
rename from src/streamstore/py.typed
rename to src/s2_sdk/_generated/s2/v1/__init__.py
diff --git a/src/s2_sdk/_generated/s2/v1/s2_pb2.py b/src/s2_sdk/_generated/s2/v1/s2_pb2.py
new file mode 100644
index 0000000..a2ce856
--- /dev/null
+++ b/src/s2_sdk/_generated/s2/v1/s2_pb2.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
+# source: s2/v1/s2.proto
+# Protobuf Python Version: 5.29.0
+"""Generated protocol buffer code."""
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+ _runtime_version.Domain.PUBLIC, 5, 29, 0, "", "s2/v1/s2.proto"
+)
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
+ b'\n\x0es2/v1/s2.proto\x12\x05s2.v1"4\n\x0eStreamPosition\x12\x0f\n\x07seq_num\x18\x01 \x01(\x04\x12\x11\n\ttimestamp\x18\x02 \x01(\x04"%\n\x06Header\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c"b\n\x0c\x41ppendRecord\x12\x16\n\ttimestamp\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x1e\n\x07headers\x18\x02 \x03(\x0b\x32\r.s2.v1.Header\x12\x0c\n\x04\x62ody\x18\x03 \x01(\x0c\x42\x0c\n\n_timestamp"\x8f\x01\n\x0b\x41ppendInput\x12$\n\x07records\x18\x01 \x03(\x0b\x32\x13.s2.v1.AppendRecord\x12\x1a\n\rmatch_seq_num\x18\x02 \x01(\x04H\x00\x88\x01\x01\x12\x1a\n\rfencing_token\x18\x03 \x01(\tH\x01\x88\x01\x01\x42\x10\n\x0e_match_seq_numB\x10\n\x0e_fencing_token"z\n\tAppendAck\x12$\n\x05start\x18\x01 \x01(\x0b\x32\x15.s2.v1.StreamPosition\x12"\n\x03\x65nd\x18\x02 \x01(\x0b\x32\x15.s2.v1.StreamPosition\x12#\n\x04tail\x18\x03 \x01(\x0b\x32\x15.s2.v1.StreamPosition"c\n\x0fSequencedRecord\x12\x0f\n\x07seq_num\x18\x01 \x01(\x04\x12\x11\n\ttimestamp\x18\x02 \x01(\x04\x12\x1e\n\x07headers\x18\x03 \x03(\x0b\x32\r.s2.v1.Header\x12\x0c\n\x04\x62ody\x18\x04 \x01(\x0c"g\n\tReadBatch\x12\'\n\x07records\x18\x01 \x03(\x0b\x32\x16.s2.v1.SequencedRecord\x12(\n\x04tail\x18\x02 \x01(\x0b\x32\x15.s2.v1.StreamPositionH\x00\x88\x01\x01\x42\x07\n\x05_tailB\t\n\x05s2.v1P\x01\x62\x06proto3'
+)
+
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "s2.v1.s2_pb2", _globals)
+if not _descriptor._USE_C_DESCRIPTORS:
+ _globals["DESCRIPTOR"]._loaded_options = None
+ _globals["DESCRIPTOR"]._serialized_options = b"\n\005s2.v1P\001"
+ _globals["_STREAMPOSITION"]._serialized_start = 25
+ _globals["_STREAMPOSITION"]._serialized_end = 77
+ _globals["_HEADER"]._serialized_start = 79
+ _globals["_HEADER"]._serialized_end = 116
+ _globals["_APPENDRECORD"]._serialized_start = 118
+ _globals["_APPENDRECORD"]._serialized_end = 216
+ _globals["_APPENDINPUT"]._serialized_start = 219
+ _globals["_APPENDINPUT"]._serialized_end = 362
+ _globals["_APPENDACK"]._serialized_start = 364
+ _globals["_APPENDACK"]._serialized_end = 486
+ _globals["_SEQUENCEDRECORD"]._serialized_start = 488
+ _globals["_SEQUENCEDRECORD"]._serialized_end = 587
+ _globals["_READBATCH"]._serialized_start = 589
+ _globals["_READBATCH"]._serialized_end = 692
+# @@protoc_insertion_point(module_scope)
diff --git a/src/s2_sdk/_generated/s2/v1/s2_pb2.pyi b/src/s2_sdk/_generated/s2/v1/s2_pb2.pyi
new file mode 100644
index 0000000..de57f04
--- /dev/null
+++ b/src/s2_sdk/_generated/s2/v1/s2_pb2.pyi
@@ -0,0 +1,106 @@
+from typing import ClassVar as _ClassVar
+from typing import Iterable as _Iterable
+from typing import Mapping as _Mapping
+from typing import Optional as _Optional
+from typing import Union as _Union
+
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf.internal import containers as _containers
+
+DESCRIPTOR: _descriptor.FileDescriptor
+
+class StreamPosition(_message.Message):
+ __slots__ = ("seq_num", "timestamp")
+ SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
+ TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
+ seq_num: int
+ timestamp: int
+ def __init__(
+ self, seq_num: _Optional[int] = ..., timestamp: _Optional[int] = ...
+ ) -> None: ...
+
+class Header(_message.Message):
+ __slots__ = ("name", "value")
+ NAME_FIELD_NUMBER: _ClassVar[int]
+ VALUE_FIELD_NUMBER: _ClassVar[int]
+ name: bytes
+ value: bytes
+ def __init__(
+ self, name: _Optional[bytes] = ..., value: _Optional[bytes] = ...
+ ) -> None: ...
+
+class AppendRecord(_message.Message):
+ __slots__ = ("timestamp", "headers", "body")
+ TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
+ HEADERS_FIELD_NUMBER: _ClassVar[int]
+ BODY_FIELD_NUMBER: _ClassVar[int]
+ timestamp: int
+ headers: _containers.RepeatedCompositeFieldContainer[Header]
+ body: bytes
+ def __init__(
+ self,
+ timestamp: _Optional[int] = ...,
+ headers: _Optional[_Iterable[_Union[Header, _Mapping]]] = ...,
+ body: _Optional[bytes] = ...,
+ ) -> None: ...
+
+class AppendInput(_message.Message):
+ __slots__ = ("records", "match_seq_num", "fencing_token")
+ RECORDS_FIELD_NUMBER: _ClassVar[int]
+ MATCH_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
+ FENCING_TOKEN_FIELD_NUMBER: _ClassVar[int]
+ records: _containers.RepeatedCompositeFieldContainer[AppendRecord]
+ match_seq_num: int
+ fencing_token: str
+ def __init__(
+ self,
+ records: _Optional[_Iterable[_Union[AppendRecord, _Mapping]]] = ...,
+ match_seq_num: _Optional[int] = ...,
+ fencing_token: _Optional[str] = ...,
+ ) -> None: ...
+
+class AppendAck(_message.Message):
+ __slots__ = ("start", "end", "tail")
+ START_FIELD_NUMBER: _ClassVar[int]
+ END_FIELD_NUMBER: _ClassVar[int]
+ TAIL_FIELD_NUMBER: _ClassVar[int]
+ start: StreamPosition
+ end: StreamPosition
+ tail: StreamPosition
+ def __init__(
+ self,
+ start: _Optional[_Union[StreamPosition, _Mapping]] = ...,
+ end: _Optional[_Union[StreamPosition, _Mapping]] = ...,
+ tail: _Optional[_Union[StreamPosition, _Mapping]] = ...,
+ ) -> None: ...
+
+class SequencedRecord(_message.Message):
+ __slots__ = ("seq_num", "timestamp", "headers", "body")
+ SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
+ TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
+ HEADERS_FIELD_NUMBER: _ClassVar[int]
+ BODY_FIELD_NUMBER: _ClassVar[int]
+ seq_num: int
+ timestamp: int
+ headers: _containers.RepeatedCompositeFieldContainer[Header]
+ body: bytes
+ def __init__(
+ self,
+ seq_num: _Optional[int] = ...,
+ timestamp: _Optional[int] = ...,
+ headers: _Optional[_Iterable[_Union[Header, _Mapping]]] = ...,
+ body: _Optional[bytes] = ...,
+ ) -> None: ...
+
+class ReadBatch(_message.Message):
+ __slots__ = ("records", "tail")
+ RECORDS_FIELD_NUMBER: _ClassVar[int]
+ TAIL_FIELD_NUMBER: _ClassVar[int]
+ records: _containers.RepeatedCompositeFieldContainer[SequencedRecord]
+ tail: StreamPosition
+ def __init__(
+ self,
+ records: _Optional[_Iterable[_Union[SequencedRecord, _Mapping]]] = ...,
+ tail: _Optional[_Union[StreamPosition, _Mapping]] = ...,
+ ) -> None: ...
diff --git a/src/s2_sdk/_mappers.py b/src/s2_sdk/_mappers.py
new file mode 100644
index 0000000..31563a7
--- /dev/null
+++ b/src/s2_sdk/_mappers.py
@@ -0,0 +1,399 @@
+from datetime import datetime
+from typing import Any, Literal
+
+import s2_sdk._generated.s2.v1.s2_pb2 as pb
+from s2_sdk._types import (
+ AccessTokenInfo,
+ AccessTokenScope,
+ Accumulation,
+ AppendAck,
+ AppendInput,
+ BasinConfig,
+ BasinInfo,
+ BasinScope,
+ ExactMatch,
+ Gauge,
+ Label,
+ MetricUnit,
+ Operation,
+ OperationGroupPermissions,
+ Permission,
+ PrefixMatch,
+ ReadBatch,
+ ReadLimit,
+ Record,
+ Scalar,
+ SeqNum,
+ SequencedRecord,
+ StorageClass,
+ StreamConfig,
+ StreamInfo,
+ StreamPosition,
+ TailOffset,
+ TimeseriesInterval,
+ Timestamp,
+ Timestamping,
+ TimestampingMode,
+)
+
+_ReadStart = SeqNum | Timestamp | TailOffset
+
+
+def basin_config_to_json(config: BasinConfig | None) -> dict[str, Any] | None:
+ if config is None:
+ return None
+ result: dict[str, Any] = {}
+ if config.default_stream_config is not None:
+ result["default_stream_config"] = stream_config_to_json(
+ config.default_stream_config
+ )
+ if config.create_stream_on_append is not None:
+ result["create_stream_on_append"] = config.create_stream_on_append
+ if config.create_stream_on_read is not None:
+ result["create_stream_on_read"] = config.create_stream_on_read
+ return result
+
+
+def basin_config_from_json(data: dict[str, Any]) -> BasinConfig:
+ dsc = data.get("default_stream_config")
+ return BasinConfig(
+ default_stream_config=stream_config_from_json(dsc) if dsc else None,
+ create_stream_on_append=data.get("create_stream_on_append"),
+ create_stream_on_read=data.get("create_stream_on_read"),
+ )
+
+
+def basin_reconfiguration_to_json(config: BasinConfig) -> dict[str, Any]:
+ return basin_config_to_json(config) or {}
+
+
+def stream_config_to_json(config: StreamConfig | None) -> dict[str, Any] | None:
+ if config is None:
+ return None
+ result: dict[str, Any] = {}
+ if config.storage_class is not None:
+ result["storage_class"] = config.storage_class.value
+ if config.retention_policy is not None:
+ result["retention_policy"] = _retention_policy_to_json(config.retention_policy)
+ if config.timestamping is not None:
+ ts: dict[str, Any] = {}
+ if config.timestamping.mode is not None:
+ ts["mode"] = config.timestamping.mode.value
+ if config.timestamping.uncapped is not None:
+ ts["uncapped"] = config.timestamping.uncapped
+ result["timestamping"] = ts
+ if config.delete_on_empty_min_age is not None:
+ result["delete_on_empty"] = {"min_age_secs": config.delete_on_empty_min_age}
+ return result
+
+
+def stream_reconfiguration_to_json(config: StreamConfig) -> dict[str, Any]:
+ return stream_config_to_json(config) or {}
+
+
+def stream_config_from_json(data: dict[str, Any]) -> StreamConfig:
+ retention_policy: int | Literal["infinite"] | None = None
+ rp = data.get("retention_policy")
+ if rp is not None:
+ retention_policy = _retention_policy_from_json(rp)
+
+ timestamping = None
+ ts = data.get("timestamping")
+ if ts is not None:
+ mode_val = ts.get("mode")
+ timestamping = Timestamping(
+ mode=TimestampingMode(mode_val) if mode_val else None,
+ uncapped=ts.get("uncapped"),
+ )
+
+ doe = data.get("delete_on_empty")
+ delete_on_empty_min_age = doe.get("min_age_secs") if doe else None
+
+ sc = data.get("storage_class")
+ return StreamConfig(
+ storage_class=StorageClass(sc) if sc else None,
+ retention_policy=retention_policy,
+ timestamping=timestamping,
+ delete_on_empty_min_age=delete_on_empty_min_age,
+ )
+
+
+def _retention_policy_to_json(rp: int | Literal["infinite"]) -> dict[str, Any]:
+ if rp == "infinite":
+ return {"infinite": {}}
+ return {"age": rp}
+
+
+def _retention_policy_from_json(data: dict[str, Any]) -> int | Literal["infinite"]:
+ if "infinite" in data:
+ return "infinite"
+ return data["age"]
+
+
+def basin_info_from_json(data: dict[str, Any]) -> BasinInfo:
+ created_at = datetime.fromisoformat(data["created_at"])
+ deleted_at_str = data.get("deleted_at")
+ deleted_at = datetime.fromisoformat(deleted_at_str) if deleted_at_str else None
+ return BasinInfo(
+ name=data["name"],
+ scope=BasinScope(data["scope"]) if data.get("scope") else None,
+ created_at=created_at,
+ deleted_at=deleted_at,
+ )
+
+
+def stream_info_from_json(data: dict[str, Any]) -> StreamInfo:
+ created_at = datetime.fromisoformat(data["created_at"])
+ deleted_at_str = data.get("deleted_at")
+ deleted_at = datetime.fromisoformat(deleted_at_str) if deleted_at_str else None
+ return StreamInfo(
+ name=data["name"],
+ created_at=created_at,
+ deleted_at=deleted_at,
+ )
+
+
+def _resource_set_to_json(
+ rs: ExactMatch | PrefixMatch | None,
+) -> dict[str, str] | None:
+ if rs is None:
+ return None
+ if isinstance(rs, ExactMatch):
+ return {"exact": rs.value}
+ return {"prefix": rs.value}
+
+
+def _resource_set_from_json(
+ data: dict[str, Any] | None,
+) -> ExactMatch | PrefixMatch | None:
+ if data is None:
+ return None
+ if "exact" in data:
+ return ExactMatch(data["exact"])
+ if "prefix" in data:
+ return PrefixMatch(data["prefix"])
+ return None
+
+
+def _rw_perms_to_json(perm: Permission | None) -> dict[str, bool] | None:
+ if perm is None:
+ return None
+ match perm:
+ case Permission.READ:
+ return {"read": True}
+ case Permission.WRITE:
+ return {"write": True}
+ case Permission.READ_WRITE:
+ return {"read": True, "write": True}
+
+
+def _rw_perms_from_json(data: dict[str, Any] | None) -> Permission | None:
+ if data is None:
+ return None
+ read = data.get("read", False)
+ write = data.get("write", False)
+ if read and write:
+ return Permission.READ_WRITE
+ elif read:
+ return Permission.READ
+ elif write:
+ return Permission.WRITE
+ return None
+
+
+def access_token_info_to_json(
+ id: str,
+ scope: AccessTokenScope,
+ auto_prefix_streams: bool,
+ expires_at: str | None,
+) -> dict[str, Any]:
+ scope_json: dict[str, Any] = {}
+ if scope.basins is not None:
+ scope_json["basins"] = _resource_set_to_json(scope.basins)
+ if scope.streams is not None:
+ scope_json["streams"] = _resource_set_to_json(scope.streams)
+ if scope.access_tokens is not None:
+ scope_json["access_tokens"] = _resource_set_to_json(scope.access_tokens)
+ if scope.op_groups is not None:
+ og: dict[str, Any] = {}
+ if scope.op_groups.account is not None:
+ og["account"] = _rw_perms_to_json(scope.op_groups.account)
+ if scope.op_groups.basin is not None:
+ og["basin"] = _rw_perms_to_json(scope.op_groups.basin)
+ if scope.op_groups.stream is not None:
+ og["stream"] = _rw_perms_to_json(scope.op_groups.stream)
+ scope_json["op_groups"] = og
+ if scope.ops:
+ scope_json["ops"] = [op.value for op in scope.ops]
+
+ result: dict[str, Any] = {
+ "id": id,
+ "scope": scope_json,
+ "auto_prefix_streams": auto_prefix_streams,
+ }
+ if expires_at is not None:
+ result["expires_at"] = expires_at
+ return result
+
+
+def access_token_info_from_json(data: dict[str, Any]) -> AccessTokenInfo:
+ scope_data = data["scope"]
+ og_data = scope_data.get("op_groups")
+ op_groups = None
+ if og_data:
+ op_groups = OperationGroupPermissions(
+ account=_rw_perms_from_json(og_data.get("account")),
+ basin=_rw_perms_from_json(og_data.get("basin")),
+ stream=_rw_perms_from_json(og_data.get("stream")),
+ )
+
+ ops_data = scope_data.get("ops")
+ ops = [Operation(op) for op in ops_data] if ops_data else []
+
+ return AccessTokenInfo(
+ id=data["id"],
+ scope=AccessTokenScope(
+ basins=_resource_set_from_json(scope_data.get("basins")),
+ streams=_resource_set_from_json(scope_data.get("streams")),
+ access_tokens=_resource_set_from_json(scope_data.get("access_tokens")),
+ op_groups=op_groups,
+ ops=ops,
+ ),
+ expires_at=data.get("expires_at"),
+ auto_prefix_streams=data.get("auto_prefix_streams", False),
+ )
+
+
+def metric_set_from_json(
+ data: dict[str, Any],
+) -> list[Scalar | Accumulation | Gauge | Label]:
+ result: list[Scalar | Accumulation | Gauge | Label] = []
+ for metric in data.get("values", []):
+ if "scalar" in metric:
+ s = metric["scalar"]
+ result.append(
+ Scalar(
+ name=s["name"],
+ unit=MetricUnit(s["unit"]),
+ value=s["value"],
+ )
+ )
+ elif "accumulation" in metric:
+ a = metric["accumulation"]
+ bl = a.get("interval")
+ result.append(
+ Accumulation(
+ name=a["name"],
+ unit=MetricUnit(a["unit"]),
+ interval=TimeseriesInterval(bl) if bl else None,
+ values=[(int(v[0]), float(v[1])) for v in a["values"]],
+ )
+ )
+ elif "gauge" in metric:
+ g = metric["gauge"]
+ result.append(
+ Gauge(
+ name=g["name"],
+ unit=MetricUnit(g["unit"]),
+ values=[(int(v[0]), float(v[1])) for v in g["values"]],
+ )
+ )
+ elif "label" in metric:
+ lb = metric["label"]
+ result.append(
+ Label(
+ name=lb["name"],
+ values=lb["values"],
+ )
+ )
+ return result
+
+
+def tail_from_json(data: dict[str, Any]) -> StreamPosition:
+ tail = data["tail"]
+ return StreamPosition(seq_num=tail["seq_num"], timestamp=tail["timestamp"])
+
+
+def append_record_to_proto(record: Record) -> pb.AppendRecord:
+ headers = [pb.Header(name=name, value=value) for (name, value) in record.headers]
+ return pb.AppendRecord(
+ timestamp=record.timestamp, headers=headers, body=record.body
+ )
+
+
+def append_input_to_proto(inp: AppendInput) -> pb.AppendInput:
+ records = [append_record_to_proto(r) for r in inp.records]
+ return pb.AppendInput(
+ records=records,
+ match_seq_num=inp.match_seq_num,
+ fencing_token=inp.fencing_token,
+ )
+
+
+def append_ack_from_proto(ack: pb.AppendAck) -> AppendAck:
+ return AppendAck(
+ start=StreamPosition(ack.start.seq_num, ack.start.timestamp),
+ end=StreamPosition(ack.end.seq_num, ack.end.timestamp),
+ tail=StreamPosition(ack.tail.seq_num, ack.tail.timestamp),
+ )
+
+
+def read_batch_from_proto(
+ batch: pb.ReadBatch, ignore_command_records: bool = False
+) -> ReadBatch:
+ records = []
+ for sr in batch.records:
+ if ignore_command_records and _is_command_record(sr):
+ continue
+ records.append(
+ SequencedRecord(
+ seq_num=sr.seq_num,
+ body=sr.body,
+ headers=[(h.name, h.value) for h in sr.headers],
+ timestamp=sr.timestamp,
+ )
+ )
+ tail = None
+ if batch.HasField("tail"):
+ tail = StreamPosition(
+ seq_num=batch.tail.seq_num,
+ timestamp=batch.tail.timestamp,
+ )
+ return ReadBatch(records=records, tail=tail)
+
+
+def sequenced_record_from_proto(sr: pb.SequencedRecord) -> SequencedRecord:
+ return SequencedRecord(
+ seq_num=sr.seq_num,
+ body=sr.body,
+ headers=[(h.name, h.value) for h in sr.headers],
+ timestamp=sr.timestamp,
+ )
+
+
+def _is_command_record(sr: pb.SequencedRecord) -> bool:
+ if len(sr.headers) == 1 and sr.headers[0].name == b"":
+ return True
+ return False
+
+
+def read_start_params(start: _ReadStart) -> dict[str, Any]:
+ if isinstance(start, SeqNum):
+ return {"seq_num": start.value}
+ elif isinstance(start, Timestamp):
+ return {"timestamp": start.value}
+ elif isinstance(start, TailOffset):
+ return {"tail_offset": start.value}
+ from s2_sdk._exceptions import S2ClientError
+
+ raise S2ClientError("start doesn't match any of the expected types")
+
+
+def read_limit_params(limit: ReadLimit | None) -> dict[str, Any]:
+ params: dict[str, Any] = {}
+ if limit:
+ if limit.count is not None:
+ params["count"] = limit.count
+ if limit.bytes is not None:
+ params["bytes"] = limit.bytes
+ return params
diff --git a/src/s2_sdk/_ops.py b/src/s2_sdk/_ops.py
new file mode 100644
index 0000000..0638afd
--- /dev/null
+++ b/src/s2_sdk/_ops.py
@@ -0,0 +1,1068 @@
+import uuid
+from collections.abc import AsyncIterable, AsyncIterator
+from typing import Any, Self
+from urllib.parse import quote
+
+import s2_sdk._generated.s2.v1.s2_pb2 as pb
+from s2_sdk import _types as types
+from s2_sdk._append_session import AppendSession
+from s2_sdk._client import ConnectionPool, HttpClient
+from s2_sdk._exceptions import S2ServerError, fallible
+from s2_sdk._mappers import (
+ access_token_info_from_json,
+ access_token_info_to_json,
+ append_ack_from_proto,
+ append_input_to_proto,
+ basin_config_from_json,
+ basin_config_to_json,
+ basin_info_from_json,
+ basin_reconfiguration_to_json,
+ metric_set_from_json,
+ read_batch_from_proto,
+ read_limit_params,
+ read_start_params,
+ stream_config_from_json,
+ stream_config_to_json,
+ stream_info_from_json,
+ stream_reconfiguration_to_json,
+ tail_from_json,
+)
+from s2_sdk._producer import Producer
+from s2_sdk._retrier import Retrier, http_retry_on, is_safe_to_retry_unary
+from s2_sdk._s2s._read_session import run_read_session
+from s2_sdk._types import ONE_MIB, Compression, Endpoints, Retry, Timeout, metered_bytes
+from s2_sdk._validators import (
+ validate_append_input,
+ validate_basin,
+ validate_batching,
+ validate_max_unacked,
+ validate_retry,
+)
+
+
+class S2:
+ """Client for S2, an API for unlimited, durable, real-time streams.
+
+ Works with both the `cloud `_ and
+ `open source, self-hosted `_ versions.
+
+ Args:
+ access_token: Access token for authenticating with S2.
+ endpoints: S2 endpoints. If ``None``, defaults to public cloud
+ endpoints. See :class:`Endpoints`.
+ timeout: Timeout configuration. If ``None``, default values are
+ used. See :class:`Timeout`.
+ retry: Retry configuration. If ``None``, default values are
+ used. See :class:`Retry`.
+ compression: Compression algorithm for requests and responses.
+ Defaults to ``NONE``. See :class:`Compression`.
+
+ Tip:
+ Use as an async context manager to ensure connections are closed::
+
+ async with S2(token) as s2:
+ ...
+
+ Warning:
+ If not using a context manager, call :meth:`close` when done.
+ """
+
+ __slots__ = (
+ "_account_client",
+ "_auth_header",
+ "_basin_clients",
+ "_compression",
+ "_endpoints",
+ "_pool",
+ "_request_timeout",
+ "_retry",
+ "_retrier",
+ )
+
+ @fallible
+ def __init__(
+ self,
+ access_token: str,
+ *,
+ endpoints: Endpoints | None = None,
+ timeout: Timeout | None = None,
+ retry: Retry | None = None,
+ compression: Compression = Compression.NONE,
+ ) -> None:
+ if endpoints is None:
+ endpoints = Endpoints.default()
+ if timeout is None:
+ timeout = Timeout()
+ if retry is None:
+ retry = Retry()
+ validate_retry(retry.max_attempts)
+ self._endpoints = endpoints
+ self._retry = retry
+ self._compression = compression
+ self._auth_header = ("authorization", f"Bearer {access_token}")
+ self._pool = ConnectionPool(
+ connect_timeout=timeout.connection.total_seconds(),
+ )
+ self._request_timeout = timeout.request.total_seconds()
+ self._account_client = HttpClient(
+ pool=self._pool,
+ base_url=endpoints._account_url(),
+ request_timeout=self._request_timeout,
+ headers={self._auth_header[0]: self._auth_header[1]},
+ compression=compression,
+ )
+ self._basin_clients: dict[str, HttpClient] = {}
+ self._retrier = Retrier(
+ should_retry_on=http_retry_on,
+ max_attempts=retry.max_attempts,
+ min_base_delay=retry.min_base_delay.total_seconds(),
+ max_base_delay=retry.max_base_delay.total_seconds(),
+ )
+
+ async def __aenter__(self) -> Self:
+ return self
+
+ async def __aexit__(self, exc_type, exc_value, traceback) -> bool:
+ await self.close()
+ return False
+
+ def __getitem__(self, name: str) -> "S2Basin":
+ return self.basin(name)
+
+ async def close(self) -> None:
+ """Close all open connections to S2 service endpoints."""
+ await self._pool.close()
+
+ def _get_basin_client(self, name: str) -> HttpClient:
+ if name not in self._basin_clients:
+ headers = {self._auth_header[0]: self._auth_header[1]}
+ if self._endpoints._is_direct_basin():
+ headers["s2-basin"] = name
+ self._basin_clients[name] = HttpClient(
+ pool=self._pool,
+ base_url=self._endpoints._basin_url(name),
+ request_timeout=self._request_timeout,
+ headers=headers,
+ compression=self._compression,
+ )
+ return self._basin_clients[name]
+
+ @fallible
+ async def create_basin(
+ self,
+ name: str,
+ *,
+ config: types.BasinConfig | None = None,
+ ) -> types.BasinInfo:
+ """Create a basin.
+
+ Args:
+ name: Name of the basin.
+ config: Configuration for the basin.
+
+ Returns:
+ Information about the created basin.
+
+ Note:
+ ``name`` must be globally unique, 8--48 characters, comprising lowercase
+ letters, numbers, and hyphens. It cannot begin or end with a hyphen.
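+
+        Tip:
+            A minimal call (illustrative; ``s2`` is an :class:`S2` client)::
+
+                info = await s2.create_basin("my-basin")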
+ """
+ validate_basin(name)
+ json: dict[str, Any] = {"basin": name}
+ if config is not None:
+ json["config"] = basin_config_to_json(config)
+
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "POST",
+ "/v1/basins",
+ json=json,
+ headers={"s2-request-token": _s2_request_token()},
+ )
+ return basin_info_from_json(response.json())
+
+ def basin(self, name: str) -> "S2Basin":
+ """Get an :class:`S2Basin` for performing basin-level operations.
+
+ Args:
+ name: Name of the basin.
+
+ Returns:
+ An :class:`S2Basin` bound to the given basin name.
+
+ Tip:
+ Also available via subscript: ``s2["my-basin"]``.
+ """
+ validate_basin(name)
+ return S2Basin(
+ name,
+ self._get_basin_client(name),
+ retry=self._retry,
+ compression=self._compression,
+ )
+
+ @fallible
+ async def list_basins(
+ self,
+ *,
+ prefix: str = "",
+ start_after: str = "",
+ limit: int = 1000,
+ ) -> types.Page[types.BasinInfo]:
+ """List a page of basins.
+
+ Args:
+ prefix: Filter to basins whose name starts with this prefix.
+ start_after: List basins whose name is lexicographically after this value.
+ limit: Maximum number of basins to return per page.
+
+ Returns:
+ A page of :class:`BasinInfo`.
+
+ Tip:
+ See :meth:`list_all_basins` for automatic pagination.
+ """
+ params: dict[str, Any] = {}
+ if prefix:
+ params["prefix"] = prefix
+ if start_after:
+ params["start_after"] = start_after
+ if limit != 1000:
+ params["limit"] = limit
+
+ response = await self._retrier(
+ self._account_client.unary_request, "GET", "/v1/basins", params=params
+ )
+ data = response.json()
+ return types.Page(
+ items=[basin_info_from_json(b) for b in data["basins"]],
+ has_more=data["has_more"],
+ )
+
+ @fallible
+ async def list_all_basins(
+ self,
+ *,
+ prefix: str = "",
+ start_after: str = "",
+ include_deleted: bool = False,
+ ) -> AsyncIterator[types.BasinInfo]:
+ """List all basins, paginating automatically.
+
+ Args:
+ prefix: Filter to basins whose name starts with this prefix.
+ start_after: List basins whose name is lexicographically after this value.
+ include_deleted: Include basins that are being deleted.
+
+ Yields:
+ :class:`BasinInfo` for each basin.
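+
+        Tip:
+            Iterate over every basin (illustrative)::
+
+                async for info in s2.list_all_basins():
+                    print(info.name)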
+ """
+ while True:
+ page = await self.list_basins(prefix=prefix, start_after=start_after)
+ for info in page.items:
+ if not include_deleted and info.deleted_at is not None:
+ continue
+ yield info
+ if not page.has_more or not page.items:
+ break
+ start_after = page.items[-1].name
+
+ @fallible
+ async def delete_basin(self, name: str, *, ignore_not_found: bool = False) -> None:
+ """Delete a basin.
+
+ Args:
+ name: Name of the basin to delete.
+ ignore_not_found: If ``True``, do not raise on 404.
+
+ Note:
+ Basin deletion is asynchronous and may take several minutes to complete.
+ """
+ await _maybe_not_found(
+ self._retrier(
+ self._account_client.unary_request, "DELETE", f"/v1/basins/{name}"
+ ),
+ ignore=ignore_not_found,
+ )
+
+ @fallible
+ async def get_basin_config(self, name: str) -> types.BasinConfig:
+ """Get basin configuration.
+
+ Args:
+ name: Name of the basin.
+
+ Returns:
+ Current configuration of the basin.
+ """
+ response = await self._retrier(
+ self._account_client.unary_request, "GET", f"/v1/basins/{name}"
+ )
+ return basin_config_from_json(response.json())
+
+ @fallible
+ async def reconfigure_basin(
+ self,
+ name: str,
+ *,
+ config: types.BasinConfig,
+ ) -> types.BasinConfig:
+ """Reconfigure a basin.
+
+ Args:
+ name: Name of the basin.
+ config: New configuration. Only provided fields are updated.
+
+ Returns:
+ Updated basin configuration.
+
+ Note:
+ Modifying ``default_stream_config`` only affects newly created streams.
+ """
+ json = basin_reconfiguration_to_json(config)
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "PATCH",
+ f"/v1/basins/{name}",
+ json=json,
+ )
+ return basin_config_from_json(response.json())
+
+ @fallible
+ async def issue_access_token(
+ self,
+ id: str,
+ *,
+ scope: types.AccessTokenScope,
+ expires_at: str | None = None,
+ auto_prefix_streams: bool = False,
+ ) -> str:
+ """Issue an access token.
+
+ Args:
+ id: Unique identifier for the token (1--96 bytes).
+ scope: Permissions scope for the token.
+ expires_at: Optional expiration time (RFC 3339).
+ auto_prefix_streams: Automatically prefix stream names during
+ creation and strip the prefix during listing.
+
+ Returns:
+ The access token string.
+ """
+ json = access_token_info_to_json(id, scope, auto_prefix_streams, expires_at)
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "POST",
+ "/v1/access-tokens",
+ json=json,
+ )
+ return response.json()["access_token"]
+
+ @fallible
+ async def list_access_tokens(
+ self,
+ *,
+ prefix: str = "",
+ start_after: str = "",
+ limit: int = 1000,
+ ) -> types.Page[types.AccessTokenInfo]:
+ """List a page of access tokens.
+
+ Args:
+ prefix: Filter to tokens whose ID starts with this prefix.
+ start_after: List tokens whose ID is lexicographically after this value.
+ limit: Maximum number of tokens to return per page.
+
+ Returns:
+ A page of :class:`AccessTokenInfo`.
+
+ Tip:
+ See :meth:`list_all_access_tokens` for automatic pagination.
+ """
+ params: dict[str, Any] = {}
+ if prefix:
+ params["prefix"] = prefix
+ if start_after:
+ params["start_after"] = start_after
+ if limit != 1000:
+ params["limit"] = limit
+
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "GET",
+ "/v1/access-tokens",
+ params=params,
+ )
+ data = response.json()
+ return types.Page(
+ items=[access_token_info_from_json(info) for info in data["access_tokens"]],
+ has_more=data["has_more"],
+ )
+
+ @fallible
+ async def list_all_access_tokens(
+ self,
+ *,
+ prefix: str = "",
+ start_after: str = "",
+ ) -> AsyncIterator[types.AccessTokenInfo]:
+ """List all access tokens, paginating automatically.
+
+ Args:
+ prefix: Filter to tokens whose ID starts with this prefix.
+ start_after: List tokens whose ID is lexicographically after this value.
+
+ Yields:
+ :class:`AccessTokenInfo` for each token.
+ """
+ while True:
+ page = await self.list_access_tokens(prefix=prefix, start_after=start_after)
+ for info in page.items:
+ yield info
+ if not page.has_more or not page.items:
+ break
+ start_after = page.items[-1].id
+
+ @fallible
+ async def revoke_access_token(self, id: str) -> None:
+ """Revoke an access token.
+
+ Args:
+ id: Identifier of the token to revoke.
+ """
+ await self._retrier(
+ self._account_client.unary_request, "DELETE", _access_token_path(id)
+ )
+
+ @fallible
+ async def account_metrics(
+ self,
+ *,
+ set: types.AccountMetricSet,
+ start: int | None = None,
+ end: int | None = None,
+ interval: types.TimeseriesInterval | None = None,
+ ) -> list[types.Scalar | types.Accumulation | types.Gauge | types.Label]:
+ """Get account metrics.
+
+ Args:
+ set: Metric set to query.
+ start: Start of the time range (epoch seconds).
+ end: End of the time range (epoch seconds).
+ interval: Accumulation interval for timeseries metrics.
+
+ Returns:
+ List of metric values.
+ """
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "GET",
+ "/v1/metrics",
+ params=_metrics_params(set.value, start, end, interval),
+ )
+ return metric_set_from_json(response.json())
+
+ @fallible
+ async def basin_metrics(
+ self,
+ basin: str,
+ *,
+ set: types.BasinMetricSet,
+ start: int | None = None,
+ end: int | None = None,
+ interval: types.TimeseriesInterval | None = None,
+ ) -> list[types.Scalar | types.Accumulation | types.Gauge | types.Label]:
+ """Get basin metrics.
+
+ Args:
+ basin: Name of the basin.
+ set: Metric set to query.
+ start: Start of the time range (epoch seconds).
+ end: End of the time range (epoch seconds).
+ interval: Accumulation interval for timeseries metrics.
+
+ Returns:
+ List of metric values.
+ """
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "GET",
+ f"/v1/metrics/{_encode_path_segment(basin)}",
+ params=_metrics_params(set.value, start, end, interval),
+ )
+ return metric_set_from_json(response.json())
+
+ @fallible
+ async def stream_metrics(
+ self,
+ basin: str,
+ stream: str,
+ *,
+ set: types.StreamMetricSet,
+ start: int | None = None,
+ end: int | None = None,
+ interval: types.TimeseriesInterval | None = None,
+ ) -> list[types.Scalar | types.Accumulation | types.Gauge | types.Label]:
+ """Get stream metrics.
+
+ Args:
+ basin: Name of the basin.
+ stream: Name of the stream.
+ set: Metric set to query.
+ start: Start of the time range (epoch seconds).
+ end: End of the time range (epoch seconds).
+ interval: Accumulation interval for timeseries metrics.
+
+ Returns:
+ List of metric values.
+ """
+ response = await self._retrier(
+ self._account_client.unary_request,
+ "GET",
+ (
+ f"/v1/metrics/{_encode_path_segment(basin)}"
+ f"/{_encode_path_segment(stream)}"
+ ),
+ params=_metrics_params(set.value, start, end, interval),
+ )
+ return metric_set_from_json(response.json())
+
+
+class S2Basin:
+ """
+ Caution:
+ Returned by :meth:`S2.basin`. Do not instantiate directly.
+ """
+
+ __slots__ = (
+ "_name",
+ "_client",
+ "_compression",
+ "_retry",
+ "_retrier",
+ )
+
+ @fallible
+ def __init__(
+ self,
+ name: str,
+ client: HttpClient,
+ *,
+ retry: Retry,
+ compression: Compression,
+ ) -> None:
+ self._name = name
+ self._client = client
+ self._retry = retry
+ self._compression = compression
+ self._retrier = Retrier(
+ should_retry_on=http_retry_on,
+ max_attempts=retry.max_attempts,
+ min_base_delay=retry.min_base_delay.total_seconds(),
+ max_base_delay=retry.max_base_delay.total_seconds(),
+ )
+
+ def __repr__(self) -> str:
+ return f"S2Basin(name={self.name})"
+
+ def __getitem__(self, name: str) -> "S2Stream":
+ return self.stream(name)
+
+ @property
+ def name(self) -> str:
+ """Basin name."""
+ return self._name
+
+ @fallible
+ async def create_stream(
+ self,
+ name: str,
+ *,
+ config: types.StreamConfig | None = None,
+ ) -> types.StreamInfo:
+ """Create a stream.
+
+ Args:
+ name: Name of the stream.
+ config: Configuration for the stream.
+
+ Returns:
+ Information about the created stream.
+
+ Note:
+ ``name`` must be unique within the basin. It can be an arbitrary string
+ up to 512 characters. ``/`` is recommended as a delimiter for
+ hierarchical naming.
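+
+        Tip:
+            A minimal call (illustrative)::
+
+                info = await basin.create_stream("logs/service-1")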
+ """
+ json: dict[str, Any] = {"stream": name}
+ if config is not None:
+ json["config"] = stream_config_to_json(config)
+
+ response = await self._retrier(
+ self._client.unary_request,
+ "POST",
+ "/v1/streams",
+ json=json,
+ headers={"s2-request-token": _s2_request_token()},
+ )
+ return stream_info_from_json(response.json())
+
+ def stream(self, name: str) -> "S2Stream":
+ """Get an :class:`S2Stream` for performing stream-level operations.
+
+ Args:
+ name: Name of the stream.
+
+ Returns:
+ An :class:`S2Stream` bound to the given stream name.
+
+ Tip:
+ Also available via subscript: ``s2["my-basin"]["my-stream"]``.
+ """
+ return S2Stream(
+ name,
+ self._client,
+ retry=self._retry,
+ compression=self._compression,
+ )
+
+ @fallible
+ async def list_streams(
+ self,
+ *,
+ prefix: str = "",
+ start_after: str = "",
+ limit: int = 1000,
+ ) -> types.Page[types.StreamInfo]:
+ """List a page of streams.
+
+ Args:
+ prefix: Filter to streams whose name starts with this prefix.
+ start_after: List streams whose name is lexicographically after this value.
+ limit: Maximum number of streams to return per page.
+
+ Returns:
+ A page of :class:`StreamInfo`.
+
+ Tip:
+ See :meth:`list_all_streams` for automatic pagination.
+ """
+ params: dict[str, Any] = {}
+ if prefix:
+ params["prefix"] = prefix
+ if start_after:
+ params["start_after"] = start_after
+ if limit != 1000:
+ params["limit"] = limit
+
+ response = await self._retrier(
+ self._client.unary_request, "GET", "/v1/streams", params=params
+ )
+ data = response.json()
+ return types.Page(
+ items=[stream_info_from_json(s) for s in data["streams"]],
+ has_more=data["has_more"],
+ )
+
+ @fallible
+ async def list_all_streams(
+ self,
+ *,
+ prefix: str = "",
+ start_after: str = "",
+ include_deleted: bool = False,
+ ) -> AsyncIterator[types.StreamInfo]:
+ """List all streams, paginating automatically.
+
+ Args:
+ prefix: Filter to streams whose name starts with this prefix.
+ start_after: List streams whose name is lexicographically after this value.
+ include_deleted: Include streams that are being deleted.
+
+ Yields:
+ :class:`StreamInfo` for each stream.
+ """
+ while True:
+ page = await self.list_streams(prefix=prefix, start_after=start_after)
+ for info in page.items:
+ if not include_deleted and info.deleted_at is not None:
+ continue
+ yield info
+ if not page.has_more or not page.items:
+ break
+ start_after = page.items[-1].name
+
+ @fallible
+ async def delete_stream(self, name: str, *, ignore_not_found: bool = False) -> None:
+ """Delete a stream.
+
+ Args:
+ name: Name of the stream to delete.
+ ignore_not_found: If ``True``, do not raise on 404.
+
+ Note:
+ Stream deletion is asynchronous and may take several minutes to complete.
+ """
+ await _maybe_not_found(
+ self._retrier(self._client.unary_request, "DELETE", _stream_path(name)),
+ ignore=ignore_not_found,
+ )
+
+ @fallible
+ async def get_stream_config(self, name: str) -> types.StreamConfig:
+ """Get stream configuration.
+
+ Args:
+ name: Name of the stream.
+
+ Returns:
+ Current configuration of the stream.
+ """
+ response = await self._retrier(
+ self._client.unary_request, "GET", _stream_path(name)
+ )
+ return stream_config_from_json(response.json())
+
+ @fallible
+ async def reconfigure_stream(
+ self,
+ name: str,
+ *,
+ config: types.StreamConfig,
+ ) -> types.StreamConfig:
+ """Reconfigure a stream.
+
+ Args:
+ name: Name of the stream.
+ config: New configuration. Only provided fields are updated.
+
+ Returns:
+ Updated stream configuration.
+ """
+ json = stream_reconfiguration_to_json(config)
+ response = await self._retrier(
+ self._client.unary_request, "PATCH", _stream_path(name), json=json
+ )
+ return stream_config_from_json(response.json())
+
+
+class S2Stream:
+ """
+ Caution:
+ Returned by :meth:`S2Basin.stream`. Do not instantiate directly.
+ """
+
+ __slots__ = (
+ "_name",
+ "_client",
+ "_compression",
+ "_retry",
+ "_retrier",
+ "_append_retrier",
+ )
+
+ def __init__(
+ self,
+ name: str,
+ client: HttpClient,
+ *,
+ retry: Retry,
+ compression: Compression,
+ ) -> None:
+ self._name = name
+ self._client = client
+ self._retry = retry
+ self._compression = compression
+ self._retrier = Retrier(
+ should_retry_on=http_retry_on,
+ max_attempts=retry.max_attempts,
+ min_base_delay=retry.min_base_delay.total_seconds(),
+ max_base_delay=retry.max_base_delay.total_seconds(),
+ )
+ self._append_retrier = Retrier(
+ should_retry_on=lambda e: is_safe_to_retry_unary(
+ e, retry.append_retry_policy
+ ),
+ max_attempts=retry.max_attempts,
+ min_base_delay=retry.min_base_delay.total_seconds(),
+ max_base_delay=retry.max_base_delay.total_seconds(),
+ )
+
+ def __repr__(self) -> str:
+ return f"S2Stream(name={self.name})"
+
+ @property
+ def name(self) -> str:
+ """Stream name."""
+ return self._name
+
+ @fallible
+ async def check_tail(self) -> types.StreamPosition:
+ """Check the tail of a stream.
+
+ Returns:
+ The tail position — the next sequence number to be assigned and the
+ timestamp of the last record on the stream.
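+
+        Tip:
+            For example (illustrative)::
+
+                tail = await stream.check_tail()
+                next_seq_num = tail.seq_num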
+ """
+ response = await self._retrier(
+ self._client.unary_request,
+ "GET",
+ _stream_path(self.name, "/records/tail"),
+ )
+ return tail_from_json(response.json())
+
+ @fallible
+ async def append(self, inp: types.AppendInput) -> types.AppendAck:
+ """Append a batch of records to a stream.
+
+ Args:
+ inp: Batch of records and optional conditions.
+
+ Returns:
+ Acknowledgement with assigned sequence numbers and tail position.
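+
+        Tip:
+            A minimal append (illustrative)::
+
+                ack = await stream.append(
+                    AppendInput(records=[Record(body=b"hello")])
+                )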
+ """
+ validate_append_input(len(inp.records), metered_bytes(inp.records))
+ proto = append_input_to_proto(inp)
+ body = proto.SerializeToString()
+
+ response = await self._append_retrier(
+ self._client.unary_request,
+ "POST",
+ _stream_path(self.name, "/records"),
+ content=body,
+ headers={
+ "content-type": "application/x-protobuf",
+ "accept": "application/x-protobuf",
+ },
+ )
+ ack = pb.AppendAck()
+ ack.ParseFromString(response.content)
+ return append_ack_from_proto(ack)
+
+ def append_session(
+ self,
+ *,
+ max_unacked_bytes: int = 5 * ONE_MIB,
+ max_unacked_batches: int | None = None,
+ ) -> AppendSession:
+ """Open a session for appending batches of records continuously.
+
+ Pipelined inputs are guaranteed to be processed in order.
+
+ Args:
+ max_unacked_bytes: Maximum total metered bytes of unacknowledged
+ batches before backpressure is applied. Default is 5 MiB.
+ max_unacked_batches: Maximum number of unacknowledged batches
+ before backpressure is applied. If ``None``, no limit is applied.
+
+ Returns:
+ An :class:`AppendSession` to use as an async context manager.
+
+ Tip:
+ Use as an async context manager::
+
+ async with stream.append_session() as session:
+ ticket = await session.submit(AppendInput(records=[...]))
+ ack = await ticket
+
+ Warning:
+ If not using a context manager, call :meth:`AppendSession.close` to
+ ensure all submitted batches are appended.
+ """
+ validate_max_unacked(max_unacked_bytes, max_unacked_batches)
+ return AppendSession(
+ client=self._client,
+ stream_name=self.name,
+ retry=self._retry,
+ compression=self._compression,
+ max_unacked_bytes=max_unacked_bytes,
+ max_unacked_batches=max_unacked_batches,
+ )
+
+ def producer(
+ self,
+ *,
+ fencing_token: str | None = None,
+ match_seq_num: int | None = None,
+ batching: types.Batching | None = None,
+ max_unacked_bytes: int = 5 * ONE_MIB,
+ ) -> Producer:
+ """Open a producer with per-record submit and auto-batching.
+
+ Args:
+ fencing_token: Fencing token applied to every batch.
+            match_seq_num: Expected sequence number for the first record.
+                Automatically advanced as batches are submitted.
+ batching: Auto-batching configuration. If ``None``, default
+ values are used. See :class:`Batching`.
+ max_unacked_bytes: Maximum total metered bytes of unacknowledged
+ batches before backpressure is applied. Default is 5 MiB.
+
+ Returns:
+ A :class:`Producer` to use as an async context manager.
+
+ Tip:
+ Use as an async context manager::
+
+ async with stream.producer() as p:
+ ticket = await p.submit(Record(body=b"hello"))
+ ack = await ticket
+
+ Warning:
+ If not using a context manager, call :meth:`Producer.close` to
+ ensure all submitted records are appended.
+ """
+ if batching is None:
+ batching = types.Batching()
+ validate_max_unacked(max_unacked_bytes)
+ validate_batching(batching.max_records, batching.max_bytes)
+ return Producer(
+ client=self._client,
+ stream_name=self.name,
+ retry=self._retry,
+ compression=self._compression,
+ fencing_token=fencing_token,
+ match_seq_num=match_seq_num,
+ max_unacked_bytes=max_unacked_bytes,
+ batching=batching,
+ )
+
+ @fallible
+ async def read(
+ self,
+ *,
+ start: types.SeqNum | types.Timestamp | types.TailOffset,
+ limit: types.ReadLimit | None = None,
+ until_timestamp: int | None = None,
+ clamp_to_tail: bool = False,
+ wait: int | None = None,
+ ignore_command_records: bool = False,
+ ) -> types.ReadBatch:
+ """Read a batch of records from a stream.
+
+ Args:
+ start: Inclusive start position.
+ limit: Maximum number of records or metered bytes to return.
+ until_timestamp: Exclusive upper-bound timestamp. All returned records
+ are guaranteed to have timestamps less than this value.
+ clamp_to_tail: Clamp the start position to the tail when it
+ exceeds the tail, instead of raising.
+ wait: Number of seconds to wait for records before returning.
+ ignore_command_records: Filter out command records from the batch.
+
+ Returns:
+            A :class:`ReadBatch` containing sequenced records and an optional
+            tail position. The records list can be empty only if ``limit``,
+            ``until_timestamp``, or ``wait`` was provided.
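+
+        Tip:
+            Read from the start of the stream (illustrative; assumes
+            :class:`SeqNum` wraps a starting sequence number)::
+
+                batch = await stream.read(start=SeqNum(0))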
+ """
+ params: dict[str, Any] = {}
+ params.update(read_start_params(start))
+ params.update(read_limit_params(limit))
+ if until_timestamp is not None:
+ params["until"] = until_timestamp
+ if clamp_to_tail:
+ params["clamp"] = "true"
+ if wait is not None:
+ params["wait"] = wait
+
+ response = await self._retrier(
+ self._client.unary_request,
+ "GET",
+ _stream_path(self.name, "/records"),
+ params=params,
+ headers={"accept": "application/x-protobuf"},
+ )
+
+ proto_batch = pb.ReadBatch()
+ proto_batch.ParseFromString(response.content)
+ return read_batch_from_proto(proto_batch, ignore_command_records)
+
+ @fallible
+ async def read_session(
+ self,
+ *,
+ start: types.SeqNum | types.Timestamp | types.TailOffset,
+ limit: types.ReadLimit | None = None,
+ until_timestamp: int | None = None,
+ clamp_to_tail: bool = False,
+ wait: int | None = None,
+ ignore_command_records: bool = False,
+ ) -> AsyncIterable[types.ReadBatch]:
+ """Read batches of records from a stream continuously.
+
+ Args:
+ start: Inclusive start position.
+ limit: Maximum number of records or metered bytes to return across
+ the entire session.
+ until_timestamp: Exclusive upper-bound timestamp. All returned records
+ are guaranteed to have timestamps less than this value.
+ clamp_to_tail: Clamp the start position to the tail when it
+ exceeds the tail, instead of raising.
+ wait: Number of seconds to wait for new records when the tail is
+ reached.
+ ignore_command_records: Filter out command records from batches.
+
+ Yields:
+ :class:`ReadBatch` — each containing a batch of records and an
+ optional tail position.
+
+ Note:
+            Sessions without bounds (no ``limit`` or ``until_timestamp``) default
+            to infinite ``wait``, waiting for new records indefinitely. Sessions
+            with bounds default to zero ``wait``, ending when the bounds are met
+            or the tail is reached. Setting a non-zero ``wait`` makes a bounded
+            session wait up to that many seconds for new records before ending.
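+
+        Tip:
+            Tail a stream continuously (illustrative; assumes
+            :class:`TailOffset` wraps an offset from the tail)::
+
+                async for batch in stream.read_session(start=TailOffset(0)):
+                    for record in batch.records:
+                        ...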
+ """
+ async for batch in run_read_session(
+ self._client,
+ self.name,
+ start,
+ limit,
+ until_timestamp,
+ clamp_to_tail,
+ wait,
+ ignore_command_records,
+ retry=self._retry,
+ ):
+ yield batch
+
+
+def _s2_request_token() -> str:
+ return uuid.uuid4().hex
+
+
+def _encode_path_segment(value: str) -> str:
+ return quote(value, safe="")
+
+
+def _stream_path(name: str, suffix: str = "") -> str:
+ return f"/v1/streams/{_encode_path_segment(name)}{suffix}"
+
+
+def _access_token_path(id: str) -> str:
+ return f"/v1/access-tokens/{_encode_path_segment(id)}"
+
+
+def _metrics_params(
+ set_value: str,
+ start: int | None,
+ end: int | None,
+ interval: types.TimeseriesInterval | None,
+) -> dict[str, Any]:
+ params: dict[str, Any] = {"set": set_value}
+ if start is not None:
+ params["start"] = start
+ if end is not None:
+ params["end"] = end
+ if interval is not None:
+ params["interval"] = interval.value
+ return params
+
+
+async def _maybe_not_found(coro, *, ignore: bool) -> None:
+ try:
+ await coro
+ except S2ServerError as e:
+ if ignore and e.status_code == 404:
+ return
+ raise
diff --git a/src/s2_sdk/_producer.py b/src/s2_sdk/_producer.py
new file mode 100644
index 0000000..8095e32
--- /dev/null
+++ b/src/s2_sdk/_producer.py
@@ -0,0 +1,219 @@
+from __future__ import annotations
+
+import asyncio
+from collections import deque
+from dataclasses import dataclass
+from typing import Self
+
+from s2_sdk._append_session import AppendSession, BatchSubmitTicket
+from s2_sdk._batching import BatchAccumulator
+from s2_sdk._client import HttpClient
+from s2_sdk._exceptions import S2ClientError
+from s2_sdk._types import (
+ AppendAck,
+ AppendInput,
+ Batching,
+ Compression,
+ IndexedAppendAck,
+ Record,
+ Retry,
+)
+
+
+@dataclass(slots=True)
+class _UnackedBatch:
+ ticket: BatchSubmitTicket
+ indexed_ack_futs: tuple[asyncio.Future[IndexedAppendAck], ...]
+
+
+class Producer:
+ """High-level interface for submitting individual records.
+
+ Handles batching into :class:`AppendInput` automatically and uses an
+ append session internally.
+
+ Caution:
+ Returned by :meth:`S2Stream.producer`. Do not instantiate directly.
+ """
+
+ __slots__ = (
+ "_accumulator",
+ "_indexed_ack_futs",
+ "_batch_ready",
+ "_closed",
+ "_drain_task",
+ "_error",
+ "_fencing_token",
+ "_linger_task",
+ "_match_seq_num",
+ "_unacked",
+ "_session",
+ )
+
+ def __init__(
+ self,
+ client: HttpClient,
+ stream_name: str,
+ retry: Retry,
+ compression: Compression,
+ fencing_token: str | None,
+ match_seq_num: int | None,
+ max_unacked_bytes: int,
+ batching: Batching,
+ ) -> None:
+ self._session = AppendSession(
+ client=client,
+ stream_name=stream_name,
+ retry=retry,
+ compression=compression,
+ max_unacked_bytes=max_unacked_bytes,
+ max_unacked_batches=None,
+ )
+ self._fencing_token = fencing_token
+ self._match_seq_num = match_seq_num
+ self._accumulator = BatchAccumulator(batching)
+
+ self._indexed_ack_futs: list[asyncio.Future[IndexedAppendAck]] = []
+ self._linger_task: asyncio.Task[None] | None = None
+ self._unacked: deque[_UnackedBatch] = deque()
+ self._batch_ready = asyncio.Event()
+ self._drain_task = asyncio.get_running_loop().create_task(self._drain_acks())
+ self._closed = False
+ self._error: BaseException | None = None
+
+ async def submit(self, record: Record) -> RecordSubmitTicket:
+ """Submit a record for appending.
+
+ Waits when backpressure limits are reached.
+ """
+ if self._closed:
+ raise S2ClientError("Producer is closed")
+ if self._error is not None:
+ raise self._error
+
+ ack_fut: asyncio.Future[IndexedAppendAck] = (
+ asyncio.get_running_loop().create_future()
+ )
+ self._indexed_ack_futs.append(ack_fut)
+
+ first_in_batch = self._accumulator.is_empty()
+ self._accumulator.add(record)
+ if self._accumulator.is_full():
+ await self._flush()
+ elif first_in_batch and self._accumulator.linger is not None:
+ self._linger_task = asyncio.get_running_loop().create_task(
+ self._linger_flush()
+ )
+
+ return RecordSubmitTicket(ack_fut)
+
+ async def close(self) -> None:
+ """Close the producer and wait for all submitted records to be appended."""
+ if self._closed:
+ return
+ self._closed = True
+ await self._flush()
+ await self._session.close()
+ # Signal drain task to finish and wait for it
+ self._batch_ready.set()
+ await self._drain_task
+ if self._error is not None:
+ raise self._error
+
+ async def __aenter__(self) -> Self:
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb) -> bool:
+ await self.close()
+ return False
+
+ async def _flush(self) -> None:
+ if self._accumulator.is_empty():
+ return
+
+ if self._linger_task is not None:
+ self._linger_task.cancel()
+ self._linger_task = None
+
+ records = self._accumulator.take()
+ indexed_ack_futs = tuple(self._indexed_ack_futs)
+ self._indexed_ack_futs.clear()
+
+ batch = AppendInput(
+ records=records,
+ fencing_token=self._fencing_token,
+ match_seq_num=self._match_seq_num,
+ )
+ if self._match_seq_num is not None:
+ self._match_seq_num += len(records)
+
+ try:
+ ticket = await self._session.submit(batch)
+ except BaseException as e:
+ self._error = e
+ for ack_fut in indexed_ack_futs:
+ if not ack_fut.done():
+ ack_fut.set_exception(e)
+ # Suppress "Future exception was never retrieved" for
+ # futures the caller never got back (submit raised).
+ ack_fut.exception()
+ raise
+
+ self._unacked.append(
+ _UnackedBatch(ticket=ticket, indexed_ack_futs=indexed_ack_futs)
+ )
+ self._batch_ready.set()
+
+ async def _drain_acks(self) -> None:
+ """Single background task that resolves batches in FIFO order."""
+ while True:
+ while not self._unacked:
+ if self._closed:
+ return
+ self._batch_ready.clear()
+ if self._unacked:
+ break
+ await self._batch_ready.wait()
+
+ unacked = self._unacked.popleft()
+ try:
+ ack: AppendAck = await unacked.ticket # type: ignore[assignment]
+ for i, ack_fut in enumerate(unacked.indexed_ack_futs):
+ if not ack_fut.done():
+ ack_fut.set_result(
+ IndexedAppendAck(
+ seq_num=ack.start.seq_num + i,
+ batch=ack,
+ )
+ )
+ except BaseException as e:
+ self._error = e
+ for ack_fut in unacked.indexed_ack_futs:
+ if not ack_fut.done():
+ ack_fut.set_exception(e)
+ # Fail all remaining unacked batches too
+ for remaining in self._unacked:
+ for ack_fut in remaining.indexed_ack_futs:
+ if not ack_fut.done():
+ ack_fut.set_exception(e)
+ self._unacked.clear()
+ return
+
+ async def _linger_flush(self) -> None:
+ assert self._accumulator.linger is not None
+ await asyncio.sleep(self._accumulator.linger)
+ # Clear before calling _flush() so it doesn't cancel this task.
+ self._linger_task = None
+ await self._flush()
+
+
+class RecordSubmitTicket:
+ """Awaitable that resolves to an :class:`IndexedAppendAck` once the record is appended."""
+
+ __slots__ = ("_ack_fut",)
+
+ def __init__(self, ack_fut: asyncio.Future[IndexedAppendAck]) -> None:
+ self._ack_fut = ack_fut
+
+ def __await__(self):
+ return self._ack_fut.__await__()
diff --git a/src/s2_sdk/_retrier.py b/src/s2_sdk/_retrier.py
new file mode 100644
index 0000000..19b4b71
--- /dev/null
+++ b/src/s2_sdk/_retrier.py
@@ -0,0 +1,116 @@
+import asyncio
+import random
+from dataclasses import dataclass
+from typing import Callable
+
+from s2_sdk._exceptions import ConnectError, S2ServerError, TransportError
+from s2_sdk._frame_signal import FrameSignal
+from s2_sdk._types import AppendRetryPolicy
+
+
+class Retrier:
+ def __init__(
+ self,
+ should_retry_on: Callable[[Exception], bool],
+ max_attempts: int,
+ min_base_delay: float = 0.1,
+ max_base_delay: float = 1.0,
+ ):
+ self.should_retry_on = should_retry_on
+ self.max_attempts = max_attempts
+ self.min_base_delay = min_base_delay
+ self.max_base_delay = max_base_delay
+
+ async def __call__(self, f: Callable, *args, **kwargs):
+ backoffs = compute_backoffs(
+ attempts=max(self.max_attempts - 1, 0),
+ min_base_delay=self.min_base_delay,
+ max_base_delay=self.max_base_delay,
+ )
+ attempt = 0
+ while True:
+ try:
+ return await f(*args, **kwargs)
+ except Exception as e:
+ if attempt < len(backoffs) and self.should_retry_on(e):
+ delay = backoffs[attempt]
+ retry_after = getattr(e, "_retry_after", None)
+ if retry_after is not None:
+ delay = max(delay, retry_after)
+ await asyncio.sleep(delay)
+ attempt += 1
+ else:
+ raise e
+
+
+@dataclass(slots=True)
+class Attempt:
+ value: int
+
+
+def compute_backoffs(
+ attempts: int,
+ min_base_delay: float = 0.1,
+ max_base_delay: float = 1.0,
+) -> list[float]:
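+    """Per-attempt base delays double from ``min_base_delay`` up to
+    ``max_base_delay``; a uniform jitter in ``[0, base]`` is added to each.
+
+    Illustrative: with the defaults (0.1 s and 1.0 s), ``compute_backoffs(4)``
+    yields base delays [0.1, 0.2, 0.4, 0.8] before jitter.
+    """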
+ backoffs = []
+ for n in range(attempts):
+ base_delay = min(min_base_delay * 2**n, max_base_delay)
+ jitter = random.uniform(0, base_delay)
+ backoffs.append(base_delay + jitter)
+ return backoffs
+
+
+def is_safe_to_retry_unary(
+ e: Exception,
+ policy: AppendRetryPolicy | None,
+) -> bool:
+ match policy:
+ case None | AppendRetryPolicy.ALL:
+ policy_compliant = True
+ case AppendRetryPolicy.NO_SIDE_EFFECTS:
+ policy_compliant = has_no_side_effects(e)
+ return policy_compliant and http_retry_on(e)
+
+
+def is_safe_to_retry_session(
+ e: Exception,
+ policy: AppendRetryPolicy,
+ has_inflight: bool,
+ frame_signal: FrameSignal | None,
+) -> bool:
+ match policy:
+ case AppendRetryPolicy.ALL:
+ policy_compliant = True
+ case AppendRetryPolicy.NO_SIDE_EFFECTS:
+ not_signalled = frame_signal is not None and not frame_signal.is_signalled()
+ policy_compliant = (
+ not has_inflight or not_signalled or has_no_side_effects(e)
+ )
+ return policy_compliant and http_retry_on(e)
+
+
+def http_retry_on(e: Exception) -> bool:
+ if isinstance(e, S2ServerError):
+ if e.status_code in (408, 429, 500, 502, 503, 504):
+ return True
+ if e.status_code == 409 and e.code == "transaction_conflict":
+ return True
+ if isinstance(e, TransportError):
+ return True
+ return False
+
+
+def has_no_side_effects(e: Exception) -> bool:
+ if isinstance(e, S2ServerError):
+ return (e.status_code == 429 and e.code == "rate_limited") or (
+ e.status_code == 502 and e.code == "hot_server"
+ )
+ if isinstance(e, ConnectError):
+ cause = e.__cause__
+ while cause is not None:
+ if isinstance(cause, ConnectionRefusedError):
+ return True
+ cause = cause.__cause__
+ return False
+ return False
diff --git a/src/s2_sdk/_s2s/__init__.py b/src/s2_sdk/_s2s/__init__.py
new file mode 100644
index 0000000..8fab3f3
--- /dev/null
+++ b/src/s2_sdk/_s2s/__init__.py
@@ -0,0 +1,5 @@
+from urllib.parse import quote
+
+
+def _stream_records_path(stream_name: str) -> str:
+ return f"/v1/streams/{quote(stream_name, safe='')}/records"
diff --git a/src/s2_sdk/_s2s/_append_session.py b/src/s2_sdk/_s2s/_append_session.py
new file mode 100644
index 0000000..3c41621
--- /dev/null
+++ b/src/s2_sdk/_s2s/_append_session.py
@@ -0,0 +1,201 @@
+import asyncio
+from collections import deque
+from collections.abc import AsyncGenerator, AsyncIterable
+from typing import NamedTuple
+
+import s2_sdk._generated.s2.v1.s2_pb2 as pb
+from s2_sdk._client import HttpClient
+from s2_sdk._exceptions import ReadTimeoutError, S2ClientError
+from s2_sdk._frame_signal import FrameSignal
+from s2_sdk._mappers import append_ack_from_proto, append_input_to_proto
+from s2_sdk._retrier import Attempt, compute_backoffs, is_safe_to_retry_session
+from s2_sdk._s2s import _stream_records_path
+from s2_sdk._s2s._protocol import (
+ Message,
+ frame_message,
+ maybe_compress,
+ parse_error_info,
+ read_messages,
+)
+from s2_sdk._types import (
+ AppendAck,
+ AppendInput,
+ AppendRetryPolicy,
+ Compression,
+ Retry,
+)
+
+_QUEUE_MAX_SIZE = 100
+
+
+class _InflightInput(NamedTuple):
+ num_records: int
+ encoded: bytes
+
+
+async def run_append_session(
+ client: HttpClient,
+ stream_name: str,
+ inputs: AsyncIterable[AppendInput],
+ retry: Retry,
+ compression: Compression,
+ ack_timeout: float | None = None,
+) -> AsyncIterable[AppendAck]:
+ input_queue: asyncio.Queue[AppendInput | None] = asyncio.Queue(
+ maxsize=_QUEUE_MAX_SIZE
+ )
+ ack_queue: asyncio.Queue[AppendAck | None] = asyncio.Queue(maxsize=_QUEUE_MAX_SIZE)
+
+ frame_signal: FrameSignal | None = None
+ if retry.append_retry_policy == AppendRetryPolicy.NO_SIDE_EFFECTS:
+ frame_signal = FrameSignal()
+
+ async def pipe_inputs():
+ try:
+ async for inp in inputs:
+ await input_queue.put(inp)
+ finally:
+ await input_queue.put(None)
+
+ async def retrying_inner():
+ inflight_inputs: deque[_InflightInput] = deque()
+ backoffs = compute_backoffs(
+ retry._max_retries(),
+ min_base_delay=retry.min_base_delay.total_seconds(),
+ max_base_delay=retry.max_base_delay.total_seconds(),
+ )
+ attempt = Attempt(0)
+ try:
+ while True:
+ try:
+ pending_resend = tuple(inflight_inputs)
+ if frame_signal is not None:
+ frame_signal.reset()
+ await _run_attempt(
+ client,
+ stream_name,
+ attempt,
+ inflight_inputs,
+ input_queue,
+ ack_queue,
+ pending_resend,
+ compression,
+ frame_signal,
+ ack_timeout,
+ )
+ return
+ except Exception as e:
+ has_inflight = len(inflight_inputs) > 0
+ if attempt.value < len(backoffs) and is_safe_to_retry_session(
+ e,
+ retry.append_retry_policy,
+ has_inflight,
+ frame_signal,
+ ):
+ await asyncio.sleep(backoffs[attempt.value])
+ attempt.value += 1
+ else:
+ raise
+ finally:
+ await ack_queue.put(None)
+
+ async with asyncio.TaskGroup() as tg:
+ tg.create_task(retrying_inner())
+ tg.create_task(pipe_inputs())
+ while True:
+ ack = await ack_queue.get()
+ if ack is None:
+ break
+ yield ack
+
+
+async def _run_attempt(
+ client: HttpClient,
+ stream_name: str,
+ attempt: Attempt,
+ inflight_inputs: deque[_InflightInput],
+ input_queue: asyncio.Queue[AppendInput | None],
+ ack_queue: asyncio.Queue[AppendAck | None],
+ pending_resend: tuple[_InflightInput, ...],
+ compression: Compression,
+ frame_signal: FrameSignal | None,
+ ack_timeout: float | None = None,
+) -> None:
+ async with client.streaming_request(
+ "POST",
+ _stream_records_path(stream_name),
+ headers={
+ "content-type": "s2s/proto",
+ "accept": "s2s/proto",
+ },
+ content=_body_gen(inflight_inputs, input_queue, pending_resend, compression),
+ frame_signal=frame_signal,
+ ) as response:
+ if response.status_code != 200:
+ body = await response.aread()
+ raise parse_error_info(body, response.status_code)
+
+ prev_ack_end: int | None = None
+ resend_remaining = len(pending_resend)
+
+ messages = read_messages(response.aiter_bytes())
+ while True:
+ try:
+ msg_body = await asyncio.wait_for(
+ messages.__anext__(), timeout=ack_timeout
+ )
+ except StopAsyncIteration:
+ break
+ except asyncio.TimeoutError:
+ raise ReadTimeoutError("Append session ack timeout")
+
+ if attempt.value > 0:
+ attempt.value = 0
+ ack = pb.AppendAck()
+ ack.ParseFromString(msg_body)
+
+ if ack.end.seq_num < ack.start.seq_num:
+ raise S2ClientError("Invalid ack: end < start")
+ if prev_ack_end is not None and ack.end.seq_num <= prev_ack_end:
+ raise S2ClientError("Invalid ack: not monotonically increasing")
+ prev_ack_end = ack.end.seq_num
+
+ num_records_sent = inflight_inputs.popleft().num_records
+ num_records_ackd = ack.end.seq_num - ack.start.seq_num
+ if num_records_sent != num_records_ackd:
+ raise S2ClientError(
+ "Number of records sent doesn't match the number of acknowledgements received"
+ )
+ await ack_queue.put(append_ack_from_proto(ack))
+
+ if resend_remaining > 0:
+ resend_remaining -= 1
+ if resend_remaining == 0 and frame_signal is not None:
+ frame_signal.reset()
+
+
+async def _body_gen(
+ inflight_inputs: deque[_InflightInput],
+ input_queue: asyncio.Queue[AppendInput | None],
+ pending_resend: tuple[_InflightInput, ...],
+ compression: Compression,
+) -> AsyncGenerator[bytes]:
+ for resend_inp in pending_resend:
+ yield resend_inp.encoded
+
+ while True:
+ inp = await input_queue.get()
+ if inp is None:
+ return
+ encoded = _encode_input(inp, compression)
+ inflight_inputs.append(
+ _InflightInput(num_records=len(inp.records), encoded=encoded)
+ )
+ yield encoded
+
+
+def _encode_input(inp: AppendInput, compression: Compression) -> bytes:
+ proto = append_input_to_proto(inp)
+ body = proto.SerializeToString()
+ body, compression = maybe_compress(body, compression)
+ return frame_message(Message(body, terminal=False, compression=compression))
diff --git a/src/s2_sdk/_s2s/_protocol.py b/src/s2_sdk/_s2s/_protocol.py
new file mode 100644
index 0000000..15ea5f9
--- /dev/null
+++ b/src/s2_sdk/_s2s/_protocol.py
@@ -0,0 +1,174 @@
+"""S2S message framing protocol.
+
+Message layout: [3 bytes: length] [1 byte: flag] [N bytes: body]
+ The 3-byte length covers flag + body (i.e. everything after the length prefix).
+Flag byte: [T][CC][RRRRR]
+ T = terminal (1 = last message)
+ CC = compression (00=none, 01=zstd, 10=gzip)
+ R = reserved
+Terminal body: [2 bytes: status code big-endian] [JSON error]
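+
+Example: framing the 2-byte body b"hi" as a non-terminal, uncompressed message
+gives length b"\x00\x00\x03" (flag + body = 3 bytes), flag b"\x00", then b"hi",
+i.e. the frame b"\x00\x00\x03\x00hi".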
+"""
+
+import json
+import struct
+from collections.abc import AsyncIterator
+from typing import NamedTuple
+
+from s2_sdk._compression import compress, decompress
+from s2_sdk._exceptions import (
+ UNKNOWN_CODE,
+ S2ClientError,
+ S2ServerError,
+ raise_for_412,
+ raise_for_416,
+)
+from s2_sdk._types import Compression
+
+
+class Message(NamedTuple):
+ body: bytes
+ terminal: bool
+ compression: Compression
+
+
+# Compression threshold (1 KiB)
+COMPRESSION_THRESHOLD = 1024
+
+# Flag byte: [T][CC][RRRRR]
+_TERMINAL_BIT = 0b1000_0000
+_COMPRESSION_MASK = 0b0110_0000
+_COMPRESSION_SHIFT = 5
+
+# Length of the flag field in bytes
+_FLAG_LEN = 1
+
+# Wire codes for compression in the flag byte
+_COMPRESSION_CODE = {
+ Compression.NONE: 0,
+ Compression.ZSTD: 1,
+ Compression.GZIP: 2,
+}
+
+_COMPRESSION_FROM_CODE = {v: k for k, v in _COMPRESSION_CODE.items()}
+
+
+def frame_message(msg: Message) -> bytes:
+ msg_len = _FLAG_LEN + len(msg.body)
+ flag = 0
+ if msg.terminal:
+ flag |= _TERMINAL_BIT
+ flag |= (_COMPRESSION_CODE[msg.compression] & 0x3) << _COMPRESSION_SHIFT
+
+ return struct.pack(">I", msg_len)[1:] + bytes([flag]) + msg.body
+
+
+def deframe_data(data: bytes) -> Message:
+ if len(data) < 4:
+ raise ValueError("Message too short")
+
+ msg_len = int.from_bytes(data[0:3], "big") # flag + body
+ flag = data[3]
+
+ terminal = bool(flag & _TERMINAL_BIT)
+ code = (flag & _COMPRESSION_MASK) >> _COMPRESSION_SHIFT
+ compression = _COMPRESSION_FROM_CODE.get(code, Compression.NONE)
+
+ body_len = msg_len - _FLAG_LEN
+ body = data[4 : 4 + body_len]
+ if len(body) < body_len:
+ raise ValueError("Incomplete message body")
+
+ return Message(body, terminal, compression)
+
+
+async def read_messages(byte_stream: AsyncIterator[bytes]) -> AsyncIterator[bytes]:
+ """Read messages from an async byte stream.
+
+ Yields decoded message bodies. Stops on terminal message.
+ """
+ buf = bytearray()
+ async for chunk in byte_stream:
+ buf.extend(chunk)
+ while len(buf) >= 4:
+ msg_len = int.from_bytes(buf[0:3], "big")
+ frame_len = 3 + msg_len
+ if len(buf) < frame_len:
+ break
+
+ flag = buf[3]
+ terminal = bool(flag & _TERMINAL_BIT)
+ compression_code = (flag & _COMPRESSION_MASK) >> _COMPRESSION_SHIFT
+ compression = _COMPRESSION_FROM_CODE.get(compression_code, Compression.NONE)
+ body = bytes(buf[4:frame_len])
+ del buf[:frame_len]
+
+ if terminal:
+ _handle_terminal(body)
+ return
+
+ if compression != Compression.NONE:
+ body = decompress(body, compression)
+
+ yield body
+
+
+def maybe_compress(body: bytes, compression: Compression) -> tuple[bytes, Compression]:
+ if len(body) >= COMPRESSION_THRESHOLD and compression != Compression.NONE:
+ return compress(body, compression), compression
+ return body, Compression.NONE
+
+
+def parse_error_info(body: bytes, status_code: int) -> S2ServerError:
+ try:
+ error = json.loads(body)
+        if isinstance(error, dict):
+            message = error.get("message", body.decode("utf-8", errors="replace"))
+            code = error.get("code", UNKNOWN_CODE)
+        else:
+            message = body.decode("utf-8", errors="replace")
+            code = UNKNOWN_CODE
+ except Exception:
+ message = body.decode("utf-8", errors="replace")
+ code = UNKNOWN_CODE
+ return S2ServerError(code, message, status_code)
+
+
+def _handle_terminal(body: bytes) -> None:
+ """Handle a terminal message body.
+
+ Terminal messages carry an HTTP status code and JSON error information.
+ Always raises an exception — terminal messages are only sent on errors.
+ """
+ if len(body) < 2:
+ raise S2ClientError("Session terminated")
+
+ status_code = int.from_bytes(body[0:2], "big")
+
+ error_json = body[2:]
+ if not error_json:
+ raise S2ServerError(
+ UNKNOWN_CODE,
+ f"Session terminated with status {status_code}",
+ status_code,
+ )
+
+ try:
+ error = json.loads(error_json)
+ except (json.JSONDecodeError, ValueError):
+ raise S2ServerError(
+ UNKNOWN_CODE,
+ error_json.decode("utf-8", errors="replace"),
+ status_code,
+ )
+
+ code = error.get("code", UNKNOWN_CODE) if isinstance(error, dict) else UNKNOWN_CODE
+
+ if status_code == 412:
+ raise_for_412(error, code)
+
+ if status_code == 416:
+ raise_for_416(error, code)
+
+ if isinstance(error, dict):
+ message = error.get("message", str(error))
+ else:
+ message = str(error)
+ raise S2ServerError(code, message, status_code)
diff --git a/src/s2_sdk/_s2s/_read_session.py b/src/s2_sdk/_s2s/_read_session.py
new file mode 100644
index 0000000..2a93d2c
--- /dev/null
+++ b/src/s2_sdk/_s2s/_read_session.py
@@ -0,0 +1,151 @@
+import asyncio
+import json
+import math
+import time
+from typing import Any, AsyncIterable
+
+import s2_sdk._generated.s2.v1.s2_pb2 as pb
+from s2_sdk._client import HttpClient
+from s2_sdk._exceptions import UNKNOWN_CODE, ReadTimeoutError, raise_for_416
+from s2_sdk._mappers import read_batch_from_proto, read_limit_params, read_start_params
+from s2_sdk._retrier import Attempt, compute_backoffs, http_retry_on
+from s2_sdk._s2s import _stream_records_path
+from s2_sdk._s2s._protocol import parse_error_info, read_messages
+from s2_sdk._types import (
+ ReadBatch,
+ ReadLimit,
+ Retry,
+ SeqNum,
+ TailOffset,
+ Timestamp,
+ metered_bytes,
+)
+
+_HEARTBEAT_TIMEOUT = 20.0 # seconds
+
+
+async def run_read_session(
+ client: HttpClient,
+ stream_name: str,
+ start: SeqNum | Timestamp | TailOffset,
+ limit: ReadLimit | None,
+ until_timestamp: int | None,
+ clamp_to_tail: bool,
+ wait: int | None,
+ ignore_command_records: bool,
+ retry: Retry,
+) -> AsyncIterable[ReadBatch]:
+ params = _build_read_params(start, limit, until_timestamp, clamp_to_tail, wait)
+ backoffs = compute_backoffs(
+ retry._max_retries(),
+ min_base_delay=retry.min_base_delay.total_seconds(),
+ max_base_delay=retry.max_base_delay.total_seconds(),
+ )
+ attempt = Attempt(0)
+
+ remaining_count = limit.count if limit and limit.count is not None else None
+ remaining_bytes = limit.bytes if limit and limit.bytes is not None else None
+
+ last_tail_at: float | None = None
+
+ while True:
+ if wait is not None:
+ params["wait"] = _remaining_wait(wait, last_tail_at)
+
+ try:
+ async with client.streaming_request(
+ "GET",
+ _stream_records_path(stream_name),
+ params=params,
+ headers={"content-type": "s2s/proto"},
+ ) as response:
+ if response.status_code == 416:
+ body = await response.aread()
+ data = json.loads(body)
+ code = (
+ data.get("code", UNKNOWN_CODE)
+ if isinstance(data, dict)
+ else UNKNOWN_CODE
+ )
+ raise_for_416(data, code)
+
+ if response.status_code != 200:
+ body = await response.aread()
+ raise parse_error_info(body, response.status_code)
+
+ messages = read_messages(response.aiter_bytes())
+ while True:
+ try:
+ message_body = await asyncio.wait_for(
+ messages.__anext__(), timeout=_HEARTBEAT_TIMEOUT
+ )
+ except StopAsyncIteration:
+ break
+ except asyncio.TimeoutError:
+ raise ReadTimeoutError("Read session heartbeat timeout")
+
+ if attempt.value > 0:
+ attempt.value = 0
+
+ proto_batch = pb.ReadBatch()
+ proto_batch.ParseFromString(message_body)
+ batch = read_batch_from_proto(proto_batch, ignore_command_records)
+
+ if batch.tail is not None:
+ last_tail_at = time.monotonic()
+
+ if not batch.records and batch.tail is None:
+ continue
+
+ if batch.records:
+ last_record = batch.records[-1]
+ params["seq_num"] = last_record.seq_num + 1
+ params.pop("timestamp", None)
+ params.pop("tail_offset", None)
+
+ if remaining_count is not None:
+ remaining_count = max(
+ remaining_count - len(batch.records), 0
+ )
+ params["count"] = remaining_count
+ if remaining_bytes is not None:
+ remaining_bytes = max(
+ remaining_bytes - metered_bytes(batch.records), 0
+ )
+ params["bytes"] = remaining_bytes
+
+ yield batch
+
+ return
+ except Exception as e:
+ if attempt.value < len(backoffs) and http_retry_on(e):
+ await asyncio.sleep(backoffs[attempt.value])
+ attempt.value += 1
+ else:
+ raise e
+
+
+def _build_read_params(
+ start: SeqNum | Timestamp | TailOffset,
+ limit: ReadLimit | None,
+ until_timestamp: int | None,
+ clamp_to_tail: bool,
+ wait: int | None,
+) -> dict[str, Any]:
+ params: dict[str, Any] = {}
+ params.update(read_start_params(start))
+ params.update(read_limit_params(limit))
+ if until_timestamp is not None:
+ params["until"] = until_timestamp
+ if clamp_to_tail:
+ params["clamp"] = "true"
+ if wait is not None:
+ params["wait"] = wait
+ return params
+
+
+def _remaining_wait(baseline: int, last_tail_at: float | None) -> int:
+ if last_tail_at is None:
+ return baseline
+ elapsed = math.ceil(time.monotonic() - last_tail_at)
+ return max(0, baseline - elapsed)
diff --git a/src/s2_sdk/_types.py b/src/s2_sdk/_types.py
new file mode 100644
index 0000000..4d008a1
--- /dev/null
+++ b/src/s2_sdk/_types.py
@@ -0,0 +1,609 @@
+from __future__ import annotations
+
+import os
+from dataclasses import dataclass, field
+from datetime import datetime, timedelta
+from enum import Enum
+from typing import Generic, Iterable, Literal, TypeVar
+
+from s2_sdk._exceptions import S2ClientError, fallible
+
+T = TypeVar("T")
+
+ONE_MIB = 1024 * 1024
+
+
+def _parse_scheme(url: str) -> str:
+ idx = url.find("://")
+ if idx >= 0:
+ return url[:idx].lower()
+ return "https"
+
+
+class _DocEnum(Enum):
+ def __new__(cls, value, doc=None):
+ self = object.__new__(cls)
+ self._value_ = value
+ if doc is not None:
+ self.__doc__ = doc
+ return self
+
+ def __repr__(self) -> str:
+ return f"{type(self).__name__}.{self.name}"
+
+
+class Compression(_DocEnum):
+ """Compression algorithm for requests and responses."""
+
+ NONE = "none"
+ ZSTD = "zstd"
+ GZIP = "gzip"
+
+
+class AppendRetryPolicy(_DocEnum):
+ """Policy controlling when append operations are retried."""
+
+ ALL = "all", "Retry all retryable errors."
+ NO_SIDE_EFFECTS = (
+ "no-side-effects",
+ "Retry only when no server-side mutation could have occurred.",
+ )
+
+
+class Endpoints:
+ """S2 service endpoints. See `endpoints `_."""
+
+ __slots__ = ("_account", "_basin", "_direct")
+
+ def __init__(self, account: str, basin: str):
+ account_scheme = _parse_scheme(account)
+ basin_scheme = _parse_scheme(basin)
+ if account_scheme != basin_scheme:
+ raise S2ClientError("Account and basin endpoints must have the same scheme")
+ self._account = account
+ self._basin = basin
+ self._direct = "{basin}" not in basin
+
+ @classmethod
+ def default(cls) -> Endpoints:
+ """Construct default S2 cloud endpoints."""
+ return cls(
+ account="https://aws.s2.dev",
+ basin="https://{basin}.b.s2.dev",
+ )
+
+ @classmethod
+ @fallible
+ def from_env(cls) -> Endpoints:
+ """Construct endpoints from ``S2_ACCOUNT_ENDPOINT`` and ``S2_BASIN_ENDPOINT`` environment variables."""
+ account = os.getenv("S2_ACCOUNT_ENDPOINT")
+ basin = os.getenv("S2_BASIN_ENDPOINT")
+ if account and basin:
+ return cls(account=account, basin=basin)
+ raise S2ClientError(
+ "Both S2_ACCOUNT_ENDPOINT and S2_BASIN_ENDPOINT must be set"
+ )
+
+ def _account_url(self) -> str:
+ return self._account
+
+ def _basin_url(self, basin_name: str) -> str:
+ return self._basin.format(basin=basin_name)
+
+ def _is_direct_basin(self) -> bool:
+ return self._direct
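+
+
+# Example (illustrative): with Endpoints.default(), basin "my-basin" resolves
+# to "https://my-basin.b.s2.dev". An endpoint pair without the "{basin}"
+# placeholder, e.g. Endpoints(account="http://localhost:8080",
+# basin="http://localhost:8080"), is treated as a direct basin endpoint.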
+
+
+@dataclass(slots=True)
+class Timeout:
+ """Timeout configuration."""
+
+ request: timedelta = field(default_factory=lambda: timedelta(seconds=5))
+ """Timeout for read, write, and pool operations. Default is 5 seconds."""
+
+ connection: timedelta = field(default_factory=lambda: timedelta(seconds=3))
+ """Timeout for establishing connections. Default is 3 seconds."""
+
+
+@dataclass(slots=True)
+class Retry:
+ """Retry configuration."""
+
+ max_attempts: int = 3
+ """Maximum number of attempts, including the initial try. Must be at least 1. Default is 3."""
+
+ min_base_delay: timedelta = field(
+ default_factory=lambda: timedelta(milliseconds=100)
+ )
+ """Minimum base delay between retries, before jitter. Default is 100 ms."""
+
+ max_base_delay: timedelta = field(default_factory=lambda: timedelta(seconds=1))
+ """Maximum base delay between retries, before jitter. Default is 1 second."""
+
+ append_retry_policy: AppendRetryPolicy = AppendRetryPolicy.ALL
+ """Policy controlling when append operations are retried. Default is ``ALL``."""
+
+ def _max_retries(self) -> int:
+ return max(self.max_attempts - 1, 0)
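+
+
+# Example (illustrative): allow up to 5 attempts, but retry appends only when
+# the server could not have applied them:
+#
+#   Retry(max_attempts=5, append_retry_policy=AppendRetryPolicy.NO_SIDE_EFFECTS)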
+
+
+@dataclass(slots=True)
+class Record:
+ """A record to append."""
+
+ body: bytes
+ """Body of the record."""
+
+ headers: list[tuple[bytes, bytes]] = field(default_factory=list)
+ """Headers for the record."""
+
+ timestamp: int | None = None
+ """Timestamp for the record. Precise semantics depend on the stream's timestamping
+ configuration."""
+
+
+@dataclass(slots=True)
+class AppendInput:
+ """Input for :meth:`~S2Stream.append` and :meth:`AppendSession.submit`."""
+
+ records: list[Record]
+ """Batch of records to append atomically. Must contain 1--1000 records totalling at most
+ 1 MiB in metered bytes."""
+
+ match_seq_num: int | None = None
+ """Expected sequence number for the first record in the batch. If unset, no matching is
+ performed. If set and mismatched, the append fails."""
+
+ fencing_token: str | None = None
+ """Fencing token to match against the stream's current fencing token. If unset, no matching
+ is performed. If set and mismatched, the append fails."""
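+
+
+# Example (illustrative): append two records only if the first would be
+# assigned sequence number 10 and the stream's fencing token is "deploy-2024":
+#
+#   AppendInput(records=[r1, r2], match_seq_num=10, fencing_token="deploy-2024")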
+
+
+@dataclass(slots=True)
+class StreamPosition:
+ """Position of a record in a stream."""
+
+ seq_num: int
+ """Sequence number of the record."""
+
+ timestamp: int
+ """Timestamp of the record."""
+
+
+@dataclass(slots=True)
+class AppendAck:
+ """Acknowledgement for an :class:`AppendInput`."""
+
+ start: StreamPosition
+ """Sequence number and timestamp of the first appended record."""
+
+ end: StreamPosition
+ """Sequence number of the last appended record + 1, and timestamp of the last appended
+ record. ``end.seq_num - start.seq_num`` is the number of records appended."""
+
+ tail: StreamPosition
+ """Next sequence number to be assigned on the stream, and timestamp of the last record
+ on the stream. Can be greater than ``end`` in case of concurrent appends."""
+
+
+@dataclass(slots=True)
+class IndexedAppendAck:
+ """Acknowledgement for an appended record."""
+
+ seq_num: int
+ """Sequence number assigned to the record."""
+
+ batch: AppendAck
+ """Acknowledgement for the containing batch."""
+
+
+@dataclass(slots=True)
+class Batching:
+ """Configuration for auto-batching records."""
+
+ max_records: int = 1000
+ """Maximum number of records per batch. Must be between 1 and 1000. Default is 1000."""
+
+ max_bytes: int = ONE_MIB
+ """Maximum metered bytes per batch. Must be between 8 and 1 MiB. Default is 1 MiB."""
+
+ linger: timedelta = field(default_factory=lambda: timedelta(milliseconds=5))
+ """Maximum time to wait for more records before flushing a partial batch. Default is 5 ms."""
+
+
+@dataclass(slots=True)
+class ReadLimit:
+ """Limits for read operations."""
+
+ count: int | None = None
+ """Maximum number of records to return."""
+
+ bytes: int | None = None
+ """Maximum cumulative size of records calculated using :func:`metered_bytes`."""
+
+
+@dataclass(slots=True)
+class SequencedRecord:
+ """Record read from a stream."""
+
+ seq_num: int
+ """Sequence number assigned to this record."""
+
+ body: bytes
+ """Body of this record."""
+
+ headers: list[tuple[bytes, bytes]]
+ """Series of name-value pairs for this record."""
+
+ timestamp: int
+ """Timestamp for this record."""
+
+
+@dataclass(slots=True)
+class ReadBatch:
+ """Batch of records from a read session."""
+
+ records: list[SequencedRecord]
+ """Records that are durably sequenced on the stream."""
+
+ tail: StreamPosition | None = None
+ """Tail position of the stream, present when reading recent records."""
+
+
+class CommandRecord:
+ """Factory class for creating command records."""
+
+ FENCE = b"fence"
+ TRIM = b"trim"
+
+ @staticmethod
+ def fence(token: str) -> Record:
+ """Create a fence command record.
+
+ The fencing token must not exceed 36 bytes when UTF-8 encoded.
+ """
+ encoded_token = token.encode()
+ if len(encoded_token) > 36:
+ raise S2ClientError("UTF-8 byte count of fencing token exceeds 36 bytes")
+ return Record(body=encoded_token, headers=[(bytes(), CommandRecord.FENCE)])
+
+ @staticmethod
+ def trim(desired_first_seq_num: int) -> Record:
+ """Create a trim command record.
+
+        Has no effect if the sequence number is smaller than that of the first
+        existing record.
+ """
+ return Record(
+ body=desired_first_seq_num.to_bytes(8, "big"),
+ headers=[(bytes(), CommandRecord.TRIM)],
+ )
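+
+
+# Example (illustrative): CommandRecord.fence("deploy-2024") builds a command
+# record carrying the fencing token, and CommandRecord.trim(42) builds one
+# requesting that records before sequence number 42 be trimmed; both are
+# appended to the stream like ordinary records.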
+
+
+def metered_bytes(records: Iterable[Record | SequencedRecord]) -> int:
+ """Each record is metered using the following formula:
+
+ .. code-block:: python
+
+ 8 + 2 * len(headers)
+ + sum((len(name) + len(value)) for (name, value) in headers)
+ + len(body)
+
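+    For example, a record with a 10-byte body and a single header
+    ``(b"kind", b"event")`` meters as ``8 + 2*1 + (4 + 5) + 10 = 29`` bytes.
+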
+ """
+ return sum(
+ (
+ 8
+ + 2 * len(record.headers)
+ + sum((len(name) + len(value)) for (name, value) in record.headers)
+ + len(record.body)
+ )
+ for record in records
+ )
+
+
+@dataclass(slots=True)
+class SeqNum:
+ """Read starting from this sequence number."""
+
+ value: int
+
+
+@dataclass(slots=True)
+class Timestamp:
+ """Read starting from this timestamp."""
+
+ value: int
+
+
+@dataclass(slots=True)
+class TailOffset:
+ """Read starting from this many records before the tail."""
+
+ value: int
+
+
+@dataclass(slots=True)
+class Page(Generic[T]):
+ """A page of values."""
+
+ items: list[T]
+ """Items in this page."""
+
+ has_more: bool
+ """Whether there are more pages."""
+
+
+class StorageClass(_DocEnum):
+ """Storage class for recent appends."""
+
+ STANDARD = "standard", "Offers end-to-end latencies under 500 ms."
+ EXPRESS = "express", "Offers end-to-end latencies under 50 ms."
+
+
+class TimestampingMode(_DocEnum):
+ """Timestamping mode for appends. Timestamps are milliseconds since Unix epoch."""
+
+ CLIENT_PREFER = (
+ "client-prefer",
+ "Prefer client-specified timestamp if present, otherwise use arrival time.",
+ )
+ CLIENT_REQUIRE = (
+ "client-require",
+ "Require a client-specified timestamp and reject if absent.",
+ )
+ ARRIVAL = (
+ "arrival",
+ "Use the arrival time and ignore any client-specified timestamp.",
+ )
+
+
+class BasinScope(_DocEnum):
+ """Scope of a basin."""
+
+ AWS_US_EAST_1 = "aws:us-east-1", "AWS us-east-1 region."
+
+
+class Operation(_DocEnum):
+ """Granular operation for access token scoping."""
+
+ LIST_BASINS = "list-basins"
+ CREATE_BASIN = "create-basin"
+ DELETE_BASIN = "delete-basin"
+ RECONFIGURE_BASIN = "reconfigure-basin"
+ GET_BASIN_CONFIG = "get-basin-config"
+ ISSUE_ACCESS_TOKEN = "issue-access-token"
+ REVOKE_ACCESS_TOKEN = "revoke-access-token"
+ LIST_ACCESS_TOKENS = "list-access-tokens"
+ LIST_STREAMS = "list-streams"
+ CREATE_STREAM = "create-stream"
+ DELETE_STREAM = "delete-stream"
+ GET_STREAM_CONFIG = "get-stream-config"
+ RECONFIGURE_STREAM = "reconfigure-stream"
+ CHECK_TAIL = "check-tail"
+ APPEND = "append"
+ READ = "read"
+ TRIM = "trim"
+ FENCE = "fence"
+ ACCOUNT_METRICS = "account-metrics"
+ BASIN_METRICS = "basin-metrics"
+ STREAM_METRICS = "stream-metrics"
+
+
+class Permission(_DocEnum):
+ """Permission level for operation groups."""
+
+ READ = "read"
+ WRITE = "write"
+ READ_WRITE = "read-write"
+
+
+class MetricUnit(_DocEnum):
+ """Unit of a metric value."""
+
+ BYTES = "bytes"
+ OPERATIONS = "operations"
+
+
+class TimeseriesInterval(_DocEnum):
+ """Bucket interval for timeseries metrics."""
+
+ MINUTE = "minute"
+ HOUR = "hour"
+ DAY = "day"
+
+
+class AccountMetricSet(_DocEnum):
+ """Available account-level metric sets."""
+
+ ACTIVE_BASINS = "active-basins"
+ ACCOUNT_OPS = "account-ops"
+
+
+class BasinMetricSet(_DocEnum):
+ """Available basin-level metric sets."""
+
+ STORAGE = "storage"
+ APPEND_OPS = "append-ops"
+ READ_OPS = "read-ops"
+ READ_THROUGHPUT = "read-throughput"
+ APPEND_THROUGHPUT = "append-throughput"
+ BASIN_OPS = "basin-ops"
+
+
+class StreamMetricSet(_DocEnum):
+ """Available stream-level metric sets."""
+
+ STORAGE = "storage"
+
+
+@dataclass(slots=True)
+class Timestamping:
+ """Timestamping behavior for appends."""
+
+ mode: TimestampingMode | None = None
+ """Timestamping mode. Defaults to ``CLIENT_PREFER``."""
+
+ uncapped: bool | None = None
+ """Allow client-specified timestamps to exceed the arrival time."""
+
+
+@dataclass(slots=True)
+class StreamConfig:
+ """Stream configuration."""
+
+ storage_class: StorageClass | None = None
+ """Storage class for recent appends. Defaults to ``EXPRESS``."""
+
+ retention_policy: int | Literal["infinite"] | None = None
+ """Retention duration in seconds, or ``"infinite"``. Default is 7 days."""
+
+ timestamping: Timestamping | None = None
+ """Timestamping behavior for appends."""
+
+ delete_on_empty_min_age: int | None = None
+ """Minimum age in seconds before this stream can be automatically deleted if empty."""
+
+
+@dataclass(slots=True)
+class BasinConfig:
+ """Basin configuration."""
+
+ default_stream_config: StreamConfig | None = None
+ """Default configuration for streams in this basin."""
+
+ create_stream_on_append: bool | None = None
+ """Create stream on append if it doesn't exist."""
+
+ create_stream_on_read: bool | None = None
+ """Create stream on read if it doesn't exist."""
+
+
+@dataclass(slots=True)
+class BasinInfo:
+ """Basin information."""
+
+ name: str
+ """Basin name."""
+
+ scope: BasinScope | None
+ """Scope of the basin. ``None`` for self-hosted (S2-Lite) basins."""
+
+ created_at: datetime
+ """Creation time."""
+
+ deleted_at: datetime | None
+ """Deletion time if the basin is being deleted."""
+
+
+@dataclass(slots=True)
+class StreamInfo:
+ """Stream information."""
+
+ name: str
+ """Stream name."""
+
+ created_at: datetime
+ """Creation time."""
+
+ deleted_at: datetime | None
+ """Deletion time if the stream is being deleted."""
+
+
+@dataclass(slots=True)
+class ExactMatch:
+ """Match only the resource with this exact name."""
+
+ value: str
+
+
+@dataclass(slots=True)
+class PrefixMatch:
+ """Match all resources that start with this prefix."""
+
+ value: str
+
+
+@dataclass(slots=True)
+class OperationGroupPermissions:
+ """Permissions at the operation group level."""
+
+ account: Permission | None = None
+ """Permission for account operations."""
+
+ basin: Permission | None = None
+ """Permission for basin operations."""
+
+ stream: Permission | None = None
+ """Permission for stream operations."""
+
+
+@dataclass(slots=True)
+class AccessTokenScope:
+ """Scope of an access token."""
+
+ basins: ExactMatch | PrefixMatch | None = None
+ """Permitted basins."""
+
+ streams: ExactMatch | PrefixMatch | None = None
+ """Permitted streams."""
+
+ access_tokens: ExactMatch | PrefixMatch | None = None
+ """Permitted access tokens."""
+
+ op_groups: OperationGroupPermissions | None = None
+ """Permissions at operation group level."""
+
+ ops: list[Operation] = field(default_factory=list)
+ """Permitted operations."""
+
+
+@dataclass(slots=True)
+class AccessTokenInfo:
+ """Access token information."""
+
+ id: str
+ """Access token ID."""
+
+ scope: AccessTokenScope
+ """Scope of the access token."""
+
+ expires_at: str | None
+ """Expiration time."""
+
+ auto_prefix_streams: bool
+ """Whether to automatically prefix stream names during creation and strip the prefix during listing."""
+
+
+@dataclass(slots=True)
+class Scalar:
+ """Single named metric value."""
+
+ name: str
+ unit: MetricUnit
+ value: float
+
+
+@dataclass(slots=True)
+class Accumulation:
+ """Timeseries of accumulated values over buckets."""
+
+ name: str
+ unit: MetricUnit
+ interval: TimeseriesInterval | None
+ values: list[tuple[int, float]]
+
+
+@dataclass(slots=True)
+class Gauge:
+ """Timeseries of instantaneous values."""
+
+ name: str
+ unit: MetricUnit
+ values: list[tuple[int, float]]
+
+
+@dataclass(slots=True)
+class Label:
+ """Set of string labels."""
+
+ name: str
+ values: list[str]
diff --git a/src/s2_sdk/_validators.py b/src/s2_sdk/_validators.py
new file mode 100644
index 0000000..7e7a007
--- /dev/null
+++ b/src/s2_sdk/_validators.py
@@ -0,0 +1,53 @@
+import re
+
+from s2_sdk._exceptions import S2ClientError
+from s2_sdk._types import ONE_MIB
+
+_BASIN_NAME_REGEX = re.compile(r"^[a-z0-9]([a-z0-9-]*[a-z0-9])?$")
+
+
+def validate_basin(name: str) -> None:
+ if (
+ isinstance(name, str)
+ and (8 <= len(name) <= 48)
+ and _BASIN_NAME_REGEX.match(name)
+ ):
+ return
+ raise S2ClientError(f"Invalid basin name: {name}")
+
+
+def validate_max_unacked(max_bytes: int, max_batches: int | None = None) -> None:
+ if max_bytes < ONE_MIB:
+ raise S2ClientError(
+ f"max_unacked_bytes must be at least {ONE_MIB} (1 MiB), got {max_bytes}"
+ )
+ if max_batches is not None and max_batches < 1:
+ raise S2ClientError(
+ f"max_unacked_batches must be at least 1, got {max_batches}"
+ )
+
+
+def validate_batching(max_records: int, max_bytes: int) -> None:
+ if not (1 <= max_records <= 1000):
+ raise S2ClientError(
+ f"max_records must be between 1 and 1000, got {max_records}"
+ )
+ if not (8 <= max_bytes <= ONE_MIB):
+ raise S2ClientError(
+ f"max_bytes must be between 8 and {ONE_MIB}, got {max_bytes}"
+ )
+
+
+def validate_retry(max_attempts: int) -> None:
+ if max_attempts < 1:
+ raise S2ClientError(
+ f"Retry.max_attempts must be at least 1, got {max_attempts}"
+ )
+
+
+def validate_append_input(num_records: int, num_bytes: int) -> None:
+ if 1 <= num_records <= 1000 and num_bytes <= ONE_MIB:
+ return
+ raise S2ClientError(
+ f"Invalid append input: num_records={num_records}, metered_bytes={num_bytes}"
+ )
diff --git a/src/s2_sdk/py.typed b/src/s2_sdk/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/src/streamstore/__init__.py b/src/streamstore/__init__.py
deleted file mode 100644
index 5c21e12..0000000
--- a/src/streamstore/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-__all__ = [
- "S2",
- "Basin",
- "Stream",
- "S2Error",
- "streamstore.schemas",
- "streamstore.utils",
-]
-
-from streamstore._client import S2, Basin, Stream
-from streamstore._exceptions import S2Error
diff --git a/src/streamstore/_client.py b/src/streamstore/_client.py
deleted file mode 100644
index 0fdf561..0000000
--- a/src/streamstore/_client.py
+++ /dev/null
@@ -1,1004 +0,0 @@
-import asyncio
-import re
-import uuid
-from collections import deque
-from dataclasses import dataclass
-from datetime import timedelta
-from typing import AsyncIterable, Self, cast
-
-from anyio import create_memory_object_stream, create_task_group
-from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
-from google.protobuf.field_mask_pb2 import FieldMask
-from grpc import Compression, StatusCode, ssl_channel_credentials
-from grpc.aio import AioRpcError, Channel, secure_channel
-
-from streamstore import schemas
-from streamstore._exceptions import fallible
-from streamstore._lib.s2.v1alpha.s2_pb2 import (
- AppendRequest,
- AppendSessionRequest,
- BasinConfig,
- CheckTailRequest,
- CreateBasinRequest,
- CreateStreamRequest,
- DeleteBasinRequest,
- DeleteStreamRequest,
- GetBasinConfigRequest,
- GetStreamConfigRequest,
- IssueAccessTokenRequest,
- ListAccessTokensRequest,
- ListBasinsRequest,
- ListStreamsRequest,
- ReadSessionRequest,
- ReconfigureBasinRequest,
- ReconfigureStreamRequest,
- RevokeAccessTokenRequest,
- StreamConfig,
-)
-from streamstore._lib.s2.v1alpha.s2_pb2_grpc import (
- AccountServiceStub,
- BasinServiceStub,
- StreamServiceStub,
-)
-from streamstore._mappers import (
- access_token_info_message,
- access_token_info_schema,
- append_input_message,
- append_output_schema,
- basin_config_message,
- basin_config_schema,
- basin_info_schema,
- read_request_message,
- read_session_request_message,
- sequenced_records_schema,
- stream_config_message,
- stream_config_schema,
- stream_info_schema,
-)
-from streamstore._retrier import Attempt, Retrier, compute_backoffs
-from streamstore.utils import metered_bytes
-
-_BASIN_NAME_REGEX = re.compile(r"^[a-z0-9]([a-z0-9-]*[a-z0-9])?$")
-_MEMORY_STREAM_MAX_BUF_SIZE = 100
-
-
-def _grpc_retry_on(e: Exception) -> bool:
- if isinstance(e, AioRpcError) and e.code() in (
- StatusCode.DEADLINE_EXCEEDED,
- StatusCode.UNAVAILABLE,
- StatusCode.UNKNOWN,
- ):
- return True
- return False
-
-
-def _validate_basin(name: str) -> None:
- if (
- isinstance(name, str)
- and (8 <= len(name) <= 48)
- and _BASIN_NAME_REGEX.match(name)
- ):
- return
- raise ValueError(f"Invalid basin name: {name}")
-
-
-def _s2_request_token() -> str:
- return uuid.uuid4().hex
-
-
-def _validate_append_input(input: schemas.AppendInput) -> None:
- num_bytes = metered_bytes(input.records)
- num_records = len(input.records)
- if 1 <= num_records <= 1000 and num_bytes <= schemas.ONE_MIB:
- return
- raise ValueError(
- f"Invalid append input: num_records={num_records}, metered_bytes={num_bytes}"
- )
-
-
-async def _pipe_append_inputs(
- inputs: AsyncIterable[schemas.AppendInput],
- input_tx: MemoryObjectSendStream[schemas.AppendInput],
-):
- async with input_tx:
- async for input in inputs:
- _validate_append_input(input)
- await input_tx.send(input)
-
-
-async def _append_session_request_aiter(
- stream: str,
- inputs: AsyncIterable[schemas.AppendInput],
-) -> AsyncIterable[AppendSessionRequest]:
- async for input in inputs:
- _validate_append_input(input)
- yield AppendSessionRequest(input=append_input_message(stream, input))
-
-
-def _prepare_read_session_request_for_retry(
- request: ReadSessionRequest, last_read_batch: list[schemas.SequencedRecord]
-) -> None:
- if len(last_read_batch) > 0:
- request.seq_num = last_read_batch[-1].seq_num + 1
- if request.limit.count is not None and request.limit.count != 0:
- request.limit.count = max(request.limit.count - len(last_read_batch), 0)
- if request.limit.bytes is not None and request.limit.bytes != 0:
- request.limit.bytes = max(
- request.limit.bytes - metered_bytes(last_read_batch),
- 0,
- )
-
-
-@dataclass(slots=True)
-class _RpcConfig:
- timeout: float
- metadata: list[tuple[str, str]]
- compression: Compression
-
-
-@dataclass(slots=True)
-class _Config:
- max_retries: int
- enable_append_retries: bool
- rpc: _RpcConfig
-
-
-class S2:
- """
- Async client for interacting with `s2.dev `_.
-
- Args:
- access_token: Access token generated from `S2 dashboard `_.
- endpoints: S2 endpoints. If not specified, public endpoints for S2 service running in AWS cloud will be used.
- request_timeout: Timeout for requests made by the client. Default value is ``5`` seconds.
- max_retries: Maximum number of retries for a request. Default value is ``3``.
- enable_append_retries: Enable retries for appends i.e for both :meth:`.Stream.append` and
- :meth:`.Stream.append_session`. Default value is ``True``.
- enable_compression: Enable compression (Gzip) for :meth:`.Stream.append`, :meth:`.Stream.append_session`,
- :meth:`.Stream.read`, and :meth:`.Stream.read_session`. Default value is ``False``.
- """
-
- __slots__ = (
- "_endpoints",
- "_account_channel",
- "_basin_channels",
- "_config",
- "_stub",
- "_retrier",
- )
-
- @fallible
- def __init__(
- self,
- access_token: str,
- endpoints: schemas.Endpoints | None = None,
- request_timeout: timedelta = timedelta(seconds=5.0),
- max_retries: int = 3,
- enable_append_retries: bool = True,
- enable_compression: bool = False,
- ) -> None:
- self._endpoints = (
- endpoints
- if endpoints is not None
- else schemas.Endpoints.for_cloud(schemas.Cloud.AWS)
- )
- self._account_channel = secure_channel(
- target=self._endpoints._account(),
- credentials=ssl_channel_credentials(),
- )
- self._basin_channels: dict[str, Channel] = {}
- self._config = _Config(
- max_retries=max_retries,
- enable_append_retries=enable_append_retries,
- rpc=_RpcConfig(
- timeout=request_timeout.total_seconds(),
- metadata=[("authorization", f"Bearer {access_token}")],
- compression=Compression.Gzip
- if enable_compression
- else Compression.NoCompression,
- ),
- )
- self._stub = AccountServiceStub(self._account_channel)
- self._retrier = Retrier(
- should_retry_on=_grpc_retry_on,
- max_attempts=max_retries,
- )
-
- async def __aenter__(self) -> Self:
- return self
-
- async def __aexit__(self, exc_type, exc_value, traceback) -> bool:
- await self.close()
- if exc_type is None and exc_value is None and traceback is None:
- return True
- return False
-
- def __getitem__(self, name: str) -> "Basin":
- return self.basin(name)
-
- async def close(self) -> None:
- """
- Close all open connections to S2 service endpoints.
-
- Tip:
- ``S2`` supports async context manager protocol, so you can also do the following instead of
- explicitly closing:
-
- .. code-block:: python
-
- async with S2(..) as s2:
- ..
-
- """
-
- await self._account_channel.close(None)
- for basin_channel in self._basin_channels.values():
- await basin_channel.close(None)
-
- @fallible
- async def create_basin(
- self,
- name: str,
- config: schemas.BasinConfig | None = None,
- ) -> schemas.BasinInfo:
- """
- Create a basin.
-
- Args:
- name: Name of the basin.
- config: Configuration for the basin.
-
- Note:
- ``name`` must be globally unique and must be between 8 and 48 characters, comprising lowercase
- letters, numbers and hyphens. It cannot begin or end with a hyphen.
- """
- _validate_basin(name)
- request = CreateBasinRequest(
- basin=name,
- config=cast(
- BasinConfig,
- basin_config_message(config),
- ),
- )
- metadata = self._config.rpc.metadata + [
- ("s2-request-token", _s2_request_token())
- ]
- response = await self._retrier(
- self._stub.CreateBasin,
- request,
- timeout=self._config.rpc.timeout,
- metadata=metadata,
- )
- return basin_info_schema(response.info)
-
- def basin(self, name: str) -> "Basin":
- """
- Get a Basin object that can be used for performing basin operations.
-
- Args:
- name: Name of the basin.
-
- Note:
- The basin must have been created already, else the operations will fail.
-
- Tip:
- .. code-block:: python
-
- async with S2(..) as s2:
- basin = s2.basin("your-basin-name")
-
- :class:`.S2` implements the ``getitem`` magic method, so you can also do the following instead:
-
- .. code-block:: python
-
- async with S2(..) as s2:
- basin = s2["your-basin-name"]
- """
- _validate_basin(name)
- if name not in self._basin_channels:
- self._basin_channels[name] = secure_channel(
- target=self._endpoints._basin(name),
- credentials=ssl_channel_credentials(),
- )
- return Basin(name, self._basin_channels[name], self._config)
-
- @fallible
- async def list_basins(
- self,
- prefix: str = "",
- start_after: str = "",
- limit: int = 1000,
- ) -> schemas.Page[schemas.BasinInfo]:
- """
- List basins.
-
- Args:
- prefix: Filter to basins whose name begins with this prefix.
- start_after: Filter to basins whose name starts lexicographically after this value.
- limit: Number of items to return per page, up to a maximum of 1000.
- """
- request = ListBasinsRequest(prefix=prefix, start_after=start_after, limit=limit)
- response = await self._retrier(
- self._stub.ListBasins,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return schemas.Page(
- items=[basin_info_schema(b) for b in response.basins],
- has_more=response.has_more,
- )
-
- @fallible
- async def delete_basin(self, name: str) -> None:
- """
- Delete a basin.
-
- Args:
- name: Name of the basin.
-
- Note:
- Basin deletion is asynchronous, and may take a few minutes to complete.
- """
- request = DeleteBasinRequest(basin=name)
- await self._retrier(
- self._stub.DeleteBasin,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
-
- @fallible
- async def get_basin_config(self, name: str) -> schemas.BasinConfig:
- """
- Get the current configuration of a basin.
-
- Args:
- name: Name of the basin.
- """
- request = GetBasinConfigRequest(basin=name)
- response = await self._retrier(
- self._stub.GetBasinConfig,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return basin_config_schema(response.config)
-
- @fallible
- async def reconfigure_basin(
- self,
- name: str,
- config: schemas.BasinConfig,
- ) -> schemas.BasinConfig:
- """
- Modify the configuration of a basin.
-
- Args:
- name: Name of the basin.
- config: Configuration for the basin.
-
- Note:
- Modifiying the :attr:`.BasinConfig.default_stream_config` doesn't affect already
- existing streams; it only applies to new streams created hereafter.
- """
- basin_config, mask_paths = cast(
- tuple[BasinConfig, list[str]],
- basin_config_message(
- config,
- return_mask_paths=True,
- ),
- )
- request = ReconfigureBasinRequest(
- basin=name, config=basin_config, mask=FieldMask(paths=mask_paths)
- )
- response = await self._retrier(
- self._stub.ReconfigureBasin,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return basin_config_schema(response.config)
-
- @fallible
- async def issue_access_token(
- self,
- id: str,
- scope: schemas.AccessTokenScope,
- expires_at: int | None = None,
- auto_prefix_streams: bool = False,
- ) -> str:
- """
- Issue a new access token.
-
- Args:
- id: Access token ID.
- scope: Access token scope.
- expires_at: Expiration time in seconds since Unix epoch. If not specified, expiration
- time of ``access_token`` passed to :class:`.S2` will be used.
- auto_prefix_streams: Enable auto-prefixing: the specified prefix in
- :attr:`.AccessTokenScope.streams` will be added to stream names in requests and stripped
- from stream names in responses.
-
- Note:
- ``id`` must be unique to the account and between 1 and 96 bytes in length.
- """
- request = IssueAccessTokenRequest(
- info=access_token_info_message(id, scope, auto_prefix_streams, expires_at)
- )
- response = await self._retrier(
- self._stub.IssueAccessToken,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return response.access_token
-
- @fallible
- async def list_access_tokens(
- self, prefix: str = "", start_after: str = "", limit: int = 1000
- ) -> schemas.Page[schemas.AccessTokenInfo]:
- """
- List access tokens.
-
- Args:
- prefix: Filter to access tokens whose ID begins with this prefix.
- start_after: Filter to access tokens whose ID starts lexicographically after this value.
- limit: Number of items to return per page, up to a maximum of 1000.
- """
- request = ListAccessTokensRequest(
- prefix=prefix, start_after=start_after, limit=limit
- )
- response = await self._retrier(
- self._stub.ListAccessTokens,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return schemas.Page(
- items=[access_token_info_schema(info) for info in response.access_tokens],
- has_more=response.has_more,
- )
-
- @fallible
- async def revoke_access_token(self, id: str) -> schemas.AccessTokenInfo:
- """
- Revoke an access token.
-
- Args:
- id: Access token ID.
- """
- request = RevokeAccessTokenRequest(id=id)
- response = await self._retrier(
- self._stub.RevokeAccessToken,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return access_token_info_schema(response.info)
-
-
-class Basin:
- """
- Caution:
- Returned by :meth:`.S2.basin`. Do not instantiate directly.
- """
-
- __slots__ = (
- "_channel",
- "_config",
- "_retrier",
- "_stub",
- "_name",
- )
-
- @fallible
- def __init__(
- self,
- name: str,
- channel: Channel,
- config: _Config,
- ) -> None:
- self._channel = channel
- self._config = config
- self._retrier = Retrier(
- should_retry_on=_grpc_retry_on,
- max_attempts=config.max_retries,
- )
- self._stub = BasinServiceStub(self._channel)
- self._name = name
-
- def __repr__(self) -> str:
- return f"Basin(name={self.name})"
-
- def __getitem__(self, name: str) -> "Stream":
- return self.stream(name)
-
- @property
- def name(self) -> str:
- """Basin name."""
- return self._name
-
- @fallible
- async def create_stream(
- self,
- name: str,
- config: schemas.StreamConfig | None = None,
- ) -> schemas.StreamInfo:
- """
- Create a stream.
-
- Args:
- name: Name of the stream.
- config: Configuration for the stream.
-
- Note:
- ``name`` must be unique within the basin. It can be an arbitrary string upto 512 characters.
- Backslash (``/``) is recommended as a delimiter for hierarchical naming.
- """
- request = CreateStreamRequest(
- stream=name,
- config=cast(
- StreamConfig,
- stream_config_message(config),
- ),
- )
- metadata = self._config.rpc.metadata + [
- ("s2-request-token", _s2_request_token())
- ]
- response = await self._retrier(
- self._stub.CreateStream,
- request,
- timeout=self._config.rpc.timeout,
- metadata=metadata,
- )
- return stream_info_schema(response.info)
-
- def stream(self, name: str) -> "Stream":
- """
- Get a Stream object that can be used for performing stream operations.
-
- Args:
- name: Name of the stream.
-
- Note:
- The stream must have been created already, else the operations will fail.
-
- Tip:
- .. code-block:: python
-
- async with S2(..) as s2:
- stream = s2.basin("your-basin-name").stream("your-stream-name")
-
- :class:`.Basin` implements the ``getitem`` magic method, so you can also do the following instead:
-
- .. code-block:: python
-
- async with S2(..) as s2:
- stream = s2["your-basin-name"]["your-stream-name"]
-
- """
- return Stream(name, self._channel, self._config)
-
- @fallible
- async def list_streams(
- self,
- prefix: str = "",
- start_after: str = "",
- limit: int = 1000,
- ) -> schemas.Page[schemas.StreamInfo]:
- """
- List streams.
-
- Args:
- prefix: Filter to streams whose name begins with this prefix.
- start_after: Filter to streams whose name starts lexicographically after this value.
- limit: Number of items to return per page, up to a maximum of 1000.
- """
- request = ListStreamsRequest(
- prefix=prefix, start_after=start_after, limit=limit
- )
- response = await self._retrier(
- self._stub.ListStreams,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return schemas.Page(
- items=[stream_info_schema(s) for s in response.streams],
- has_more=response.has_more,
- )
-
- @fallible
- async def delete_stream(self, name: str) -> None:
- """
- Delete a stream.
-
- Args:
- name: Name of the stream.
-
- Note:
- Stream deletion is asynchronous, and may take a few minutes to complete.
- """
- request = DeleteStreamRequest(stream=name)
- await self._retrier(
- self._stub.DeleteStream,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
-
- @fallible
- async def get_stream_config(self, name: str) -> schemas.StreamConfig:
- """
- Get the current configuration of a stream.
-
- Args:
- name: Name of the stream.
- """
- request = GetStreamConfigRequest(stream=name)
- response = await self._retrier(
- self._stub.GetStreamConfig,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return stream_config_schema(response.config)
-
- @fallible
- async def reconfigure_stream(
- self,
- name: str,
- config: schemas.StreamConfig,
- ) -> schemas.StreamConfig:
- """
- Modify the configuration of a stream.
-
- Args:
- name: Name of the stream.
- config: Configuration for the stream.
-
- Note:
- Modifying :attr:`.StreamConfig.storage_class` will take effect only when this stream has
- been inactive for 10 minutes. This will become a live migration in future.
- """
- stream_config, mask_paths = cast(
- tuple[StreamConfig, list[str]],
- stream_config_message(config, return_mask_paths=True),
- )
- request = ReconfigureStreamRequest(
- stream=name, config=stream_config, mask=FieldMask(paths=mask_paths)
- )
- response = await self._retrier(
- self._stub.ReconfigureStream,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return stream_config_schema(response.config)
-
-
-class Stream:
- """
- Caution:
- Returned by :meth:`.Basin.stream`. Do not instantiate directly.
- """
-
- __slots__ = (
- "_name",
- "_config",
- "_retrier",
- "_stub",
- )
-
- def __init__(self, name: str, channel: Channel, config: _Config) -> None:
- self._name = name
- self._config = config
- self._retrier = Retrier(
- should_retry_on=_grpc_retry_on,
- max_attempts=config.max_retries,
- )
- self._stub = StreamServiceStub(channel)
-
- def __repr__(self) -> str:
- return f"Stream(name={self.name})"
-
- @property
- def name(self) -> str:
- """Stream name."""
- return self._name
-
- @fallible
- async def check_tail(self) -> schemas.Tail:
- """
- Check the tail of a stream.
- """
- request = CheckTailRequest(stream=self.name)
- response = await self._retrier(
- self._stub.CheckTail,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- )
- return schemas.Tail(response.next_seq_num, response.last_timestamp)
-
- @fallible
- async def append(self, input: schemas.AppendInput) -> schemas.AppendOutput:
- """
- Append a batch of records to a stream.
- """
- _validate_append_input(input)
- request = AppendRequest(input=append_input_message(self.name, input))
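- # Appends go through the retrier only when enable_append_retries is set;
- # re-sending an append whose first attempt actually succeeded server-side
- # can otherwise duplicate records.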
- response = (
- await self._retrier(
- self._stub.Append,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- compression=self._config.rpc.compression,
- )
- if self._config.enable_append_retries
- else await self._stub.Append(
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- compression=self._config.rpc.compression,
- )
- )
- return append_output_schema(response.output)
-
- async def _append_session(
- self,
- attempt: Attempt,
- inflight_inputs: deque[schemas.AppendInput],
- request_rx: MemoryObjectReceiveStream[AppendSessionRequest],
- output_tx: MemoryObjectSendStream[schemas.AppendOutput],
- ):
- async for response in self._stub.AppendSession(
- request_rx,
- metadata=self._config.rpc.metadata,
- compression=self._config.rpc.compression,
- ):
- if attempt.value > 0:
- attempt.value = 0
- output = response.output
- corresponding_input = inflight_inputs.popleft()
- num_records_sent = len(corresponding_input.records)
- num_records_ackd = output.end_seq_num - output.start_seq_num
- if num_records_sent == num_records_ackd:
- await output_tx.send(append_output_schema(response.output))
- else:
- raise RuntimeError(
- "Number of records sent doesn't match the number of acknowledgements received"
- )
-
- async def _retrying_append_session_inner(
- self,
- input_rx: MemoryObjectReceiveStream[schemas.AppendInput],
- output_tx: MemoryObjectSendStream[schemas.AppendOutput],
- ):
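- # Inputs that were sent on the current AppendSession stream but have not yet been
- # acknowledged. If the stream fails with a retryable error, these are re-sent in
- # order on the next attempt, before any new inputs are pulled from input_rx.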
- inflight_inputs: deque[schemas.AppendInput] = deque()
- max_attempts = self._config.max_retries
- backoffs = compute_backoffs(max_attempts)
- attempt = Attempt(value=0)
- async with output_tx:
- while True:
- request_tx, request_rx = create_memory_object_stream[
- AppendSessionRequest
- ](max_buffer_size=_MEMORY_STREAM_MAX_BUF_SIZE)
- try:
- async with create_task_group() as tg:
- tg.start_soon(
- self._append_session,
- attempt,
- inflight_inputs,
- request_rx,
- output_tx,
- )
- async with request_tx:
- if len(inflight_inputs) > 0:
- for input in list(inflight_inputs):
- await request_tx.send(
- AppendSessionRequest(
- input=append_input_message(self.name, input)
- )
- )
- async for input in input_rx:
- inflight_inputs.append(input)
- await request_tx.send(
- AppendSessionRequest(
- input=append_input_message(self.name, input)
- )
- )
- return
- except* AioRpcError as eg:
- if attempt.value < max_attempts and any(
- _grpc_retry_on(e) for e in eg.exceptions
- ):
- await asyncio.sleep(backoffs[attempt.value])
- attempt.value += 1
- else:
- raise eg
-
- async def _retrying_append_session(
- self,
- inputs: AsyncIterable[schemas.AppendInput],
- ) -> AsyncIterable[schemas.AppendOutput]:
- input_tx, input_rx = create_memory_object_stream[schemas.AppendInput](
- max_buffer_size=_MEMORY_STREAM_MAX_BUF_SIZE
- )
- output_tx, output_rx = create_memory_object_stream[schemas.AppendOutput](
- max_buffer_size=_MEMORY_STREAM_MAX_BUF_SIZE
- )
- async with create_task_group() as tg:
- tg.start_soon(
- self._retrying_append_session_inner,
- input_rx,
- output_tx,
- )
- tg.start_soon(_pipe_append_inputs, inputs, input_tx)
- async with output_rx:
- async for output in output_rx:
- yield output
-
- @fallible
- async def append_session(
- self, inputs: AsyncIterable[schemas.AppendInput]
- ) -> AsyncIterable[schemas.AppendOutput]:
- """
- Append batches of records to a stream continuously, while guaranteeing pipelined inputs are
- processed in order.
-
- Tip:
- You can use :func:`.append_inputs_gen` for automatic batching of records instead of explicitly
- preparing and providing batches of records.
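-
- A minimal sketch (assuming ``records`` is an async iterable of records you have
- prepared, and that :func:`.append_inputs_gen` accepts it for batching):
-
- .. code-block:: python
-
- async with S2(..) as s2:
- stream = s2["your-basin-name"]["your-stream-name"]
- async for output in stream.append_session(append_inputs_gen(records)):
- ...  # each output is a schemas.AppendOutput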
-
- Yields:
- :class:`.AppendOutput` for each corresponding :class:`.AppendInput`.
-
- Returns:
- If ``enable_append_retries=False`` in :class:`.S2`, and processing any of the
- :class:`.AppendInput` batches fails.
-
- (or)
-
- If ``enable_append_retries=True`` in :class:`.S2`, and the retry budget is exhausted while
- trying to recover from failures.
- """
- if self._config.enable_append_retries:
- async for output in self._retrying_append_session(inputs):
- yield output
- else:
- async for response in self._stub.AppendSession(
- _append_session_request_aiter(self.name, inputs),
- metadata=self._config.rpc.metadata,
- compression=self._config.rpc.compression,
- ):
- yield append_output_schema(response.output)
-
- @fallible
- async def read(
- self,
- start: schemas.SeqNum | schemas.Timestamp | schemas.TailOffset,
- limit: schemas.ReadLimit | None = None,
- until: int | None = None,
- ignore_command_records: bool = False,
- ) -> list[schemas.SequencedRecord] | schemas.Tail:
- """
- Read a batch of records from a stream.
-
- Args:
- start: Inclusive start position.
- limit: Number of records to return, up to a maximum of 1000 or 1MiB of :func:`.metered_bytes`.
- until: Exclusive timestamp to read until. It is applied as an additional constraint on
- top of the ``limit`` and guarantees that all returned records have timestamps less
- than this timestamp.
- ignore_command_records: Filters out command records from the batch, if present.
-
- Returns:
- Batch of sequenced records. It can be empty only if ``limit`` and/or ``until`` were provided
- and no records satisfy those constraints.
-
- (or)
-
- Tail of the stream. It will be returned only if ``start`` equals or exceeds the tail of
- the stream.
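-
- Tip:
- A minimal usage sketch. How the ``start`` position is constructed depends on which
- helper you use (:class:`.SeqNum`, :class:`.Timestamp`, or :class:`.TailOffset`), so
- it is elided here:
-
- .. code-block:: python
-
- async with S2(..) as s2:
- stream = s2["your-basin-name"]["your-stream-name"]
- result = await stream.read(start=...)
- if isinstance(result, schemas.Tail):
- ...  # start was at or past the tail
- else:
- for record in result:
- ...  # each record is a schemas.SequencedRecord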
- """
- request = read_request_message(self.name, start, limit, until)
- response = await self._retrier(
- self._stub.Read,
- request,
- timeout=self._config.rpc.timeout,
- metadata=self._config.rpc.metadata,
- compression=self._config.rpc.compression,
- )
- output = response.output
- match output.WhichOneof("output"):
- case "batch":
- return sequenced_records_schema(output.batch, ignore_command_records)
- case "next_seq_num":
- # TODO: use correct last_timestamp when migrating to v1 API.
- return schemas.Tail(output.next_seq_num, 0)
- case _:
- raise RuntimeError(
- "Read output doesn't match any of the expected values"
- )
-
- @fallible
- async def read_session(
- self,
- start: schemas.SeqNum | schemas.Timestamp | schemas.TailOffset,
- limit: schemas.ReadLimit | None = None,
- until: int | None = None,
- clamp: bool = False,
- ignore_command_records: bool = False,
- ) -> AsyncIterable[list[schemas.SequencedRecord] | schemas.Tail]:
- """
- Read batches of records from a stream continuously.
-
- Args:
- start: Inclusive start position.
- limit: Number of records to return, up to a maximum of 1000 or 1MiB of :func:`.metered_bytes`.
- until: Exclusive timestamp to read until. It is applied as an additional constraint on
- top of the ``limit`` and guarantees that all returned records have timestamps less
- than this timestamp.
- clamp: Clamp the ``start`` position to the stream's tail when it exceeds the tail.
- ignore_command_records: Filters out command records from the batch, if present.
-
- Note:
- A session lets you read in a streaming fashion. If ``limit`` and/or ``until``
- were not provided and the tail of the stream is reached, the session goes into
- real-time tailing mode and will yield records as they are appended to the stream.
-
- Yields:
- Batch of sequenced records.
-
- (or)
-
- Tail of the stream. It will be yielded only if ``start`` exceeds the tail and ``clamp``
- was ``False``.
-
- Returns:
- If ``limit`` and/or ``until`` were provided, and if there are no further records that
- satisfy those constraints.
-
- (or)
-
- If the previous yield was the tail of the stream.
- """
- request = read_session_request_message(self.name, start, limit, until, clamp)
- max_attempts = self._config.max_retries
- backoffs = compute_backoffs(max_attempts)
- attempt = 0
- while True:
- try:
- async for response in self._stub.ReadSession(
- request,
- metadata=self._config.rpc.metadata,
- compression=self._config.rpc.compression,
- ):
- if attempt > 0:
- attempt = 0
- output = response.output
- match output.WhichOneof("output"):
- case "batch":
- records = sequenced_records_schema(
- output.batch, ignore_command_records
- )
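- # Update the request so that a retried session resumes after the records
- # just received, rather than re-reading them.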
- _prepare_read_session_request_for_retry(request, records)
- yield records
- case "next_seq_num":
- # TODO: use correct last_timestamp when migrating to v1 API.
- yield schemas.Tail(output.next_seq_num, 0)
- return
- case _:
- raise RuntimeError(
- "Read output doesn't match any of the expected values"
- )
- return
- except Exception as e:
- if attempt < max_attempts and _grpc_retry_on(e):
- await asyncio.sleep(backoffs[attempt])
- attempt += 1
- else:
- raise e
diff --git a/src/streamstore/_exceptions.py b/src/streamstore/_exceptions.py
deleted file mode 100644
index 76d6802..0000000
--- a/src/streamstore/_exceptions.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from functools import wraps
-from inspect import isasyncgenfunction, iscoroutinefunction
-
-
-class S2Error(Exception):
- """
- Base class for all S2 related exceptions.
- """
-
-
-S2Error.__module__ = "streamstore"
-
-
-def fallible(f):
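- """
- Wrap ``f`` so that any exception it raises is re-raised as an :class:`S2Error`,
- with the original exception preserved as the cause. Handles plain functions,
- coroutine functions, and async generator functions.
- """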
- @wraps(f)
- def sync_wrapper(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except Exception as e:
- if isinstance(e, S2Error):
- raise e
- raise S2Error(e) from e
-
- @wraps(f)
- async def async_gen_wrapper(*args, **kwargs):
- try:
- async for val in f(*args, **kwargs):
- yield val
- except Exception as e:
- if isinstance(e, S2Error):
- raise e
- raise S2Error(e) from e
-
- @wraps(f)
- async def coro_wrapper(*args, **kwargs):
- try:
- return await f(*args, **kwargs)
- except Exception as e:
- if isinstance(e, S2Error):
- raise e
- raise S2Error(e) from e
-
- if iscoroutinefunction(f):
- return coro_wrapper
- elif isasyncgenfunction(f):
- return async_gen_wrapper
- else:
- return sync_wrapper
diff --git a/src/streamstore/_lib/s2/v1alpha/s2_pb2.py b/src/streamstore/_lib/s2/v1alpha/s2_pb2.py
deleted file mode 100644
index 5f3d685..0000000
--- a/src/streamstore/_lib/s2/v1alpha/s2_pb2.py
+++ /dev/null
@@ -1,228 +0,0 @@
-# -*- coding: utf-8 -*-
-# Generated by the protocol buffer compiler. DO NOT EDIT!
-# NO CHECKED-IN PROTOBUF GENCODE
-# source: s2/v1alpha/s2.proto
-# Protobuf Python Version: 5.29.0
-"""Generated protocol buffer code."""
-
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import runtime_version as _runtime_version
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf.internal import builder as _builder
-
-_runtime_version.ValidateProtobufRuntimeVersion(
- _runtime_version.Domain.PUBLIC, 5, 29, 0, "", "s2/v1alpha/s2.proto"
-)
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
- b'\n\x13s2/v1alpha/s2.proto\x12\ns2.v1alpha\x1a google/protobuf/field_mask.proto"V\n\x11ListBasinsRequest\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x13\n\x0bstart_after\x18\x02 \x01(\t\x12\x12\n\x05limit\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\x08\n\x06_limit"M\n\x12ListBasinsResponse\x12%\n\x06\x62\x61sins\x18\x01 \x03(\x0b\x32\x15.s2.v1alpha.BasinInfo\x12\x10\n\x08has_more\x18\x02 \x01(\x08"s\n\x12\x43reateBasinRequest\x12\r\n\x05\x62\x61sin\x18\x01 \x01(\t\x12\'\n\x06\x63onfig\x18\x02 \x01(\x0b\x32\x17.s2.v1alpha.BasinConfig\x12%\n\x05scope\x18\x03 \x01(\x0e\x32\x16.s2.v1alpha.BasinScope":\n\x13\x43reateBasinResponse\x12#\n\x04info\x18\x01 \x01(\x0b\x32\x15.s2.v1alpha.BasinInfo"#\n\x12\x44\x65leteBasinRequest\x12\r\n\x05\x62\x61sin\x18\x01 \x01(\t"\x15\n\x13\x44\x65leteBasinResponse"&\n\x15GetBasinConfigRequest\x12\r\n\x05\x62\x61sin\x18\x01 \x01(\t"A\n\x16GetBasinConfigResponse\x12\'\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x17.s2.v1alpha.BasinConfig"{\n\x17ReconfigureBasinRequest\x12\r\n\x05\x62\x61sin\x18\x01 \x01(\t\x12\'\n\x06\x63onfig\x18\x02 \x01(\x0b\x32\x17.s2.v1alpha.BasinConfig\x12(\n\x04mask\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"C\n\x18ReconfigureBasinResponse\x12\'\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x17.s2.v1alpha.BasinConfig"D\n\x17IssueAccessTokenRequest\x12)\n\x04info\x18\x01 \x01(\x0b\x32\x1b.s2.v1alpha.AccessTokenInfo"3\n\x14ReadWritePermissions\x12\x0c\n\x04read\x18\x01 \x01(\x08\x12\r\n\x05write\x18\x02 \x01(\x08"\xb0\x01\n\x18PermittedOperationGroups\x12\x31\n\x07\x61\x63\x63ount\x18\x01 \x01(\x0b\x32 .s2.v1alpha.ReadWritePermissions\x12/\n\x05\x62\x61sin\x18\x02 \x01(\x0b\x32 .s2.v1alpha.ReadWritePermissions\x12\x30\n\x06stream\x18\x03 \x01(\x0b\x32 .s2.v1alpha.ReadWritePermissions"&\n\x18RevokeAccessTokenRequest\x12\n\n\x02id\x18\x01 \x01(\t"F\n\x19RevokeAccessTokenResponse\x12)\n\x04info\x18\x01 \x01(\x0b\x32\x1b.s2.v1alpha.AccessTokenInfo"\\\n\x17ListAccessTokensRequest\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x13\n\x0bstart_after\x18\x02 \x01(\t\x12\x12\n\x05limit\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\x08\n\x06_limit"`\n\x18ListAccessTokensResponse\x12\x32\n\raccess_tokens\x18\x01 \x03(\x0b\x32\x1b.s2.v1alpha.AccessTokenInfo\x12\x10\n\x08has_more\x18\x02 \x01(\x08"\x8f\x01\n\x0f\x41\x63\x63\x65ssTokenInfo\x12\n\n\x02id\x18\x01 \x01(\t\x12\x17\n\nexpires_at\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x1b\n\x13\x61uto_prefix_streams\x18\x03 \x01(\x08\x12+\n\x05scope\x18\x04 \x01(\x0b\x32\x1c.s2.v1alpha.AccessTokenScopeB\r\n\x0b_expires_at"\xf2\x01\n\x10\x41\x63\x63\x65ssTokenScope\x12\'\n\x06\x62\x61sins\x18\x01 \x01(\x0b\x32\x17.s2.v1alpha.ResourceSet\x12(\n\x07streams\x18\x02 \x01(\x0b\x32\x17.s2.v1alpha.ResourceSet\x12.\n\raccess_tokens\x18\x03 \x01(\x0b\x32\x17.s2.v1alpha.ResourceSet\x12\x37\n\top_groups\x18\x04 \x01(\x0b\x32$.s2.v1alpha.PermittedOperationGroups\x12"\n\x03ops\x18\x05 \x03(\x0e\x32\x15.s2.v1alpha.Operation"<\n\x0bResourceSet\x12\x0f\n\x05\x65xact\x18\x01 \x01(\tH\x00\x12\x10\n\x06prefix\x18\x02 \x01(\tH\x00\x42\n\n\x08matching"0\n\x18IssueAccessTokenResponse\x12\x14\n\x0c\x61\x63\x63\x65ss_token\x18\x01 \x01(\t"V\n\nStreamInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ncreated_at\x18\x02 \x01(\r\x12\x17\n\ndeleted_at\x18\x03 \x01(\rH\x00\x88\x01\x01\x42\r\n\x0b_deleted_at"W\n\x12ListStreamsRequest\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12\x13\n\x0bstart_after\x18\x02 \x01(\t\x12\x12\n\x05limit\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\x08\n\x06_limit"P\n\x13ListStreamsResponse\x12\'\n\x07streams\x18\x01 
\x03(\x0b\x32\x16.s2.v1alpha.StreamInfo\x12\x10\n\x08has_more\x18\x02 \x01(\x08"O\n\x13\x43reateStreamRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t\x12(\n\x06\x63onfig\x18\x02 \x01(\x0b\x32\x18.s2.v1alpha.StreamConfig"<\n\x14\x43reateStreamResponse\x12$\n\x04info\x18\x01 \x01(\x0b\x32\x16.s2.v1alpha.StreamInfo"%\n\x13\x44\x65leteStreamRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t"\x16\n\x14\x44\x65leteStreamResponse"(\n\x16GetStreamConfigRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t"C\n\x17GetStreamConfigResponse\x12(\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x18.s2.v1alpha.StreamConfig"~\n\x18ReconfigureStreamRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t\x12(\n\x06\x63onfig\x18\x02 \x01(\x0b\x32\x18.s2.v1alpha.StreamConfig\x12(\n\x04mask\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"E\n\x19ReconfigureStreamResponse\x12(\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x18.s2.v1alpha.StreamConfig""\n\x10\x43heckTailRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t"A\n\x11\x43heckTailResponse\x12\x14\n\x0cnext_seq_num\x18\x01 \x01(\x04\x12\x16\n\x0elast_timestamp\x18\x02 \x01(\x04"\xa4\x01\n\x0b\x41ppendInput\x12\x0e\n\x06stream\x18\x01 \x01(\t\x12)\n\x07records\x18\x02 \x03(\x0b\x32\x18.s2.v1alpha.AppendRecord\x12\x1a\n\rmatch_seq_num\x18\x03 \x01(\x04H\x00\x88\x01\x01\x12\x1a\n\rfencing_token\x18\x04 \x01(\tH\x01\x88\x01\x01\x42\x10\n\x0e_match_seq_numB\x10\n\x0e_fencing_token"\x98\x01\n\x0c\x41ppendOutput\x12\x15\n\rstart_seq_num\x18\x01 \x01(\x04\x12\x17\n\x0fstart_timestamp\x18\x04 \x01(\x04\x12\x13\n\x0b\x65nd_seq_num\x18\x02 \x01(\x04\x12\x15\n\rend_timestamp\x18\x05 \x01(\x04\x12\x14\n\x0cnext_seq_num\x18\x03 \x01(\x04\x12\x16\n\x0elast_timestamp\x18\x06 \x01(\x04"7\n\rAppendRequest\x12&\n\x05input\x18\x01 \x01(\x0b\x32\x17.s2.v1alpha.AppendInput":\n\x0e\x41ppendResponse\x12(\n\x06output\x18\x01 \x01(\x0b\x32\x18.s2.v1alpha.AppendOutput">\n\x14\x41ppendSessionRequest\x12&\n\x05input\x18\x01 \x01(\x0b\x32\x17.s2.v1alpha.AppendInput"A\n\x15\x41ppendSessionResponse\x12(\n\x06output\x18\x01 \x01(\x0b\x32\x18.s2.v1alpha.AppendOutput"g\n\nReadOutput\x12\x31\n\x05\x62\x61tch\x18\x01 \x01(\x0b\x32 .s2.v1alpha.SequencedRecordBatchH\x00\x12\x16\n\x0cnext_seq_num\x18\x03 \x01(\x04H\x00\x42\x08\n\x06outputJ\x04\x08\x02\x10\x03"\xb8\x01\n\x0bReadRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t\x12\x11\n\x07seq_num\x18\x02 \x01(\x04H\x00\x12\x13\n\ttimestamp\x18\x04 \x01(\x04H\x00\x12\x15\n\x0btail_offset\x18\x05 \x01(\x04H\x00\x12$\n\x05limit\x18\x03 \x01(\x0b\x32\x15.s2.v1alpha.ReadLimit\x12\x12\n\x05until\x18\x06 \x01(\x04H\x01\x88\x01\x01\x12\r\n\x05\x63lamp\x18\x07 \x01(\x08\x42\x07\n\x05startB\x08\n\x06_until"6\n\x0cReadResponse\x12&\n\x06output\x18\x01 \x01(\x0b\x32\x16.s2.v1alpha.ReadOutput"G\n\tReadLimit\x12\x12\n\x05\x63ount\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05\x62ytes\x18\x02 \x01(\x04H\x01\x88\x01\x01\x42\x08\n\x06_countB\x08\n\x06_bytes"\xd3\x01\n\x12ReadSessionRequest\x12\x0e\n\x06stream\x18\x01 \x01(\t\x12\x11\n\x07seq_num\x18\x02 \x01(\x04H\x00\x12\x13\n\ttimestamp\x18\x05 \x01(\x04H\x00\x12\x15\n\x0btail_offset\x18\x06 \x01(\x04H\x00\x12$\n\x05limit\x18\x03 \x01(\x0b\x32\x15.s2.v1alpha.ReadLimit\x12\x12\n\nheartbeats\x18\x04 \x01(\x08\x12\x12\n\x05until\x18\x07 \x01(\x04H\x01\x88\x01\x01\x12\r\n\x05\x63lamp\x18\x08 \x01(\x08\x42\x07\n\x05startB\x08\n\x06_until"M\n\x13ReadSessionResponse\x12+\n\x06output\x18\x01 \x01(\x0b\x32\x16.s2.v1alpha.ReadOutputH\x00\x88\x01\x01\x42\t\n\x07_output"\xc8\x03\n\x0cStreamConfig\x12/\n\rstorage_class\x18\x01 
\x01(\x0e\x32\x18.s2.v1alpha.StorageClass\x12\r\n\x03\x61ge\x18\x02 \x01(\x04H\x00\x12>\n\x08infinite\x18\x07 \x01(\x0b\x32*.s2.v1alpha.StreamConfig.InfiniteRetentionH\x00\x12;\n\x0ctimestamping\x18\x05 \x01(\x0b\x32%.s2.v1alpha.StreamConfig.Timestamping\x12?\n\x0f\x64\x65lete_on_empty\x18\x06 \x01(\x0b\x32&.s2.v1alpha.StreamConfig.DeleteOnEmpty\x1a^\n\x0cTimestamping\x12*\n\x04mode\x18\x01 \x01(\x0e\x32\x1c.s2.v1alpha.TimestampingMode\x12\x15\n\x08uncapped\x18\x02 \x01(\x08H\x00\x88\x01\x01\x42\x0b\n\t_uncapped\x1a%\n\rDeleteOnEmpty\x12\x14\n\x0cmin_age_secs\x18\x01 \x01(\x04\x1a\x13\n\x11InfiniteRetentionB\x12\n\x10retention_policyJ\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05"\x86\x01\n\x0b\x42\x61sinConfig\x12\x37\n\x15\x64\x65\x66\x61ult_stream_config\x18\x01 \x01(\x0b\x32\x18.s2.v1alpha.StreamConfig\x12\x1f\n\x17\x63reate_stream_on_append\x18\x02 \x01(\x08\x12\x1d\n\x15\x63reate_stream_on_read\x18\x03 \x01(\x08"g\n\tBasinInfo\x12\x0c\n\x04name\x18\x01 \x01(\t\x12%\n\x05scope\x18\x05 \x01(\x0e\x32\x16.s2.v1alpha.BasinScope\x12%\n\x05state\x18\x04 \x01(\x0e\x32\x16.s2.v1alpha.BasinState"%\n\x06Header\x12\x0c\n\x04name\x18\x01 \x01(\x0c\x12\r\n\x05value\x18\x02 \x01(\x0c"g\n\x0c\x41ppendRecord\x12\x16\n\ttimestamp\x18\x03 \x01(\x04H\x00\x88\x01\x01\x12#\n\x07headers\x18\x01 \x03(\x0b\x32\x12.s2.v1alpha.Header\x12\x0c\n\x04\x62ody\x18\x02 \x01(\x0c\x42\x0c\n\n_timestamp"h\n\x0fSequencedRecord\x12\x0f\n\x07seq_num\x18\x01 \x01(\x04\x12\x11\n\ttimestamp\x18\x04 \x01(\x04\x12#\n\x07headers\x18\x02 \x03(\x0b\x32\x12.s2.v1alpha.Header\x12\x0c\n\x04\x62ody\x18\x03 \x01(\x0c"D\n\x14SequencedRecordBatch\x12,\n\x07records\x18\x01 \x03(\x0b\x32\x1b.s2.v1alpha.SequencedRecord*H\n\nBasinScope\x12\x1b\n\x17\x42\x41SIN_SCOPE_UNSPECIFIED\x10\x00\x12\x1d\n\x19\x42\x41SIN_SCOPE_AWS_US_EAST_1\x10\x01*\x81\x05\n\tOperation\x12\x19\n\x15OPERATION_UNSPECIFIED\x10\x00\x12\x19\n\x15OPERATION_LIST_BASINS\x10\x01\x12\x1a\n\x16OPERATION_CREATE_BASIN\x10\x02\x12\x1a\n\x16OPERATION_DELETE_BASIN\x10\x03\x12\x1f\n\x1bOPERATION_RECONFIGURE_BASIN\x10\x04\x12\x1e\n\x1aOPERATION_GET_BASIN_CONFIG\x10\x05\x12 \n\x1cOPERATION_ISSUE_ACCESS_TOKEN\x10\x06\x12!\n\x1dOPERATION_REVOKE_ACCESS_TOKEN\x10\x07\x12 \n\x1cOPERATION_LIST_ACCESS_TOKENS\x10\x08\x12\x1a\n\x16OPERATION_LIST_STREAMS\x10\t\x12\x1b\n\x17OPERATION_CREATE_STREAM\x10\n\x12\x1b\n\x17OPERATION_DELETE_STREAM\x10\x0b\x12\x1f\n\x1bOPERATION_GET_STREAM_CONFIG\x10\x0c\x12 \n\x1cOPERATION_RECONFIGURE_STREAM\x10\r\x12\x18\n\x14OPERATION_CHECK_TAIL\x10\x0e\x12\x14\n\x10OPERATION_APPEND\x10\x0f\x12\x12\n\x0eOPERATION_READ\x10\x10\x12\x12\n\x0eOPERATION_TRIM\x10\x11\x12\x13\n\x0fOPERATION_FENCE\x10\x12\x12\x1d\n\x19OPERATION_ACCOUNT_METRICS\x10\x13\x12\x1b\n\x17OPERATION_BASIN_METRICS\x10\x14\x12\x1c\n\x18OPERATION_STREAM_METRICS\x10\x15*d\n\x0cStorageClass\x12\x1d\n\x19STORAGE_CLASS_UNSPECIFIED\x10\x00\x12\x1a\n\x16STORAGE_CLASS_STANDARD\x10\x01\x12\x19\n\x15STORAGE_CLASS_EXPRESS\x10\x02*\x9f\x01\n\x10TimestampingMode\x12!\n\x1dTIMESTAMPING_MODE_UNSPECIFIED\x10\x00\x12#\n\x1fTIMESTAMPING_MODE_CLIENT_PREFER\x10\x01\x12$\n 
TIMESTAMPING_MODE_CLIENT_REQUIRE\x10\x02\x12\x1d\n\x19TIMESTAMPING_MODE_ARRIVAL\x10\x03*u\n\nBasinState\x12\x1b\n\x17\x42\x41SIN_STATE_UNSPECIFIED\x10\x00\x12\x16\n\x12\x42\x41SIN_STATE_ACTIVE\x10\x01\x12\x18\n\x14\x42\x41SIN_STATE_CREATING\x10\x02\x12\x18\n\x14\x42\x41SIN_STATE_DELETING\x10\x03\x32\xf8\x05\n\x0e\x41\x63\x63ountService\x12P\n\nListBasins\x12\x1d.s2.v1alpha.ListBasinsRequest\x1a\x1e.s2.v1alpha.ListBasinsResponse"\x03\x90\x02\x01\x12S\n\x0b\x43reateBasin\x12\x1e.s2.v1alpha.CreateBasinRequest\x1a\x1f.s2.v1alpha.CreateBasinResponse"\x03\x90\x02\x02\x12S\n\x0b\x44\x65leteBasin\x12\x1e.s2.v1alpha.DeleteBasinRequest\x1a\x1f.s2.v1alpha.DeleteBasinResponse"\x03\x90\x02\x02\x12\x62\n\x10ReconfigureBasin\x12#.s2.v1alpha.ReconfigureBasinRequest\x1a$.s2.v1alpha.ReconfigureBasinResponse"\x03\x90\x02\x02\x12\\\n\x0eGetBasinConfig\x12!.s2.v1alpha.GetBasinConfigRequest\x1a".s2.v1alpha.GetBasinConfigResponse"\x03\x90\x02\x01\x12]\n\x10IssueAccessToken\x12#.s2.v1alpha.IssueAccessTokenRequest\x1a$.s2.v1alpha.IssueAccessTokenResponse\x12\x65\n\x11RevokeAccessToken\x12$.s2.v1alpha.RevokeAccessTokenRequest\x1a%.s2.v1alpha.RevokeAccessTokenResponse"\x03\x90\x02\x02\x12\x62\n\x10ListAccessTokens\x12#.s2.v1alpha.ListAccessTokensRequest\x1a$.s2.v1alpha.ListAccessTokensResponse"\x03\x90\x02\x01\x32\xdb\x03\n\x0c\x42\x61sinService\x12S\n\x0bListStreams\x12\x1e.s2.v1alpha.ListStreamsRequest\x1a\x1f.s2.v1alpha.ListStreamsResponse"\x03\x90\x02\x01\x12V\n\x0c\x43reateStream\x12\x1f.s2.v1alpha.CreateStreamRequest\x1a .s2.v1alpha.CreateStreamResponse"\x03\x90\x02\x02\x12V\n\x0c\x44\x65leteStream\x12\x1f.s2.v1alpha.DeleteStreamRequest\x1a .s2.v1alpha.DeleteStreamResponse"\x03\x90\x02\x02\x12_\n\x0fGetStreamConfig\x12".s2.v1alpha.GetStreamConfigRequest\x1a#.s2.v1alpha.GetStreamConfigResponse"\x03\x90\x02\x01\x12\x65\n\x11ReconfigureStream\x12$.s2.v1alpha.ReconfigureStreamRequest\x1a%.s2.v1alpha.ReconfigureStreamResponse"\x03\x90\x02\x02\x32\x90\x03\n\rStreamService\x12M\n\tCheckTail\x12\x1c.s2.v1alpha.CheckTailRequest\x1a\x1d.s2.v1alpha.CheckTailResponse"\x03\x90\x02\x01\x12?\n\x06\x41ppend\x12\x19.s2.v1alpha.AppendRequest\x1a\x1a.s2.v1alpha.AppendResponse\x12X\n\rAppendSession\x12 .s2.v1alpha.AppendSessionRequest\x1a!.s2.v1alpha.AppendSessionResponse(\x01\x30\x01\x12>\n\x04Read\x12\x17.s2.v1alpha.ReadRequest\x1a\x18.s2.v1alpha.ReadResponse"\x03\x90\x02\x01\x12U\n\x0bReadSession\x12\x1e.s2.v1alpha.ReadSessionRequest\x1a\x1f.s2.v1alpha.ReadSessionResponse"\x03\x90\x02\x01\x30\x01\x42\x0e\n\ns2.v1alphaP\x01\x62\x06proto3'
-)
-
-_globals = globals()
-_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
-_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "s2.v1alpha.s2_pb2", _globals)
-if not _descriptor._USE_C_DESCRIPTORS:
- _globals["DESCRIPTOR"]._loaded_options = None
- _globals["DESCRIPTOR"]._serialized_options = b"\n\ns2.v1alphaP\001"
- _globals["_ACCOUNTSERVICE"].methods_by_name["ListBasins"]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "ListBasins"
- ]._serialized_options = b"\220\002\001"
- _globals["_ACCOUNTSERVICE"].methods_by_name["CreateBasin"]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "CreateBasin"
- ]._serialized_options = b"\220\002\002"
- _globals["_ACCOUNTSERVICE"].methods_by_name["DeleteBasin"]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "DeleteBasin"
- ]._serialized_options = b"\220\002\002"
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "ReconfigureBasin"
- ]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "ReconfigureBasin"
- ]._serialized_options = b"\220\002\002"
- _globals["_ACCOUNTSERVICE"].methods_by_name["GetBasinConfig"]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "GetBasinConfig"
- ]._serialized_options = b"\220\002\001"
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "RevokeAccessToken"
- ]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "RevokeAccessToken"
- ]._serialized_options = b"\220\002\002"
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "ListAccessTokens"
- ]._loaded_options = None
- _globals["_ACCOUNTSERVICE"].methods_by_name[
- "ListAccessTokens"
- ]._serialized_options = b"\220\002\001"
- _globals["_BASINSERVICE"].methods_by_name["ListStreams"]._loaded_options = None
- _globals["_BASINSERVICE"].methods_by_name[
- "ListStreams"
- ]._serialized_options = b"\220\002\001"
- _globals["_BASINSERVICE"].methods_by_name["CreateStream"]._loaded_options = None
- _globals["_BASINSERVICE"].methods_by_name[
- "CreateStream"
- ]._serialized_options = b"\220\002\002"
- _globals["_BASINSERVICE"].methods_by_name["DeleteStream"]._loaded_options = None
- _globals["_BASINSERVICE"].methods_by_name[
- "DeleteStream"
- ]._serialized_options = b"\220\002\002"
- _globals["_BASINSERVICE"].methods_by_name["GetStreamConfig"]._loaded_options = None
- _globals["_BASINSERVICE"].methods_by_name[
- "GetStreamConfig"
- ]._serialized_options = b"\220\002\001"
- _globals["_BASINSERVICE"].methods_by_name[
- "ReconfigureStream"
- ]._loaded_options = None
- _globals["_BASINSERVICE"].methods_by_name[
- "ReconfigureStream"
- ]._serialized_options = b"\220\002\002"
- _globals["_STREAMSERVICE"].methods_by_name["CheckTail"]._loaded_options = None
- _globals["_STREAMSERVICE"].methods_by_name[
- "CheckTail"
- ]._serialized_options = b"\220\002\001"
- _globals["_STREAMSERVICE"].methods_by_name["Read"]._loaded_options = None
- _globals["_STREAMSERVICE"].methods_by_name[
- "Read"
- ]._serialized_options = b"\220\002\001"
- _globals["_STREAMSERVICE"].methods_by_name["ReadSession"]._loaded_options = None
- _globals["_STREAMSERVICE"].methods_by_name[
- "ReadSession"
- ]._serialized_options = b"\220\002\001"
- _globals["_BASINSCOPE"]._serialized_start = 5066
- _globals["_BASINSCOPE"]._serialized_end = 5138
- _globals["_OPERATION"]._serialized_start = 5141
- _globals["_OPERATION"]._serialized_end = 5782
- _globals["_STORAGECLASS"]._serialized_start = 5784
- _globals["_STORAGECLASS"]._serialized_end = 5884
- _globals["_TIMESTAMPINGMODE"]._serialized_start = 5887
- _globals["_TIMESTAMPINGMODE"]._serialized_end = 6046
- _globals["_BASINSTATE"]._serialized_start = 6048
- _globals["_BASINSTATE"]._serialized_end = 6165
- _globals["_LISTBASINSREQUEST"]._serialized_start = 69
- _globals["_LISTBASINSREQUEST"]._serialized_end = 155
- _globals["_LISTBASINSRESPONSE"]._serialized_start = 157
- _globals["_LISTBASINSRESPONSE"]._serialized_end = 234
- _globals["_CREATEBASINREQUEST"]._serialized_start = 236
- _globals["_CREATEBASINREQUEST"]._serialized_end = 351
- _globals["_CREATEBASINRESPONSE"]._serialized_start = 353
- _globals["_CREATEBASINRESPONSE"]._serialized_end = 411
- _globals["_DELETEBASINREQUEST"]._serialized_start = 413
- _globals["_DELETEBASINREQUEST"]._serialized_end = 448
- _globals["_DELETEBASINRESPONSE"]._serialized_start = 450
- _globals["_DELETEBASINRESPONSE"]._serialized_end = 471
- _globals["_GETBASINCONFIGREQUEST"]._serialized_start = 473
- _globals["_GETBASINCONFIGREQUEST"]._serialized_end = 511
- _globals["_GETBASINCONFIGRESPONSE"]._serialized_start = 513
- _globals["_GETBASINCONFIGRESPONSE"]._serialized_end = 578
- _globals["_RECONFIGUREBASINREQUEST"]._serialized_start = 580
- _globals["_RECONFIGUREBASINREQUEST"]._serialized_end = 703
- _globals["_RECONFIGUREBASINRESPONSE"]._serialized_start = 705
- _globals["_RECONFIGUREBASINRESPONSE"]._serialized_end = 772
- _globals["_ISSUEACCESSTOKENREQUEST"]._serialized_start = 774
- _globals["_ISSUEACCESSTOKENREQUEST"]._serialized_end = 842
- _globals["_READWRITEPERMISSIONS"]._serialized_start = 844
- _globals["_READWRITEPERMISSIONS"]._serialized_end = 895
- _globals["_PERMITTEDOPERATIONGROUPS"]._serialized_start = 898
- _globals["_PERMITTEDOPERATIONGROUPS"]._serialized_end = 1074
- _globals["_REVOKEACCESSTOKENREQUEST"]._serialized_start = 1076
- _globals["_REVOKEACCESSTOKENREQUEST"]._serialized_end = 1114
- _globals["_REVOKEACCESSTOKENRESPONSE"]._serialized_start = 1116
- _globals["_REVOKEACCESSTOKENRESPONSE"]._serialized_end = 1186
- _globals["_LISTACCESSTOKENSREQUEST"]._serialized_start = 1188
- _globals["_LISTACCESSTOKENSREQUEST"]._serialized_end = 1280
- _globals["_LISTACCESSTOKENSRESPONSE"]._serialized_start = 1282
- _globals["_LISTACCESSTOKENSRESPONSE"]._serialized_end = 1378
- _globals["_ACCESSTOKENINFO"]._serialized_start = 1381
- _globals["_ACCESSTOKENINFO"]._serialized_end = 1524
- _globals["_ACCESSTOKENSCOPE"]._serialized_start = 1527
- _globals["_ACCESSTOKENSCOPE"]._serialized_end = 1769
- _globals["_RESOURCESET"]._serialized_start = 1771
- _globals["_RESOURCESET"]._serialized_end = 1831
- _globals["_ISSUEACCESSTOKENRESPONSE"]._serialized_start = 1833
- _globals["_ISSUEACCESSTOKENRESPONSE"]._serialized_end = 1881
- _globals["_STREAMINFO"]._serialized_start = 1883
- _globals["_STREAMINFO"]._serialized_end = 1969
- _globals["_LISTSTREAMSREQUEST"]._serialized_start = 1971
- _globals["_LISTSTREAMSREQUEST"]._serialized_end = 2058
- _globals["_LISTSTREAMSRESPONSE"]._serialized_start = 2060
- _globals["_LISTSTREAMSRESPONSE"]._serialized_end = 2140
- _globals["_CREATESTREAMREQUEST"]._serialized_start = 2142
- _globals["_CREATESTREAMREQUEST"]._serialized_end = 2221
- _globals["_CREATESTREAMRESPONSE"]._serialized_start = 2223
- _globals["_CREATESTREAMRESPONSE"]._serialized_end = 2283
- _globals["_DELETESTREAMREQUEST"]._serialized_start = 2285
- _globals["_DELETESTREAMREQUEST"]._serialized_end = 2322
- _globals["_DELETESTREAMRESPONSE"]._serialized_start = 2324
- _globals["_DELETESTREAMRESPONSE"]._serialized_end = 2346
- _globals["_GETSTREAMCONFIGREQUEST"]._serialized_start = 2348
- _globals["_GETSTREAMCONFIGREQUEST"]._serialized_end = 2388
- _globals["_GETSTREAMCONFIGRESPONSE"]._serialized_start = 2390
- _globals["_GETSTREAMCONFIGRESPONSE"]._serialized_end = 2457
- _globals["_RECONFIGURESTREAMREQUEST"]._serialized_start = 2459
- _globals["_RECONFIGURESTREAMREQUEST"]._serialized_end = 2585
- _globals["_RECONFIGURESTREAMRESPONSE"]._serialized_start = 2587
- _globals["_RECONFIGURESTREAMRESPONSE"]._serialized_end = 2656
- _globals["_CHECKTAILREQUEST"]._serialized_start = 2658
- _globals["_CHECKTAILREQUEST"]._serialized_end = 2692
- _globals["_CHECKTAILRESPONSE"]._serialized_start = 2694
- _globals["_CHECKTAILRESPONSE"]._serialized_end = 2759
- _globals["_APPENDINPUT"]._serialized_start = 2762
- _globals["_APPENDINPUT"]._serialized_end = 2926
- _globals["_APPENDOUTPUT"]._serialized_start = 2929
- _globals["_APPENDOUTPUT"]._serialized_end = 3081
- _globals["_APPENDREQUEST"]._serialized_start = 3083
- _globals["_APPENDREQUEST"]._serialized_end = 3138
- _globals["_APPENDRESPONSE"]._serialized_start = 3140
- _globals["_APPENDRESPONSE"]._serialized_end = 3198
- _globals["_APPENDSESSIONREQUEST"]._serialized_start = 3200
- _globals["_APPENDSESSIONREQUEST"]._serialized_end = 3262
- _globals["_APPENDSESSIONRESPONSE"]._serialized_start = 3264
- _globals["_APPENDSESSIONRESPONSE"]._serialized_end = 3329
- _globals["_READOUTPUT"]._serialized_start = 3331
- _globals["_READOUTPUT"]._serialized_end = 3434
- _globals["_READREQUEST"]._serialized_start = 3437
- _globals["_READREQUEST"]._serialized_end = 3621
- _globals["_READRESPONSE"]._serialized_start = 3623
- _globals["_READRESPONSE"]._serialized_end = 3677
- _globals["_READLIMIT"]._serialized_start = 3679
- _globals["_READLIMIT"]._serialized_end = 3750
- _globals["_READSESSIONREQUEST"]._serialized_start = 3753
- _globals["_READSESSIONREQUEST"]._serialized_end = 3964
- _globals["_READSESSIONRESPONSE"]._serialized_start = 3966
- _globals["_READSESSIONRESPONSE"]._serialized_end = 4043
- _globals["_STREAMCONFIG"]._serialized_start = 4046
- _globals["_STREAMCONFIG"]._serialized_end = 4502
- _globals["_STREAMCONFIG_TIMESTAMPING"]._serialized_start = 4316
- _globals["_STREAMCONFIG_TIMESTAMPING"]._serialized_end = 4410
- _globals["_STREAMCONFIG_DELETEONEMPTY"]._serialized_start = 4412
- _globals["_STREAMCONFIG_DELETEONEMPTY"]._serialized_end = 4449
- _globals["_STREAMCONFIG_INFINITERETENTION"]._serialized_start = 4451
- _globals["_STREAMCONFIG_INFINITERETENTION"]._serialized_end = 4470
- _globals["_BASINCONFIG"]._serialized_start = 4505
- _globals["_BASINCONFIG"]._serialized_end = 4639
- _globals["_BASININFO"]._serialized_start = 4641
- _globals["_BASININFO"]._serialized_end = 4744
- _globals["_HEADER"]._serialized_start = 4746
- _globals["_HEADER"]._serialized_end = 4783
- _globals["_APPENDRECORD"]._serialized_start = 4785
- _globals["_APPENDRECORD"]._serialized_end = 4888
- _globals["_SEQUENCEDRECORD"]._serialized_start = 4890
- _globals["_SEQUENCEDRECORD"]._serialized_end = 4994
- _globals["_SEQUENCEDRECORDBATCH"]._serialized_start = 4996
- _globals["_SEQUENCEDRECORDBATCH"]._serialized_end = 5064
- _globals["_ACCOUNTSERVICE"]._serialized_start = 6168
- _globals["_ACCOUNTSERVICE"]._serialized_end = 6928
- _globals["_BASINSERVICE"]._serialized_start = 6931
- _globals["_BASINSERVICE"]._serialized_end = 7406
- _globals["_STREAMSERVICE"]._serialized_start = 7409
- _globals["_STREAMSERVICE"]._serialized_end = 7809
-# @@protoc_insertion_point(module_scope)
diff --git a/src/streamstore/_lib/s2/v1alpha/s2_pb2.pyi b/src/streamstore/_lib/s2/v1alpha/s2_pb2.pyi
deleted file mode 100644
index c77b8fa..0000000
--- a/src/streamstore/_lib/s2/v1alpha/s2_pb2.pyi
+++ /dev/null
@@ -1,765 +0,0 @@
-from typing import ClassVar as _ClassVar
-from typing import Iterable as _Iterable
-from typing import Mapping as _Mapping
-from typing import Optional as _Optional
-from typing import Union as _Union
-
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import field_mask_pb2 as _field_mask_pb2
-from google.protobuf import message as _message
-from google.protobuf.internal import containers as _containers
-from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper
-
-DESCRIPTOR: _descriptor.FileDescriptor
-
-class BasinScope(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
- __slots__ = ()
- BASIN_SCOPE_UNSPECIFIED: _ClassVar[BasinScope]
- BASIN_SCOPE_AWS_US_EAST_1: _ClassVar[BasinScope]
-
-class Operation(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
- __slots__ = ()
- OPERATION_UNSPECIFIED: _ClassVar[Operation]
- OPERATION_LIST_BASINS: _ClassVar[Operation]
- OPERATION_CREATE_BASIN: _ClassVar[Operation]
- OPERATION_DELETE_BASIN: _ClassVar[Operation]
- OPERATION_RECONFIGURE_BASIN: _ClassVar[Operation]
- OPERATION_GET_BASIN_CONFIG: _ClassVar[Operation]
- OPERATION_ISSUE_ACCESS_TOKEN: _ClassVar[Operation]
- OPERATION_REVOKE_ACCESS_TOKEN: _ClassVar[Operation]
- OPERATION_LIST_ACCESS_TOKENS: _ClassVar[Operation]
- OPERATION_LIST_STREAMS: _ClassVar[Operation]
- OPERATION_CREATE_STREAM: _ClassVar[Operation]
- OPERATION_DELETE_STREAM: _ClassVar[Operation]
- OPERATION_GET_STREAM_CONFIG: _ClassVar[Operation]
- OPERATION_RECONFIGURE_STREAM: _ClassVar[Operation]
- OPERATION_CHECK_TAIL: _ClassVar[Operation]
- OPERATION_APPEND: _ClassVar[Operation]
- OPERATION_READ: _ClassVar[Operation]
- OPERATION_TRIM: _ClassVar[Operation]
- OPERATION_FENCE: _ClassVar[Operation]
- OPERATION_ACCOUNT_METRICS: _ClassVar[Operation]
- OPERATION_BASIN_METRICS: _ClassVar[Operation]
- OPERATION_STREAM_METRICS: _ClassVar[Operation]
-
-class StorageClass(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
- __slots__ = ()
- STORAGE_CLASS_UNSPECIFIED: _ClassVar[StorageClass]
- STORAGE_CLASS_STANDARD: _ClassVar[StorageClass]
- STORAGE_CLASS_EXPRESS: _ClassVar[StorageClass]
-
-class TimestampingMode(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
- __slots__ = ()
- TIMESTAMPING_MODE_UNSPECIFIED: _ClassVar[TimestampingMode]
- TIMESTAMPING_MODE_CLIENT_PREFER: _ClassVar[TimestampingMode]
- TIMESTAMPING_MODE_CLIENT_REQUIRE: _ClassVar[TimestampingMode]
- TIMESTAMPING_MODE_ARRIVAL: _ClassVar[TimestampingMode]
-
-class BasinState(int, metaclass=_enum_type_wrapper.EnumTypeWrapper):
- __slots__ = ()
- BASIN_STATE_UNSPECIFIED: _ClassVar[BasinState]
- BASIN_STATE_ACTIVE: _ClassVar[BasinState]
- BASIN_STATE_CREATING: _ClassVar[BasinState]
- BASIN_STATE_DELETING: _ClassVar[BasinState]
-
-BASIN_SCOPE_UNSPECIFIED: BasinScope
-BASIN_SCOPE_AWS_US_EAST_1: BasinScope
-OPERATION_UNSPECIFIED: Operation
-OPERATION_LIST_BASINS: Operation
-OPERATION_CREATE_BASIN: Operation
-OPERATION_DELETE_BASIN: Operation
-OPERATION_RECONFIGURE_BASIN: Operation
-OPERATION_GET_BASIN_CONFIG: Operation
-OPERATION_ISSUE_ACCESS_TOKEN: Operation
-OPERATION_REVOKE_ACCESS_TOKEN: Operation
-OPERATION_LIST_ACCESS_TOKENS: Operation
-OPERATION_LIST_STREAMS: Operation
-OPERATION_CREATE_STREAM: Operation
-OPERATION_DELETE_STREAM: Operation
-OPERATION_GET_STREAM_CONFIG: Operation
-OPERATION_RECONFIGURE_STREAM: Operation
-OPERATION_CHECK_TAIL: Operation
-OPERATION_APPEND: Operation
-OPERATION_READ: Operation
-OPERATION_TRIM: Operation
-OPERATION_FENCE: Operation
-OPERATION_ACCOUNT_METRICS: Operation
-OPERATION_BASIN_METRICS: Operation
-OPERATION_STREAM_METRICS: Operation
-STORAGE_CLASS_UNSPECIFIED: StorageClass
-STORAGE_CLASS_STANDARD: StorageClass
-STORAGE_CLASS_EXPRESS: StorageClass
-TIMESTAMPING_MODE_UNSPECIFIED: TimestampingMode
-TIMESTAMPING_MODE_CLIENT_PREFER: TimestampingMode
-TIMESTAMPING_MODE_CLIENT_REQUIRE: TimestampingMode
-TIMESTAMPING_MODE_ARRIVAL: TimestampingMode
-BASIN_STATE_UNSPECIFIED: BasinState
-BASIN_STATE_ACTIVE: BasinState
-BASIN_STATE_CREATING: BasinState
-BASIN_STATE_DELETING: BasinState
-
-class ListBasinsRequest(_message.Message):
- __slots__ = ("prefix", "start_after", "limit")
- PREFIX_FIELD_NUMBER: _ClassVar[int]
- START_AFTER_FIELD_NUMBER: _ClassVar[int]
- LIMIT_FIELD_NUMBER: _ClassVar[int]
- prefix: str
- start_after: str
- limit: int
- def __init__(
- self,
- prefix: _Optional[str] = ...,
- start_after: _Optional[str] = ...,
- limit: _Optional[int] = ...,
- ) -> None: ...
-
-class ListBasinsResponse(_message.Message):
- __slots__ = ("basins", "has_more")
- BASINS_FIELD_NUMBER: _ClassVar[int]
- HAS_MORE_FIELD_NUMBER: _ClassVar[int]
- basins: _containers.RepeatedCompositeFieldContainer[BasinInfo]
- has_more: bool
- def __init__(
- self,
- basins: _Optional[_Iterable[_Union[BasinInfo, _Mapping]]] = ...,
- has_more: bool = ...,
- ) -> None: ...
-
-class CreateBasinRequest(_message.Message):
- __slots__ = ("basin", "config", "scope")
- BASIN_FIELD_NUMBER: _ClassVar[int]
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- SCOPE_FIELD_NUMBER: _ClassVar[int]
- basin: str
- config: BasinConfig
- scope: BasinScope
- def __init__(
- self,
- basin: _Optional[str] = ...,
- config: _Optional[_Union[BasinConfig, _Mapping]] = ...,
- scope: _Optional[_Union[BasinScope, str]] = ...,
- ) -> None: ...
-
-class CreateBasinResponse(_message.Message):
- __slots__ = ("info",)
- INFO_FIELD_NUMBER: _ClassVar[int]
- info: BasinInfo
- def __init__(self, info: _Optional[_Union[BasinInfo, _Mapping]] = ...) -> None: ...
-
-class DeleteBasinRequest(_message.Message):
- __slots__ = ("basin",)
- BASIN_FIELD_NUMBER: _ClassVar[int]
- basin: str
- def __init__(self, basin: _Optional[str] = ...) -> None: ...
-
-class DeleteBasinResponse(_message.Message):
- __slots__ = ()
- def __init__(self) -> None: ...
-
-class GetBasinConfigRequest(_message.Message):
- __slots__ = ("basin",)
- BASIN_FIELD_NUMBER: _ClassVar[int]
- basin: str
- def __init__(self, basin: _Optional[str] = ...) -> None: ...
-
-class GetBasinConfigResponse(_message.Message):
- __slots__ = ("config",)
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- config: BasinConfig
- def __init__(
- self, config: _Optional[_Union[BasinConfig, _Mapping]] = ...
- ) -> None: ...
-
-class ReconfigureBasinRequest(_message.Message):
- __slots__ = ("basin", "config", "mask")
- BASIN_FIELD_NUMBER: _ClassVar[int]
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- MASK_FIELD_NUMBER: _ClassVar[int]
- basin: str
- config: BasinConfig
- mask: _field_mask_pb2.FieldMask
- def __init__(
- self,
- basin: _Optional[str] = ...,
- config: _Optional[_Union[BasinConfig, _Mapping]] = ...,
- mask: _Optional[_Union[_field_mask_pb2.FieldMask, _Mapping]] = ...,
- ) -> None: ...
-
-class ReconfigureBasinResponse(_message.Message):
- __slots__ = ("config",)
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- config: BasinConfig
- def __init__(
- self, config: _Optional[_Union[BasinConfig, _Mapping]] = ...
- ) -> None: ...
-
-class IssueAccessTokenRequest(_message.Message):
- __slots__ = ("info",)
- INFO_FIELD_NUMBER: _ClassVar[int]
- info: AccessTokenInfo
- def __init__(
- self, info: _Optional[_Union[AccessTokenInfo, _Mapping]] = ...
- ) -> None: ...
-
-class ReadWritePermissions(_message.Message):
- __slots__ = ("read", "write")
- READ_FIELD_NUMBER: _ClassVar[int]
- WRITE_FIELD_NUMBER: _ClassVar[int]
- read: bool
- write: bool
- def __init__(self, read: bool = ..., write: bool = ...) -> None: ...
-
-class PermittedOperationGroups(_message.Message):
- __slots__ = ("account", "basin", "stream")
- ACCOUNT_FIELD_NUMBER: _ClassVar[int]
- BASIN_FIELD_NUMBER: _ClassVar[int]
- STREAM_FIELD_NUMBER: _ClassVar[int]
- account: ReadWritePermissions
- basin: ReadWritePermissions
- stream: ReadWritePermissions
- def __init__(
- self,
- account: _Optional[_Union[ReadWritePermissions, _Mapping]] = ...,
- basin: _Optional[_Union[ReadWritePermissions, _Mapping]] = ...,
- stream: _Optional[_Union[ReadWritePermissions, _Mapping]] = ...,
- ) -> None: ...
-
-class RevokeAccessTokenRequest(_message.Message):
- __slots__ = ("id",)
- ID_FIELD_NUMBER: _ClassVar[int]
- id: str
- def __init__(self, id: _Optional[str] = ...) -> None: ...
-
-class RevokeAccessTokenResponse(_message.Message):
- __slots__ = ("info",)
- INFO_FIELD_NUMBER: _ClassVar[int]
- info: AccessTokenInfo
- def __init__(
- self, info: _Optional[_Union[AccessTokenInfo, _Mapping]] = ...
- ) -> None: ...
-
-class ListAccessTokensRequest(_message.Message):
- __slots__ = ("prefix", "start_after", "limit")
- PREFIX_FIELD_NUMBER: _ClassVar[int]
- START_AFTER_FIELD_NUMBER: _ClassVar[int]
- LIMIT_FIELD_NUMBER: _ClassVar[int]
- prefix: str
- start_after: str
- limit: int
- def __init__(
- self,
- prefix: _Optional[str] = ...,
- start_after: _Optional[str] = ...,
- limit: _Optional[int] = ...,
- ) -> None: ...
-
-class ListAccessTokensResponse(_message.Message):
- __slots__ = ("access_tokens", "has_more")
- ACCESS_TOKENS_FIELD_NUMBER: _ClassVar[int]
- HAS_MORE_FIELD_NUMBER: _ClassVar[int]
- access_tokens: _containers.RepeatedCompositeFieldContainer[AccessTokenInfo]
- has_more: bool
- def __init__(
- self,
- access_tokens: _Optional[_Iterable[_Union[AccessTokenInfo, _Mapping]]] = ...,
- has_more: bool = ...,
- ) -> None: ...
-
-class AccessTokenInfo(_message.Message):
- __slots__ = ("id", "expires_at", "auto_prefix_streams", "scope")
- ID_FIELD_NUMBER: _ClassVar[int]
- EXPIRES_AT_FIELD_NUMBER: _ClassVar[int]
- AUTO_PREFIX_STREAMS_FIELD_NUMBER: _ClassVar[int]
- SCOPE_FIELD_NUMBER: _ClassVar[int]
- id: str
- expires_at: int
- auto_prefix_streams: bool
- scope: AccessTokenScope
- def __init__(
- self,
- id: _Optional[str] = ...,
- expires_at: _Optional[int] = ...,
- auto_prefix_streams: bool = ...,
- scope: _Optional[_Union[AccessTokenScope, _Mapping]] = ...,
- ) -> None: ...
-
-class AccessTokenScope(_message.Message):
- __slots__ = ("basins", "streams", "access_tokens", "op_groups", "ops")
- BASINS_FIELD_NUMBER: _ClassVar[int]
- STREAMS_FIELD_NUMBER: _ClassVar[int]
- ACCESS_TOKENS_FIELD_NUMBER: _ClassVar[int]
- OP_GROUPS_FIELD_NUMBER: _ClassVar[int]
- OPS_FIELD_NUMBER: _ClassVar[int]
- basins: ResourceSet
- streams: ResourceSet
- access_tokens: ResourceSet
- op_groups: PermittedOperationGroups
- ops: _containers.RepeatedScalarFieldContainer[Operation]
- def __init__(
- self,
- basins: _Optional[_Union[ResourceSet, _Mapping]] = ...,
- streams: _Optional[_Union[ResourceSet, _Mapping]] = ...,
- access_tokens: _Optional[_Union[ResourceSet, _Mapping]] = ...,
- op_groups: _Optional[_Union[PermittedOperationGroups, _Mapping]] = ...,
- ops: _Optional[_Iterable[_Union[Operation, str]]] = ...,
- ) -> None: ...
-
-class ResourceSet(_message.Message):
- __slots__ = ("exact", "prefix")
- EXACT_FIELD_NUMBER: _ClassVar[int]
- PREFIX_FIELD_NUMBER: _ClassVar[int]
- exact: str
- prefix: str
- def __init__(
- self, exact: _Optional[str] = ..., prefix: _Optional[str] = ...
- ) -> None: ...
-
-class IssueAccessTokenResponse(_message.Message):
- __slots__ = ("access_token",)
- ACCESS_TOKEN_FIELD_NUMBER: _ClassVar[int]
- access_token: str
- def __init__(self, access_token: _Optional[str] = ...) -> None: ...
-
-class StreamInfo(_message.Message):
- __slots__ = ("name", "created_at", "deleted_at")
- NAME_FIELD_NUMBER: _ClassVar[int]
- CREATED_AT_FIELD_NUMBER: _ClassVar[int]
- DELETED_AT_FIELD_NUMBER: _ClassVar[int]
- name: str
- created_at: int
- deleted_at: int
- def __init__(
- self,
- name: _Optional[str] = ...,
- created_at: _Optional[int] = ...,
- deleted_at: _Optional[int] = ...,
- ) -> None: ...
-
-class ListStreamsRequest(_message.Message):
- __slots__ = ("prefix", "start_after", "limit")
- PREFIX_FIELD_NUMBER: _ClassVar[int]
- START_AFTER_FIELD_NUMBER: _ClassVar[int]
- LIMIT_FIELD_NUMBER: _ClassVar[int]
- prefix: str
- start_after: str
- limit: int
- def __init__(
- self,
- prefix: _Optional[str] = ...,
- start_after: _Optional[str] = ...,
- limit: _Optional[int] = ...,
- ) -> None: ...
-
-class ListStreamsResponse(_message.Message):
- __slots__ = ("streams", "has_more")
- STREAMS_FIELD_NUMBER: _ClassVar[int]
- HAS_MORE_FIELD_NUMBER: _ClassVar[int]
- streams: _containers.RepeatedCompositeFieldContainer[StreamInfo]
- has_more: bool
- def __init__(
- self,
- streams: _Optional[_Iterable[_Union[StreamInfo, _Mapping]]] = ...,
- has_more: bool = ...,
- ) -> None: ...
-
-class CreateStreamRequest(_message.Message):
- __slots__ = ("stream", "config")
- STREAM_FIELD_NUMBER: _ClassVar[int]
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- stream: str
- config: StreamConfig
- def __init__(
- self,
- stream: _Optional[str] = ...,
- config: _Optional[_Union[StreamConfig, _Mapping]] = ...,
- ) -> None: ...
-
-class CreateStreamResponse(_message.Message):
- __slots__ = ("info",)
- INFO_FIELD_NUMBER: _ClassVar[int]
- info: StreamInfo
- def __init__(self, info: _Optional[_Union[StreamInfo, _Mapping]] = ...) -> None: ...
-
-class DeleteStreamRequest(_message.Message):
- __slots__ = ("stream",)
- STREAM_FIELD_NUMBER: _ClassVar[int]
- stream: str
- def __init__(self, stream: _Optional[str] = ...) -> None: ...
-
-class DeleteStreamResponse(_message.Message):
- __slots__ = ()
- def __init__(self) -> None: ...
-
-class GetStreamConfigRequest(_message.Message):
- __slots__ = ("stream",)
- STREAM_FIELD_NUMBER: _ClassVar[int]
- stream: str
- def __init__(self, stream: _Optional[str] = ...) -> None: ...
-
-class GetStreamConfigResponse(_message.Message):
- __slots__ = ("config",)
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- config: StreamConfig
- def __init__(
- self, config: _Optional[_Union[StreamConfig, _Mapping]] = ...
- ) -> None: ...
-
-class ReconfigureStreamRequest(_message.Message):
- __slots__ = ("stream", "config", "mask")
- STREAM_FIELD_NUMBER: _ClassVar[int]
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- MASK_FIELD_NUMBER: _ClassVar[int]
- stream: str
- config: StreamConfig
- mask: _field_mask_pb2.FieldMask
- def __init__(
- self,
- stream: _Optional[str] = ...,
- config: _Optional[_Union[StreamConfig, _Mapping]] = ...,
- mask: _Optional[_Union[_field_mask_pb2.FieldMask, _Mapping]] = ...,
- ) -> None: ...
-
-class ReconfigureStreamResponse(_message.Message):
- __slots__ = ("config",)
- CONFIG_FIELD_NUMBER: _ClassVar[int]
- config: StreamConfig
- def __init__(
- self, config: _Optional[_Union[StreamConfig, _Mapping]] = ...
- ) -> None: ...
-
-class CheckTailRequest(_message.Message):
- __slots__ = ("stream",)
- STREAM_FIELD_NUMBER: _ClassVar[int]
- stream: str
- def __init__(self, stream: _Optional[str] = ...) -> None: ...
-
-class CheckTailResponse(_message.Message):
- __slots__ = ("next_seq_num", "last_timestamp")
- NEXT_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- LAST_TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- next_seq_num: int
- last_timestamp: int
- def __init__(
- self, next_seq_num: _Optional[int] = ..., last_timestamp: _Optional[int] = ...
- ) -> None: ...
-
-class AppendInput(_message.Message):
- __slots__ = ("stream", "records", "match_seq_num", "fencing_token")
- STREAM_FIELD_NUMBER: _ClassVar[int]
- RECORDS_FIELD_NUMBER: _ClassVar[int]
- MATCH_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- FENCING_TOKEN_FIELD_NUMBER: _ClassVar[int]
- stream: str
- records: _containers.RepeatedCompositeFieldContainer[AppendRecord]
- match_seq_num: int
- fencing_token: str
- def __init__(
- self,
- stream: _Optional[str] = ...,
- records: _Optional[_Iterable[_Union[AppendRecord, _Mapping]]] = ...,
- match_seq_num: _Optional[int] = ...,
- fencing_token: _Optional[str] = ...,
- ) -> None: ...
-
-class AppendOutput(_message.Message):
- __slots__ = (
- "start_seq_num",
- "start_timestamp",
- "end_seq_num",
- "end_timestamp",
- "next_seq_num",
- "last_timestamp",
- )
- START_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- START_TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- END_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- END_TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- NEXT_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- LAST_TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- start_seq_num: int
- start_timestamp: int
- end_seq_num: int
- end_timestamp: int
- next_seq_num: int
- last_timestamp: int
- def __init__(
- self,
- start_seq_num: _Optional[int] = ...,
- start_timestamp: _Optional[int] = ...,
- end_seq_num: _Optional[int] = ...,
- end_timestamp: _Optional[int] = ...,
- next_seq_num: _Optional[int] = ...,
- last_timestamp: _Optional[int] = ...,
- ) -> None: ...
-
-class AppendRequest(_message.Message):
- __slots__ = ("input",)
- INPUT_FIELD_NUMBER: _ClassVar[int]
- input: AppendInput
- def __init__(
- self, input: _Optional[_Union[AppendInput, _Mapping]] = ...
- ) -> None: ...
-
-class AppendResponse(_message.Message):
- __slots__ = ("output",)
- OUTPUT_FIELD_NUMBER: _ClassVar[int]
- output: AppendOutput
- def __init__(
- self, output: _Optional[_Union[AppendOutput, _Mapping]] = ...
- ) -> None: ...
-
-class AppendSessionRequest(_message.Message):
- __slots__ = ("input",)
- INPUT_FIELD_NUMBER: _ClassVar[int]
- input: AppendInput
- def __init__(
- self, input: _Optional[_Union[AppendInput, _Mapping]] = ...
- ) -> None: ...
-
-class AppendSessionResponse(_message.Message):
- __slots__ = ("output",)
- OUTPUT_FIELD_NUMBER: _ClassVar[int]
- output: AppendOutput
- def __init__(
- self, output: _Optional[_Union[AppendOutput, _Mapping]] = ...
- ) -> None: ...
-
-class ReadOutput(_message.Message):
- __slots__ = ("batch", "next_seq_num")
- BATCH_FIELD_NUMBER: _ClassVar[int]
- NEXT_SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- batch: SequencedRecordBatch
- next_seq_num: int
- def __init__(
- self,
- batch: _Optional[_Union[SequencedRecordBatch, _Mapping]] = ...,
- next_seq_num: _Optional[int] = ...,
- ) -> None: ...
-
-class ReadRequest(_message.Message):
- __slots__ = (
- "stream",
- "seq_num",
- "timestamp",
- "tail_offset",
- "limit",
- "until",
- "clamp",
- )
- STREAM_FIELD_NUMBER: _ClassVar[int]
- SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- TAIL_OFFSET_FIELD_NUMBER: _ClassVar[int]
- LIMIT_FIELD_NUMBER: _ClassVar[int]
- UNTIL_FIELD_NUMBER: _ClassVar[int]
- CLAMP_FIELD_NUMBER: _ClassVar[int]
- stream: str
- seq_num: int
- timestamp: int
- tail_offset: int
- limit: ReadLimit
- until: int
- clamp: bool
- def __init__(
- self,
- stream: _Optional[str] = ...,
- seq_num: _Optional[int] = ...,
- timestamp: _Optional[int] = ...,
- tail_offset: _Optional[int] = ...,
- limit: _Optional[_Union[ReadLimit, _Mapping]] = ...,
- until: _Optional[int] = ...,
- clamp: bool = ...,
- ) -> None: ...
-
-class ReadResponse(_message.Message):
- __slots__ = ("output",)
- OUTPUT_FIELD_NUMBER: _ClassVar[int]
- output: ReadOutput
- def __init__(
- self, output: _Optional[_Union[ReadOutput, _Mapping]] = ...
- ) -> None: ...
-
-class ReadLimit(_message.Message):
- __slots__ = ("count", "bytes")
- COUNT_FIELD_NUMBER: _ClassVar[int]
- BYTES_FIELD_NUMBER: _ClassVar[int]
- count: int
- bytes: int
- def __init__(
- self, count: _Optional[int] = ..., bytes: _Optional[int] = ...
- ) -> None: ...
-
-class ReadSessionRequest(_message.Message):
- __slots__ = (
- "stream",
- "seq_num",
- "timestamp",
- "tail_offset",
- "limit",
- "heartbeats",
- "until",
- "clamp",
- )
- STREAM_FIELD_NUMBER: _ClassVar[int]
- SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- TAIL_OFFSET_FIELD_NUMBER: _ClassVar[int]
- LIMIT_FIELD_NUMBER: _ClassVar[int]
- HEARTBEATS_FIELD_NUMBER: _ClassVar[int]
- UNTIL_FIELD_NUMBER: _ClassVar[int]
- CLAMP_FIELD_NUMBER: _ClassVar[int]
- stream: str
- seq_num: int
- timestamp: int
- tail_offset: int
- limit: ReadLimit
- heartbeats: bool
- until: int
- clamp: bool
- def __init__(
- self,
- stream: _Optional[str] = ...,
- seq_num: _Optional[int] = ...,
- timestamp: _Optional[int] = ...,
- tail_offset: _Optional[int] = ...,
- limit: _Optional[_Union[ReadLimit, _Mapping]] = ...,
- heartbeats: bool = ...,
- until: _Optional[int] = ...,
- clamp: bool = ...,
- ) -> None: ...
-
-class ReadSessionResponse(_message.Message):
- __slots__ = ("output",)
- OUTPUT_FIELD_NUMBER: _ClassVar[int]
- output: ReadOutput
- def __init__(
- self, output: _Optional[_Union[ReadOutput, _Mapping]] = ...
- ) -> None: ...
-
-class StreamConfig(_message.Message):
- __slots__ = ("storage_class", "age", "infinite", "timestamping", "delete_on_empty")
- class Timestamping(_message.Message):
- __slots__ = ("mode", "uncapped")
- MODE_FIELD_NUMBER: _ClassVar[int]
- UNCAPPED_FIELD_NUMBER: _ClassVar[int]
- mode: TimestampingMode
- uncapped: bool
- def __init__(
- self,
- mode: _Optional[_Union[TimestampingMode, str]] = ...,
- uncapped: bool = ...,
- ) -> None: ...
-
- class DeleteOnEmpty(_message.Message):
- __slots__ = ("min_age_secs",)
- MIN_AGE_SECS_FIELD_NUMBER: _ClassVar[int]
- min_age_secs: int
- def __init__(self, min_age_secs: _Optional[int] = ...) -> None: ...
-
- class InfiniteRetention(_message.Message):
- __slots__ = ()
- def __init__(self) -> None: ...
-
- STORAGE_CLASS_FIELD_NUMBER: _ClassVar[int]
- AGE_FIELD_NUMBER: _ClassVar[int]
- INFINITE_FIELD_NUMBER: _ClassVar[int]
- TIMESTAMPING_FIELD_NUMBER: _ClassVar[int]
- DELETE_ON_EMPTY_FIELD_NUMBER: _ClassVar[int]
- storage_class: StorageClass
- age: int
- infinite: StreamConfig.InfiniteRetention
- timestamping: StreamConfig.Timestamping
- delete_on_empty: StreamConfig.DeleteOnEmpty
- def __init__(
- self,
- storage_class: _Optional[_Union[StorageClass, str]] = ...,
- age: _Optional[int] = ...,
- infinite: _Optional[_Union[StreamConfig.InfiniteRetention, _Mapping]] = ...,
- timestamping: _Optional[_Union[StreamConfig.Timestamping, _Mapping]] = ...,
- delete_on_empty: _Optional[_Union[StreamConfig.DeleteOnEmpty, _Mapping]] = ...,
- ) -> None: ...
-
-class BasinConfig(_message.Message):
- __slots__ = (
- "default_stream_config",
- "create_stream_on_append",
- "create_stream_on_read",
- )
- DEFAULT_STREAM_CONFIG_FIELD_NUMBER: _ClassVar[int]
- CREATE_STREAM_ON_APPEND_FIELD_NUMBER: _ClassVar[int]
- CREATE_STREAM_ON_READ_FIELD_NUMBER: _ClassVar[int]
- default_stream_config: StreamConfig
- create_stream_on_append: bool
- create_stream_on_read: bool
- def __init__(
- self,
- default_stream_config: _Optional[_Union[StreamConfig, _Mapping]] = ...,
- create_stream_on_append: bool = ...,
- create_stream_on_read: bool = ...,
- ) -> None: ...
-
-class BasinInfo(_message.Message):
- __slots__ = ("name", "scope", "state")
- NAME_FIELD_NUMBER: _ClassVar[int]
- SCOPE_FIELD_NUMBER: _ClassVar[int]
- STATE_FIELD_NUMBER: _ClassVar[int]
- name: str
- scope: BasinScope
- state: BasinState
- def __init__(
- self,
- name: _Optional[str] = ...,
- scope: _Optional[_Union[BasinScope, str]] = ...,
- state: _Optional[_Union[BasinState, str]] = ...,
- ) -> None: ...
-
-class Header(_message.Message):
- __slots__ = ("name", "value")
- NAME_FIELD_NUMBER: _ClassVar[int]
- VALUE_FIELD_NUMBER: _ClassVar[int]
- name: bytes
- value: bytes
- def __init__(
- self, name: _Optional[bytes] = ..., value: _Optional[bytes] = ...
- ) -> None: ...
-
-class AppendRecord(_message.Message):
- __slots__ = ("timestamp", "headers", "body")
- TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- HEADERS_FIELD_NUMBER: _ClassVar[int]
- BODY_FIELD_NUMBER: _ClassVar[int]
- timestamp: int
- headers: _containers.RepeatedCompositeFieldContainer[Header]
- body: bytes
- def __init__(
- self,
- timestamp: _Optional[int] = ...,
- headers: _Optional[_Iterable[_Union[Header, _Mapping]]] = ...,
- body: _Optional[bytes] = ...,
- ) -> None: ...
-
-class SequencedRecord(_message.Message):
- __slots__ = ("seq_num", "timestamp", "headers", "body")
- SEQ_NUM_FIELD_NUMBER: _ClassVar[int]
- TIMESTAMP_FIELD_NUMBER: _ClassVar[int]
- HEADERS_FIELD_NUMBER: _ClassVar[int]
- BODY_FIELD_NUMBER: _ClassVar[int]
- seq_num: int
- timestamp: int
- headers: _containers.RepeatedCompositeFieldContainer[Header]
- body: bytes
- def __init__(
- self,
- seq_num: _Optional[int] = ...,
- timestamp: _Optional[int] = ...,
- headers: _Optional[_Iterable[_Union[Header, _Mapping]]] = ...,
- body: _Optional[bytes] = ...,
- ) -> None: ...
-
-class SequencedRecordBatch(_message.Message):
- __slots__ = ("records",)
- RECORDS_FIELD_NUMBER: _ClassVar[int]
- records: _containers.RepeatedCompositeFieldContainer[SequencedRecord]
- def __init__(
- self, records: _Optional[_Iterable[_Union[SequencedRecord, _Mapping]]] = ...
- ) -> None: ...
diff --git a/src/streamstore/_lib/s2/v1alpha/s2_pb2_grpc.py b/src/streamstore/_lib/s2/v1alpha/s2_pb2_grpc.py
deleted file mode 100644
index bc2f816..0000000
--- a/src/streamstore/_lib/s2/v1alpha/s2_pb2_grpc.py
+++ /dev/null
@@ -1,980 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-"""Client and server classes corresponding to protobuf-defined services."""
-
-import grpc
-
-from streamstore._lib.s2.v1alpha import s2_pb2 as s2_dot_v1alpha_dot_s2__pb2
-
-GRPC_GENERATED_VERSION = "1.69.0"
-GRPC_VERSION = grpc.__version__
-_version_not_supported = False
-
-try:
- from grpc._utilities import first_version_is_lower
-
- _version_not_supported = first_version_is_lower(
- GRPC_VERSION, GRPC_GENERATED_VERSION
- )
-except ImportError:
- _version_not_supported = True
-
-if _version_not_supported:
- raise RuntimeError(
- f"The grpc package installed is at version {GRPC_VERSION},"
- + " but the generated code in s2/v1alpha/s2_pb2_grpc.py depends on"
- + f" grpcio>={GRPC_GENERATED_VERSION}."
- + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
- + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
- )
-
-
-class AccountServiceStub(object):
- """Operate on an S2 account."""
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.ListBasins = channel.unary_unary(
- "/s2.v1alpha.AccountService/ListBasins",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ListBasinsRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ListBasinsResponse.FromString,
- _registered_method=True,
- )
- self.CreateBasin = channel.unary_unary(
- "/s2.v1alpha.AccountService/CreateBasin",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.CreateBasinRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.CreateBasinResponse.FromString,
- _registered_method=True,
- )
- self.DeleteBasin = channel.unary_unary(
- "/s2.v1alpha.AccountService/DeleteBasin",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.DeleteBasinRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.DeleteBasinResponse.FromString,
- _registered_method=True,
- )
- self.ReconfigureBasin = channel.unary_unary(
- "/s2.v1alpha.AccountService/ReconfigureBasin",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureBasinRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureBasinResponse.FromString,
- _registered_method=True,
- )
- self.GetBasinConfig = channel.unary_unary(
- "/s2.v1alpha.AccountService/GetBasinConfig",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.GetBasinConfigRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.GetBasinConfigResponse.FromString,
- _registered_method=True,
- )
- self.IssueAccessToken = channel.unary_unary(
- "/s2.v1alpha.AccountService/IssueAccessToken",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.IssueAccessTokenRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.IssueAccessTokenResponse.FromString,
- _registered_method=True,
- )
- self.RevokeAccessToken = channel.unary_unary(
- "/s2.v1alpha.AccountService/RevokeAccessToken",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.RevokeAccessTokenRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.RevokeAccessTokenResponse.FromString,
- _registered_method=True,
- )
- self.ListAccessTokens = channel.unary_unary(
- "/s2.v1alpha.AccountService/ListAccessTokens",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ListAccessTokensRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ListAccessTokensResponse.FromString,
- _registered_method=True,
- )
-
-
-class AccountServiceServicer(object):
- """Operate on an S2 account."""
-
- def ListBasins(self, request, context):
- """List basins."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def CreateBasin(self, request, context):
- """Create a new basin.
- Provide a client request token with the `S2-Request-Token` header for idempotent retry behaviour.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def DeleteBasin(self, request, context):
- """Delete a basin.
- Basin deletion is asynchronous, and may take a few minutes to complete.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def ReconfigureBasin(self, request, context):
- """Update basin configuration."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def GetBasinConfig(self, request, context):
- """Get basin configuration."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def IssueAccessToken(self, request, context):
- """Issue a new access token."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def RevokeAccessToken(self, request, context):
- """Revoke an access token."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def ListAccessTokens(self, request, context):
- """List access tokens."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
-
-def add_AccountServiceServicer_to_server(servicer, server):
- rpc_method_handlers = {
- "ListBasins": grpc.unary_unary_rpc_method_handler(
- servicer.ListBasins,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ListBasinsRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ListBasinsResponse.SerializeToString,
- ),
- "CreateBasin": grpc.unary_unary_rpc_method_handler(
- servicer.CreateBasin,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.CreateBasinRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.CreateBasinResponse.SerializeToString,
- ),
- "DeleteBasin": grpc.unary_unary_rpc_method_handler(
- servicer.DeleteBasin,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.DeleteBasinRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.DeleteBasinResponse.SerializeToString,
- ),
- "ReconfigureBasin": grpc.unary_unary_rpc_method_handler(
- servicer.ReconfigureBasin,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureBasinRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureBasinResponse.SerializeToString,
- ),
- "GetBasinConfig": grpc.unary_unary_rpc_method_handler(
- servicer.GetBasinConfig,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.GetBasinConfigRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.GetBasinConfigResponse.SerializeToString,
- ),
- "IssueAccessToken": grpc.unary_unary_rpc_method_handler(
- servicer.IssueAccessToken,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.IssueAccessTokenRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.IssueAccessTokenResponse.SerializeToString,
- ),
- "RevokeAccessToken": grpc.unary_unary_rpc_method_handler(
- servicer.RevokeAccessToken,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.RevokeAccessTokenRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.RevokeAccessTokenResponse.SerializeToString,
- ),
- "ListAccessTokens": grpc.unary_unary_rpc_method_handler(
- servicer.ListAccessTokens,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ListAccessTokensRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ListAccessTokensResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- "s2.v1alpha.AccountService", rpc_method_handlers
- )
- server.add_generic_rpc_handlers((generic_handler,))
- server.add_registered_method_handlers(
- "s2.v1alpha.AccountService", rpc_method_handlers
- )
-
-
-# This class is part of an EXPERIMENTAL API.
-class AccountService(object):
- """Operate on an S2 account."""
-
- @staticmethod
- def ListBasins(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/ListBasins",
- s2_dot_v1alpha_dot_s2__pb2.ListBasinsRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ListBasinsResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def CreateBasin(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/CreateBasin",
- s2_dot_v1alpha_dot_s2__pb2.CreateBasinRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.CreateBasinResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def DeleteBasin(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/DeleteBasin",
- s2_dot_v1alpha_dot_s2__pb2.DeleteBasinRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.DeleteBasinResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def ReconfigureBasin(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/ReconfigureBasin",
- s2_dot_v1alpha_dot_s2__pb2.ReconfigureBasinRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ReconfigureBasinResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def GetBasinConfig(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/GetBasinConfig",
- s2_dot_v1alpha_dot_s2__pb2.GetBasinConfigRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.GetBasinConfigResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def IssueAccessToken(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/IssueAccessToken",
- s2_dot_v1alpha_dot_s2__pb2.IssueAccessTokenRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.IssueAccessTokenResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def RevokeAccessToken(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/RevokeAccessToken",
- s2_dot_v1alpha_dot_s2__pb2.RevokeAccessTokenRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.RevokeAccessTokenResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def ListAccessTokens(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.AccountService/ListAccessTokens",
- s2_dot_v1alpha_dot_s2__pb2.ListAccessTokensRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ListAccessTokensResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
-
-class BasinServiceStub(object):
- """Operate on an S2 basin."""
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.ListStreams = channel.unary_unary(
- "/s2.v1alpha.BasinService/ListStreams",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ListStreamsRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ListStreamsResponse.FromString,
- _registered_method=True,
- )
- self.CreateStream = channel.unary_unary(
- "/s2.v1alpha.BasinService/CreateStream",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.CreateStreamRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.CreateStreamResponse.FromString,
- _registered_method=True,
- )
- self.DeleteStream = channel.unary_unary(
- "/s2.v1alpha.BasinService/DeleteStream",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.DeleteStreamRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.DeleteStreamResponse.FromString,
- _registered_method=True,
- )
- self.GetStreamConfig = channel.unary_unary(
- "/s2.v1alpha.BasinService/GetStreamConfig",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.GetStreamConfigRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.GetStreamConfigResponse.FromString,
- _registered_method=True,
- )
- self.ReconfigureStream = channel.unary_unary(
- "/s2.v1alpha.BasinService/ReconfigureStream",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureStreamRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureStreamResponse.FromString,
- _registered_method=True,
- )
-
-
-class BasinServiceServicer(object):
- """Operate on an S2 basin."""
-
- def ListStreams(self, request, context):
- """List streams."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def CreateStream(self, request, context):
- """Create a stream.
- Provide a client request token with the `S2-Request-Token` header for idempotent retry behaviour.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def DeleteStream(self, request, context):
- """Delete a stream.
- Stream deletion is asynchronous, and may take a few minutes to complete.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def GetStreamConfig(self, request, context):
- """Get stream configuration."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def ReconfigureStream(self, request, context):
- """Update stream configuration."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
-
-def add_BasinServiceServicer_to_server(servicer, server):
- rpc_method_handlers = {
- "ListStreams": grpc.unary_unary_rpc_method_handler(
- servicer.ListStreams,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ListStreamsRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ListStreamsResponse.SerializeToString,
- ),
- "CreateStream": grpc.unary_unary_rpc_method_handler(
- servicer.CreateStream,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.CreateStreamRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.CreateStreamResponse.SerializeToString,
- ),
- "DeleteStream": grpc.unary_unary_rpc_method_handler(
- servicer.DeleteStream,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.DeleteStreamRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.DeleteStreamResponse.SerializeToString,
- ),
- "GetStreamConfig": grpc.unary_unary_rpc_method_handler(
- servicer.GetStreamConfig,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.GetStreamConfigRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.GetStreamConfigResponse.SerializeToString,
- ),
- "ReconfigureStream": grpc.unary_unary_rpc_method_handler(
- servicer.ReconfigureStream,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureStreamRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ReconfigureStreamResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- "s2.v1alpha.BasinService", rpc_method_handlers
- )
- server.add_generic_rpc_handlers((generic_handler,))
- server.add_registered_method_handlers(
- "s2.v1alpha.BasinService", rpc_method_handlers
- )
-
-
-# This class is part of an EXPERIMENTAL API.
-class BasinService(object):
- """Operate on an S2 basin."""
-
- @staticmethod
- def ListStreams(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.BasinService/ListStreams",
- s2_dot_v1alpha_dot_s2__pb2.ListStreamsRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ListStreamsResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def CreateStream(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.BasinService/CreateStream",
- s2_dot_v1alpha_dot_s2__pb2.CreateStreamRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.CreateStreamResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def DeleteStream(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.BasinService/DeleteStream",
- s2_dot_v1alpha_dot_s2__pb2.DeleteStreamRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.DeleteStreamResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def GetStreamConfig(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.BasinService/GetStreamConfig",
- s2_dot_v1alpha_dot_s2__pb2.GetStreamConfigRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.GetStreamConfigResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def ReconfigureStream(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.BasinService/ReconfigureStream",
- s2_dot_v1alpha_dot_s2__pb2.ReconfigureStreamRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ReconfigureStreamResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
-
-class StreamServiceStub(object):
- """Operate on an S2 stream."""
-
- def __init__(self, channel):
- """Constructor.
-
- Args:
- channel: A grpc.Channel.
- """
- self.CheckTail = channel.unary_unary(
- "/s2.v1alpha.StreamService/CheckTail",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.CheckTailRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.CheckTailResponse.FromString,
- _registered_method=True,
- )
- self.Append = channel.unary_unary(
- "/s2.v1alpha.StreamService/Append",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.AppendRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.AppendResponse.FromString,
- _registered_method=True,
- )
- self.AppendSession = channel.stream_stream(
- "/s2.v1alpha.StreamService/AppendSession",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.AppendSessionRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.AppendSessionResponse.FromString,
- _registered_method=True,
- )
- self.Read = channel.unary_unary(
- "/s2.v1alpha.StreamService/Read",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ReadRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReadResponse.FromString,
- _registered_method=True,
- )
- self.ReadSession = channel.unary_stream(
- "/s2.v1alpha.StreamService/ReadSession",
- request_serializer=s2_dot_v1alpha_dot_s2__pb2.ReadSessionRequest.SerializeToString,
- response_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReadSessionResponse.FromString,
- _registered_method=True,
- )
-
-
-class StreamServiceServicer(object):
- """Operate on an S2 stream."""
-
- def CheckTail(self, request, context):
- """Check the tail of the stream."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def Append(self, request, context):
- """Append a batch of records to a stream."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def AppendSession(self, request_iterator, context):
- """Append batches of records to a stream continuously, while guaranteeing pipelined requests are processed in order.
- If any request fails, the session is terminated.
- """
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def Read(self, request, context):
- """Retrieve a batch of records from a stream."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
- def ReadSession(self, request, context):
- """Retrieve batches of records from a stream continuously."""
- context.set_code(grpc.StatusCode.UNIMPLEMENTED)
- context.set_details("Method not implemented!")
- raise NotImplementedError("Method not implemented!")
-
-
-def add_StreamServiceServicer_to_server(servicer, server):
- rpc_method_handlers = {
- "CheckTail": grpc.unary_unary_rpc_method_handler(
- servicer.CheckTail,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.CheckTailRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.CheckTailResponse.SerializeToString,
- ),
- "Append": grpc.unary_unary_rpc_method_handler(
- servicer.Append,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.AppendRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.AppendResponse.SerializeToString,
- ),
- "AppendSession": grpc.stream_stream_rpc_method_handler(
- servicer.AppendSession,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.AppendSessionRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.AppendSessionResponse.SerializeToString,
- ),
- "Read": grpc.unary_unary_rpc_method_handler(
- servicer.Read,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReadRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ReadResponse.SerializeToString,
- ),
- "ReadSession": grpc.unary_stream_rpc_method_handler(
- servicer.ReadSession,
- request_deserializer=s2_dot_v1alpha_dot_s2__pb2.ReadSessionRequest.FromString,
- response_serializer=s2_dot_v1alpha_dot_s2__pb2.ReadSessionResponse.SerializeToString,
- ),
- }
- generic_handler = grpc.method_handlers_generic_handler(
- "s2.v1alpha.StreamService", rpc_method_handlers
- )
- server.add_generic_rpc_handlers((generic_handler,))
- server.add_registered_method_handlers(
- "s2.v1alpha.StreamService", rpc_method_handlers
- )
-
-
-# This class is part of an EXPERIMENTAL API.
-class StreamService(object):
- """Operate on an S2 stream."""
-
- @staticmethod
- def CheckTail(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.StreamService/CheckTail",
- s2_dot_v1alpha_dot_s2__pb2.CheckTailRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.CheckTailResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def Append(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.StreamService/Append",
- s2_dot_v1alpha_dot_s2__pb2.AppendRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.AppendResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def AppendSession(
- request_iterator,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.stream_stream(
- request_iterator,
- target,
- "/s2.v1alpha.StreamService/AppendSession",
- s2_dot_v1alpha_dot_s2__pb2.AppendSessionRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.AppendSessionResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def Read(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_unary(
- request,
- target,
- "/s2.v1alpha.StreamService/Read",
- s2_dot_v1alpha_dot_s2__pb2.ReadRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ReadResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
-
- @staticmethod
- def ReadSession(
- request,
- target,
- options=(),
- channel_credentials=None,
- call_credentials=None,
- insecure=False,
- compression=None,
- wait_for_ready=None,
- timeout=None,
- metadata=None,
- ):
- return grpc.experimental.unary_stream(
- request,
- target,
- "/s2.v1alpha.StreamService/ReadSession",
- s2_dot_v1alpha_dot_s2__pb2.ReadSessionRequest.SerializeToString,
- s2_dot_v1alpha_dot_s2__pb2.ReadSessionResponse.FromString,
- options,
- channel_credentials,
- insecure,
- call_credentials,
- compression,
- wait_for_ready,
- timeout,
- metadata,
- _registered_method=True,
- )
diff --git a/src/streamstore/_mappers.py b/src/streamstore/_mappers.py
deleted file mode 100644
index 0a97393..0000000
--- a/src/streamstore/_mappers.py
+++ /dev/null
@@ -1,361 +0,0 @@
-from datetime import datetime
-from typing import Literal, cast
-
-from google.protobuf.internal.containers import RepeatedCompositeFieldContainer
-
-import streamstore._lib.s2.v1alpha.s2_pb2 as msgs
-from streamstore.schemas import (
- AccessTokenInfo,
- AccessTokenScope,
- AppendInput,
- AppendOutput,
- BasinConfig,
- BasinInfo,
- BasinScope,
- BasinState,
- Operation,
- OperationGroupPermissions,
- Permission,
- ReadLimit,
- Record,
- ResourceMatchOp,
- ResourceMatchRule,
- SeqNum,
- SequencedRecord,
- StorageClass,
- StreamConfig,
- StreamInfo,
- TailOffset,
- Timestamp,
- Timestamping,
- TimestampingMode,
-)
-
-_ReadStart = SeqNum | Timestamp | TailOffset
-
-
-def append_record_message(record: Record) -> msgs.AppendRecord:
- headers = [msgs.Header(name=name, value=value) for (name, value) in record.headers]
- return msgs.AppendRecord(
- timestamp=record.timestamp, headers=headers, body=record.body
- )
-
-
-def append_input_message(stream: str, input: AppendInput) -> msgs.AppendInput:
- records = [append_record_message(r) for r in input.records]
- return msgs.AppendInput(
- stream=stream,
- records=records,
- match_seq_num=input.match_seq_num,
- fencing_token=input.fencing_token,
- )
-
-
-def read_request_message(
- stream: str,
- start: _ReadStart,
- limit: ReadLimit | None,
- until: int | None,
-) -> msgs.ReadRequest:
- seq_num, timestamp, tail_offset = _read_start_pos(start)
- return msgs.ReadRequest(
- stream=stream,
- seq_num=seq_num,
- timestamp=timestamp,
- tail_offset=tail_offset,
- limit=_read_limit_message(limit),
- until=until,
- )
-
-
-def read_session_request_message(
- stream: str,
- start: _ReadStart,
- limit: ReadLimit | None,
- until: int | None,
- clamp: bool = False,
-) -> msgs.ReadSessionRequest:
- seq_num, timestamp, tail_offset = _read_start_pos(start)
- return msgs.ReadSessionRequest(
- stream=stream,
- seq_num=seq_num,
- timestamp=timestamp,
- tail_offset=tail_offset,
- limit=_read_limit_message(limit),
- until=until,
- clamp=clamp,
- )
-
-
-def _read_start_pos(start: _ReadStart) -> tuple[int | None, int | None, int | None]:
- seq_num = None
- timestamp = None
- tail_offset = None
- if isinstance(start, SeqNum):
- seq_num = start.value
- elif isinstance(start, Timestamp):
- timestamp = start.value
- elif isinstance(start, TailOffset):
- tail_offset = start.value
- else:
- raise ValueError("start doesn't match any of the expected types")
- return (
- seq_num,
- timestamp,
- tail_offset,
- )
-
-
-def basin_info_schema(info: msgs.BasinInfo) -> BasinInfo:
- return BasinInfo(info.name, BasinScope(info.scope), BasinState(info.state))
-
-
-def stream_info_schema(info: msgs.StreamInfo) -> StreamInfo:
- return StreamInfo(
- info.name,
- datetime.fromtimestamp(info.created_at),
- datetime.fromtimestamp(info.deleted_at) if info.deleted_at != 0 else None,
- )
-
-
-def stream_config_message(
- config: StreamConfig | None = None,
- return_mask_paths: bool = False,
- mask_path_prefix: str = "",
-) -> msgs.StreamConfig | tuple[msgs.StreamConfig, list[str]]:
- paths = []
- stream_config = msgs.StreamConfig()
- if config:
- storage_class = config.storage_class
- retention_policy = config.retention_policy
- timestamping = config.timestamping
- delete_on_empty_min_age = config.delete_on_empty_min_age
- if storage_class is not None:
- paths.append(f"{mask_path_prefix}storage_class")
- stream_config.storage_class = storage_class.value
- if retention_policy is not None:
- paths.append(f"{mask_path_prefix}retention_policy")
- if retention_policy == "infinite":
- stream_config.infinite.CopyFrom(msgs.StreamConfig.InfiniteRetention())
- else:
- stream_config.age = retention_policy
- if timestamping is not None:
- paths.append(f"{mask_path_prefix}timestamping")
- if timestamping.mode is not None:
- paths.append(f"{mask_path_prefix}timestamping.mode")
- stream_config.timestamping.mode = timestamping.mode.value
- if timestamping.uncapped is not None:
- paths.append(f"{mask_path_prefix}timestamping.uncapped")
- stream_config.timestamping.uncapped = timestamping.uncapped
- if delete_on_empty_min_age is not None:
- paths.append(f"{mask_path_prefix}delete_on_empty.min_age_secs")
- stream_config.delete_on_empty.min_age_secs = delete_on_empty_min_age
- if return_mask_paths:
- return (stream_config, paths)
- return stream_config
-
-
-def basin_config_message(
- config: BasinConfig | None = None,
- return_mask_paths: bool = False,
-) -> msgs.BasinConfig | tuple[msgs.BasinConfig, list[str]]:
- paths = []
- basin_config = msgs.BasinConfig()
- if config:
- if return_mask_paths:
- default_stream_config, deep_paths = cast(
- tuple[msgs.StreamConfig, list[str]],
- stream_config_message(
- config.default_stream_config,
- return_mask_paths,
- mask_path_prefix="default_stream_config.",
- ),
- )
- paths.extend(deep_paths)
- else:
- default_stream_config = cast(
- msgs.StreamConfig, stream_config_message(config.default_stream_config)
- )
- basin_config.default_stream_config.CopyFrom(default_stream_config)
- if config.create_stream_on_append is not None:
- basin_config.create_stream_on_append = config.create_stream_on_append
- paths.append("create_stream_on_append")
- if return_mask_paths:
- return (basin_config, paths)
- return basin_config
-
-
-def stream_config_schema(config: msgs.StreamConfig) -> StreamConfig:
- retention_policy: int | Literal["infinite"]
- match config.WhichOneof("retention_policy"):
- case "age":
- retention_policy = config.age
- case "infinite":
- retention_policy = "infinite"
- case _:
- raise RuntimeError(
- "StreamConfig retention_policy doesn't match any of the expected values"
- )
- return StreamConfig(
- StorageClass(config.storage_class),
- retention_policy,
- Timestamping(
- mode=TimestampingMode(config.timestamping.mode),
- uncapped=config.timestamping.uncapped,
- ),
- config.delete_on_empty.min_age_secs,
- )
-
-
-def basin_config_schema(config: msgs.BasinConfig) -> BasinConfig:
- return BasinConfig(
- stream_config_schema(config.default_stream_config),
- config.create_stream_on_append,
- )
-
-
-def append_output_schema(output: msgs.AppendOutput) -> AppendOutput:
- return AppendOutput(
- output.start_seq_num,
- output.start_timestamp,
- output.end_seq_num,
- output.end_timestamp,
- output.next_seq_num,
- output.last_timestamp,
- )
-
-
-def sequenced_records_schema(
- batch: msgs.SequencedRecordBatch, ignore_command_records: bool = False
-) -> list[SequencedRecord]:
- if ignore_command_records:
- return [
- SequencedRecord(
- sr.seq_num,
- sr.body,
- [(h.name, h.value) for h in sr.headers],
- sr.timestamp,
- )
- for sr in batch.records
- if _not_a_command_record(sr.headers)
- ]
- return [
- SequencedRecord(
- sr.seq_num, sr.body, [(h.name, h.value) for h in sr.headers], sr.timestamp
- )
- for sr in batch.records
- ]
-
-
-def access_token_info_message(
- id: str, scope: AccessTokenScope, auto_prefix_streams: bool, expires_at: int | None
-) -> msgs.AccessTokenInfo:
- def resource_set(rule: ResourceMatchRule | None) -> msgs.ResourceSet | None:
- if rule is None:
- return None
- match rule.match_op:
- case ResourceMatchOp.EXACT:
- return msgs.ResourceSet(exact=rule.value)
- case ResourceMatchOp.PREFIX:
- return msgs.ResourceSet(prefix=rule.value)
- case _:
- raise ValueError(
- "ResourceMatchOp doesn't match any of the expected values"
- )
-
- def permissions(perm: Permission) -> msgs.ReadWritePermissions:
- read = False
- write = False
- match perm:
- case Permission.UNSPECIFIED:
- pass
- case Permission.READ:
- read = True
- case Permission.WRITE:
- write = True
- case Permission.READ_WRITE:
- read = True
- write = True
- return msgs.ReadWritePermissions(read=read, write=write)
-
- def permitted_op_groups(
- op_group_perms: OperationGroupPermissions | None,
- ) -> msgs.PermittedOperationGroups | None:
- if op_group_perms is None:
- return None
- return msgs.PermittedOperationGroups(
- account=permissions(op_group_perms.account),
- basin=permissions(op_group_perms.basin),
- stream=permissions(op_group_perms.stream),
- )
-
- return msgs.AccessTokenInfo(
- id=id,
- expires_at=expires_at,
- auto_prefix_streams=auto_prefix_streams,
- scope=msgs.AccessTokenScope(
- basins=resource_set(scope.basins),
- streams=resource_set(scope.streams),
- access_tokens=resource_set(scope.access_tokens),
- op_groups=permitted_op_groups(scope.op_group_perms),
- ops=(op.value for op in scope.ops),
- ),
- )
-
-
-def access_token_info_schema(info: msgs.AccessTokenInfo) -> AccessTokenInfo:
- def resource_match_rule(resource_set: msgs.ResourceSet) -> ResourceMatchRule | None:
- if not resource_set.HasField("matching"):
- return None
- match resource_set.WhichOneof("matching"):
- case "exact":
- return ResourceMatchRule(ResourceMatchOp.EXACT, resource_set.exact)
- case "prefix":
- return ResourceMatchRule(ResourceMatchOp.PREFIX, resource_set.prefix)
- case _:
- raise RuntimeError(
- "ResourceSet matching doesn't match any of the expected values"
- )
-
- def permission(perms: msgs.ReadWritePermissions) -> Permission:
- if perms.read and perms.write:
- return Permission.READ_WRITE
- elif perms.read:
- return Permission.READ
- elif perms.write:
- return Permission.WRITE
- else:
- return Permission.UNSPECIFIED
-
- return AccessTokenInfo(
- id=info.id,
- scope=AccessTokenScope(
- basins=resource_match_rule(info.scope.basins),
- streams=resource_match_rule(info.scope.streams),
- access_tokens=resource_match_rule(info.scope.access_tokens),
- op_group_perms=OperationGroupPermissions(
- account=permission(info.scope.op_groups.account),
- basin=permission(info.scope.op_groups.basin),
- stream=permission(info.scope.op_groups.stream),
- ),
- ops=[Operation(op) for op in info.scope.ops],
- ),
- auto_prefix_streams=info.auto_prefix_streams,
- expires_at=info.expires_at if info.HasField("expires_at") else None,
- )
-
-
-def _read_limit_message(limit: ReadLimit | None) -> msgs.ReadLimit:
- return (
- msgs.ReadLimit(count=limit.count, bytes=limit.bytes)
- if limit
- else msgs.ReadLimit()
- )
-
-
-def _not_a_command_record(
- headers: RepeatedCompositeFieldContainer[msgs.Header],
-) -> bool:
- if len(headers) == 1 and headers[0].name == b"":
- return False
- return True
diff --git a/src/streamstore/_retrier.py b/src/streamstore/_retrier.py
deleted file mode 100644
index e6c7556..0000000
--- a/src/streamstore/_retrier.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import asyncio
-import random
-from dataclasses import dataclass
-from typing import Callable
-
-
-@dataclass(slots=True)
-class Attempt:
- value: int
-
-
-def compute_backoffs(
- attempts: int,
- wait_min: float = 0.1,
- wait_max: float = 5.0,
-) -> list[float]:
- backoffs = []
- for attempt in range(attempts):
- backoffs.append(random.uniform(wait_min, min(wait_max, 2**attempt)))
- return backoffs
-
-
-class Retrier:
- def __init__(
- self,
- should_retry_on: Callable[[Exception], bool],
- max_attempts: int,
- ):
- self.should_retry_on = should_retry_on
- self.max_attempts = max_attempts
-
- async def __call__(self, f: Callable, *args, **kwargs):
- backoffs = compute_backoffs(attempts=self.max_attempts)
- attempt = 0
- while True:
- try:
- return await f(*args, **kwargs)
- except Exception as e:
- if attempt < self.max_attempts and self.should_retry_on(e):
- await asyncio.sleep(backoffs[attempt])
- attempt += 1
- else:
- raise e
diff --git a/src/streamstore/schemas.py b/src/streamstore/schemas.py
deleted file mode 100644
index 34ad5ee..0000000
--- a/src/streamstore/schemas.py
+++ /dev/null
@@ -1,509 +0,0 @@
-__all__ = [
- "Record",
- "AppendInput",
- "AppendOutput",
- "Tail",
- "SeqNum",
- "Timestamp",
- "TailOffset",
- "ReadLimit",
- "SequencedRecord",
- "FirstSeqNum",
- "NextSeqNum",
- "Page",
- "BasinScope",
- "BasinState",
- "BasinInfo",
- "StreamInfo",
- "StorageClass",
- "TimestampingMode",
- "Timestamping",
- "StreamConfig",
- "BasinConfig",
- "ResourceMatchOp",
- "ResourceMatchRule",
- "Permission",
- "OperationGroupPermissions",
- "Operation",
- "AccessTokenScope",
- "AccessTokenInfo",
- "Cloud",
- "Endpoints",
-]
-
-import os
-from dataclasses import dataclass, field
-from datetime import datetime
-from enum import Enum
-from typing import Generic, Literal, TypeVar
-
-from streamstore._exceptions import fallible
-
-T = TypeVar("T")
-
-ONE_MIB = 1024 * 1024
-
-
-class DocEnum(Enum):
- def __new__(cls, value, doc=None):
- self = object.__new__(cls)
- self._value_ = value
- if doc is not None:
- self.__doc__ = doc
- return self
-
-
-@dataclass(slots=True)
-class Record:
- """
- Record to be appended to a stream.
- """
-
- #: Body of this record.
- body: bytes
- #: Series of name-value pairs for this record.
- headers: list[tuple[bytes, bytes]] = field(default_factory=list)
- #: Timestamp for this record.
- #:
- #: Precise semantics depend on :attr:`.StreamConfig.timestamping`.
- timestamp: int | None = None
-
-
-@dataclass(slots=True)
-class AppendInput:
- """
- Used in the parameters to :meth:`.Stream.append` and :meth:`.Stream.append_session`.
- """
-
- #: Batch of records to append atomically, which must contain at least one record,
- #: and no more than 1000. The size of the batch must not exceed 1MiB of :func:`.metered_bytes`.
- records: list[Record]
- #: Enforce that the sequence number issued to the first record in the batch matches this value.
- match_seq_num: int | None = None
- #: Enforce a fencing token, which must have been previously set by a ``fence`` command record.
- fencing_token: str | None = None
-
-
-@dataclass(slots=True)
-class AppendOutput:
- """
- Returned from :meth:`.Stream.append`.
-
- (or)
-
- Yielded from :meth:`.Stream.append_session`.
- """
-
- #: Sequence number of the first appended record.
- start_seq_num: int
- #: Timestamp of the first appended record.
- start_timestamp: int
- #: Sequence number of the last appended record + 1.
- #: ``end_seq_num - start_seq_num`` will be the number of records in the batch.
- end_seq_num: int
- #: Timestamp of the last appended record.
- end_timestamp: int
- #: Sequence number of the last durable record on the stream + 1.
- #: This can be greater than ``end_seq_num`` in case of concurrent appends.
- next_seq_num: int
- #: Timestamp of the last durable record on the stream.
- last_timestamp: int
-
-
-@dataclass(slots=True)
-class Tail:
- """
- Tail of a stream.
- """
-
- #: Sequence number of the last durable record on the stream + 1.
- next_seq_num: int
- #: Timestamp of the last durable record on the stream.
- last_timestamp: int
-
-
-@dataclass(slots=True)
-class ReadLimit:
- """
- Used in the parameters to :meth:`.Stream.read` and :meth:`.Stream.read_session`.
-
- If both ``count`` and ``bytes`` are specified, either limit may be hit.
- """
-
- #: Number of records.
- count: int | None = None
- #: Cumulative size of records calculated using :func:`.metered_bytes`.
- bytes: int | None = None
-
-
-@dataclass(slots=True)
-class SequencedRecord:
- """
- Record read from a stream.
- """
-
- #: Sequence number assigned to this record.
- seq_num: int
- #: Body of this record.
- body: bytes
- #: Series of name-value pairs for this record.
- headers: list[tuple[bytes, bytes]]
- #: Timestamp for this record.
- timestamp: int
-
-
-@dataclass(slots=True)
-class SeqNum:
- value: int
-
-
-@dataclass(slots=True)
-class Timestamp:
- value: int
-
-
-@dataclass(slots=True)
-class TailOffset:
- """
- Number of records before the tail.
- """
-
- value: int
-
-
-@dataclass(slots=True)
-class FirstSeqNum:
- value: int
-
-
-@dataclass(slots=True)
-class NextSeqNum:
- value: int
-
-
-@dataclass(slots=True)
-class Page(Generic[T]):
- """
- Page of items.
- """
-
- #: List of items of any type T.
- items: list[T]
- #: If ``True``, it means that there are more pages.
- has_more: bool
-
-
-class BasinScope(DocEnum):
- """
- Scope of a basin.
- """
-
- UNSPECIFIED = 0, "``UNSPECIFIED`` defaults to ``AWS_US_EAST_1``."
- AWS_US_EAST_1 = 1, "AWS ``us-east-1`` region."
-
-
-class BasinState(DocEnum):
- """
- Current state of a basin.
- """
-
- UNSPECIFIED = 0
- ACTIVE = 1
- CREATING = 2
- DELETING = 3
-
-
-@dataclass(slots=True)
-class BasinInfo:
- """
- Basin information.
- """
-
- #: Basin name.
- name: str
- #: Basin scope.
- scope: BasinScope
- #: Basin state.
- state: BasinState
-
-
-@dataclass(slots=True)
-class StreamInfo:
- """
- Stream information.
- """
-
- #: Stream name.
- name: str
- #: Creation time.
- created_at: datetime
- #: Deletion time, if this stream is being deleted.
- deleted_at: datetime | None
-
-
-class StorageClass(DocEnum):
- """
- Storage class for recent appends.
- """
-
- STANDARD = 1, "Offers end-to-end latencies under 500 ms."
- EXPRESS = 2, "Offers end-to-end latencies under 50 ms."
-
-
-class TimestampingMode(DocEnum):
- """
- Timestamping mode.
-
- Note:
- The arrival time is always in milliseconds since Unix epoch.
- """
-
- UNSPECIFIED = 0, "Defaults to ``CLIENT_PREFER``."
- CLIENT_PREFER = (
- 1,
- "Prefer client-specified timestamp if present, otherwise use arrival time.",
- )
- CLIENT_REQUIRE = (
- 2,
- "Require a client-specified timestamp and reject the append if it is absent.",
- )
- ARRIVAL = 3, "Use the arrival time and ignore any client-specified timestamp."
-
-
-@dataclass(slots=True)
-class Timestamping:
- """
- Timestamping behavior.
- """
-
- #: Timestamping mode.
- #:
- #: If not specified, the default is :attr:`.TimestampingMode.CLIENT_PREFER`.
- mode: TimestampingMode | None = None
- #: Allow client-specified timestamps to exceed the arrival time.
- uncapped: bool | None = None
-
-
-@dataclass(slots=True)
-class StreamConfig:
- """
- Stream configuration.
- """
-
- #: Storage class for this stream.
- #:
- #: If not specified, the default is :attr:`.StorageClass.EXPRESS`.
- storage_class: StorageClass | None = None
- #: Retention policy for records in this stream.
- #:
- #: Retention duration in seconds to automatically trim records older than this duration.
- #:
- #: ``'infinite'`` to retain records indefinitely.
- #: (While S2 is in public preview, this is capped at 28 days. Let us know if you'd like the cap removed.)
- #:
- #: If not specified, the default is to retain records for 7 days.
- retention_policy: int | Literal["infinite"] | None = None
- #: Timestamping behavior for appends to this stream, which influences how timestamps are handled.
- timestamping: Timestamping | None = None
- #: Minimum age in seconds before this stream can be automatically deleted if empty.
- #:
- #: If not specified or set to ``0``, this stream will not be automatically deleted.
- delete_on_empty_min_age: int | None = None
-
-
-@dataclass(slots=True)
-class BasinConfig:
- """
- Basin configuration.
- """
-
- #: Default configuration for streams in this basin.
- default_stream_config: StreamConfig | None = None
- #: Create stream on append if it doesn't exist, using the default stream configuration.
- create_stream_on_append: bool | None = None
-
-
-class ResourceMatchOp(DocEnum):
- """
- Resource match operator.
- """
-
- EXACT = (
- 1,
- "Match only the resource with the exact value. Use an empty string to match no resources.",
- )
- PREFIX = (
- 2,
- "Match all resources that start with the prefix value. Use an empty string to match all resources.",
- )
-
-
-@dataclass(slots=True)
-class ResourceMatchRule:
- """
- Resource match rule.
- """
-
- #: Match operator.
- match_op: ResourceMatchOp
- #: Value to match.
- value: str
-
-
-class Permission(DocEnum):
- """
- Permission.
- """
-
- UNSPECIFIED = 0
- READ = 1
- WRITE = 2
- READ_WRITE = 3
-
-
-@dataclass(slots=True)
-class OperationGroupPermissions:
- """
- Operation group permissions.
- """
-
- #: Permission for account operations.
- account: Permission = Permission.UNSPECIFIED
- #: Permission for basin operations.
- basin: Permission = Permission.UNSPECIFIED
- #: Permission for stream operations.
- stream: Permission = Permission.UNSPECIFIED
-
-
-class Operation(DocEnum):
- """
- Operation.
- """
-
- UNSPECIFIED = 0
- LIST_BASINS = 1
- CREATE_BASIN = 2
- DELETE_BASIN = 3
- RECONFIGURE_BASIN = 4
- GET_BASIN_CONFIG = 5
- ISSUE_ACCESS_TOKEN = 6
- REVOKE_ACCESS_TOKEN = 7
- LIST_ACCESS_TOKENS = 8
- LIST_STREAMS = 9
- CREATE_STREAM = 10
- DELETE_STREAM = 11
- GET_STREAM_CONFIG = 12
- RECONFIGURE_STREAM = 13
- CHECK_TAIL = 14
- APPEND = 15
- READ = 16
-
-
-@dataclass(slots=True)
-class AccessTokenScope:
- """
- Access token scope.
- """
-
- #: Allowed basins.
- basins: ResourceMatchRule | None = None
- #: Allowed streams.
- streams: ResourceMatchRule | None = None
- #: Allowed access token IDs.
- access_tokens: ResourceMatchRule | None = None
- #: Permissions at operation group level.
- op_group_perms: OperationGroupPermissions | None = None
- #: Allowed operations.
- #:
- #: Note:
- #: A union of allowed operations and groups is used as the effective set of allowed operations.
- ops: list[Operation] = field(default_factory=list)
-
-
-@dataclass(slots=True)
-class AccessTokenInfo:
- """
- Access token information.
- """
-
- #: Access token ID.
- id: str
- #: Access token scope.
- scope: AccessTokenScope
- #: Expiration time in seconds since Unix epoch.
- expires_at: int | None
- #: Whether auto-prefixing is enabled for streams in scope.
- auto_prefix_streams: bool
-
-
-class Cloud(DocEnum):
- """
- Cloud in which the S2 service runs.
- """
-
- AWS = 1
-
-
-class Endpoints:
- """
- `S2 endpoints `_.
- """
-
- __slots__ = ("_account_authority", "_basin_base_authority")
-
- _account_authority: str
- _basin_base_authority: str
-
- def __init__(self, account_authority: str, basin_base_authority: str):
- self._account_authority = account_authority
- self._basin_base_authority = basin_base_authority
-
- @classmethod
- @fallible
- def for_cloud(cls, cloud: Cloud) -> "Endpoints":
- """
- Construct S2 endpoints for the given cloud.
-
- Args:
- cloud: Cloud in which the S2 service runs.
- """
- return cls(
- _account_authority(cloud),
- _basin_authority(cloud),
- )
-
- @classmethod
- @fallible
- def _from_env(cls) -> "Endpoints":
- account_authority = os.getenv("S2_ACCOUNT_ENDPOINT")
- basin_authority = os.getenv("S2_BASIN_ENDPOINT")
- if (
- account_authority
- and basin_authority
- and basin_authority.startswith("{basin}.")
- ):
- basin_base_authority = basin_authority.removeprefix("{basin}.")
- return cls(account_authority, basin_base_authority)
- raise ValueError("Invalid S2_ACCOUNT_ENDPOINT and/or S2_BASIN_ENDPOINT")
-
- def _account(self) -> str:
- return self._account_authority
-
- def _basin(self, basin_name: str) -> str:
- return f"{basin_name}.{self._basin_base_authority}"
-
-
-def _account_authority(cloud: Cloud) -> str:
- match cloud:
- case Cloud.AWS:
- return "aws.s2.dev"
- case _:
- raise ValueError(f"Invalid cloud: {cloud}")
-
-
-def _basin_authority(cloud: Cloud) -> str:
- match cloud:
- case Cloud.AWS:
- return "b.aws.s2.dev"
- case _:
- raise ValueError(f"Invalid cloud: {cloud}")
diff --git a/src/streamstore/utils.py b/src/streamstore/utils.py
deleted file mode 100644
index d8f1978..0000000
--- a/src/streamstore/utils.py
+++ /dev/null
@@ -1,231 +0,0 @@
-__all__ = ["CommandRecord", "metered_bytes", "append_inputs_gen"]
-
-from asyncio import Queue, Task, create_task, sleep
-from dataclasses import dataclass, field
-from datetime import datetime, timedelta
-from typing import AsyncIterable, Iterable, Self
-
-from streamstore.schemas import ONE_MIB, AppendInput, Record, SequencedRecord
-
-
-class CommandRecord:
- """
- Factory class for creating `command records `_.
- """
-
- FENCE = b"fence"
- TRIM = b"trim"
-
- @staticmethod
- def fence(token: str) -> Record:
- """
- Create a fence command record.
-
- Args:
- token: Fencing token. Its UTF-8 byte count must not exceed 36 bytes. If empty, clears
- the previously set token.
- """
- encoded_token = token.encode()
- if len(encoded_token) > 36:
- raise ValueError("UTF-8 byte count of fencing token exceeds 36 bytes")
- return Record(body=encoded_token, headers=[(bytes(), CommandRecord.FENCE)])
-
- @staticmethod
- def trim(desired_first_seq_num: int) -> Record:
- """
- Create a trim command record.
-
- Args:
- desired_first_seq_num: Sequence number for the first record to exist after trimming
- preceding records in the stream.
-
- Note:
- If ``desired_first_seq_num`` is smaller than the sequence number for the first existing
- record in the stream, trimming doesn't happen.
- """
- return Record(
- body=desired_first_seq_num.to_bytes(8),
- headers=[(bytes(), CommandRecord.TRIM)],
- )
-
-
-def metered_bytes(records: Iterable[Record | SequencedRecord]) -> int:
- """
- Each record is metered using the following formula:
-
- .. code-block:: python
-
- 8 + 2 * len(headers)
- + sum((len(name) + len(value)) for (name, value) in headers)
- + len(body)
-
- """
- return sum(
- (
- 8
- + 2 * len(record.headers)
- + sum((len(name) + len(value)) for (name, value) in record.headers)
- + len(record.body)
- )
- for record in records
- )
-
-
-@dataclass(slots=True)
-class _AutoBatcher:
- _next_batch_idx: int = field(init=False)
- _next_batch: list[Record] = field(init=False)
- _next_batch_count: int = field(init=False)
- _next_batch_bytes: int = field(init=False)
- _linger_queue: Queue[tuple[int, datetime]] | None = field(init=False)
- _linger_handler_task: Task | None = field(init=False)
- _limits_handler_task: Task | None = field(init=False)
-
- append_input_queue: Queue[AppendInput | None]
- match_seq_num: int | None
- fencing_token: str | None
- max_records_per_batch: int
- max_bytes_per_batch: int
- max_linger_per_batch: timedelta | None
-
- def __post_init__(self) -> None:
- self._next_batch_idx = 0
- self._next_batch = []
- self._next_batch_count = 0
- self._next_batch_bytes = 0
- self._linger_queue = Queue() if self.max_linger_per_batch is not None else None
- self._linger_handler_task = None
- self._limits_handler_task = None
-
- def _accumulate(self, record: Record) -> None:
- self._next_batch.append(record)
- self._next_batch_count += 1
- self._next_batch_bytes += metered_bytes([record])
-
- def _next_append_input(self) -> AppendInput:
- append_input = AppendInput(
- records=list(self._next_batch),
- match_seq_num=self.match_seq_num,
- fencing_token=self.fencing_token,
- )
- self._next_batch.clear()
- self._next_batch_count = 0
- self._next_batch_bytes = 0
- self._next_batch_idx += 1
- if self.match_seq_num is not None:
- self.match_seq_num = self.match_seq_num + len(append_input.records)
- return append_input
-
- async def linger_handler(self) -> None:
- if self.max_linger_per_batch is None:
- return
- if self._linger_queue is None:
- return
- linger_duration = self.max_linger_per_batch.total_seconds()
- prev_linger_start = None
- while True:
- batch_idx, linger_start = await self._linger_queue.get()
- if batch_idx < self._next_batch_idx:
- continue
- if prev_linger_start is None:
- prev_linger_start = linger_start
- missed_duration = (linger_start - prev_linger_start).total_seconds()
- await sleep(max(linger_duration - missed_duration, 0))
- if batch_idx == self._next_batch_idx:
- append_input = self._next_append_input()
- await self.append_input_queue.put(append_input)
- prev_linger_start = linger_start
-
- def _limits_met(self, record: Record) -> bool:
- if (
- self._next_batch_count + 1 <= self.max_records_per_batch
- and self._next_batch_bytes + metered_bytes([record])
- <= self.max_bytes_per_batch
- ):
- return False
- return True
-
- async def limits_handler(self, records: AsyncIterable[Record]) -> None:
- async for record in records:
- if self._limits_met(record):
- append_input = self._next_append_input()
- await self.append_input_queue.put(append_input)
- self._accumulate(record)
- if self._linger_queue is not None and len(self._next_batch) == 1:
- await self._linger_queue.put((self._next_batch_idx, datetime.now()))
- if len(self._next_batch) != 0:
- append_input = self._next_append_input()
- await self.append_input_queue.put(append_input)
- await self.append_input_queue.put(None)
-
- def run(self, records: AsyncIterable[Record]) -> None:
- if self.max_linger_per_batch is not None:
- self._linger_handler_task = create_task(self.linger_handler())
- self._limits_handler_task = create_task(self.limits_handler(records))
-
- def cancel(self) -> None:
- if self._linger_handler_task is not None:
- self._linger_handler_task.cancel()
- if self._limits_handler_task is not None:
- self._limits_handler_task.cancel()
-
-
-@dataclass(slots=True)
-class _AppendInputAsyncIterator:
- append_input_queue: Queue[AppendInput | None]
-
- def __aiter__(self) -> Self:
- return self
-
- async def __anext__(self) -> AppendInput:
- append_input = await self.append_input_queue.get()
- if append_input is None:
- raise StopAsyncIteration
- return append_input
-
-
-async def append_inputs_gen(
- records: AsyncIterable[Record],
- match_seq_num: int | None = None,
- fencing_token: str | None = None,
- max_records_per_batch: int = 1000,
- max_bytes_per_batch: int = ONE_MIB,
- max_linger_per_batch: timedelta | None = None,
-) -> AsyncIterable[AppendInput]:
- """
- Generator function for batching records and yielding :class:`.AppendInput`.
-
-    The returned generator object can be used as the parameter to :meth:`.Stream.append_session`.
-
- Yields:
- :class:`.AppendInput`
-
- Args:
- records: Records that have to be appended to a stream.
- match_seq_num: If it is not ``None``, it is used in the first yield of :class:`.AppendInput`
- and is automatically advanced for subsequent yields.
- fencing_token: Used in each yield of :class:`.AppendInput`.
- max_records_per_batch: Maximum number of records in each batch.
- max_bytes_per_batch: Maximum size of each batch calculated using :func:`.metered_bytes`.
- max_linger_per_batch: Maximum duration for each batch to accumulate records before yielding.
-
- Note:
- If ``max_linger_per_batch`` is ``None``, :class:`.AppendInput` will be yielded only
- when ``max_records_per_batch`` or ``max_bytes_per_batch`` is reached.
- """
- append_input_queue: Queue[AppendInput | None] = Queue()
- append_input_aiter = _AppendInputAsyncIterator(append_input_queue)
- batcher = _AutoBatcher(
- append_input_queue,
- match_seq_num,
- fencing_token,
- max_records_per_batch,
- max_bytes_per_batch,
- max_linger_per_batch,
- )
- batcher.run(records)
- try:
- async for input in append_input_aiter:
- yield input
- finally:
- batcher.cancel()
diff --git a/tests/conftest.py b/tests/conftest.py
index 97c4c05..5081684 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -5,7 +5,7 @@
import pytest
import pytest_asyncio
-from streamstore import S2, Basin, Stream
+from s2_sdk import S2, Compression, Endpoints, S2Basin, S2Stream
pytest_plugins = ["pytest_asyncio"]
@@ -13,6 +13,21 @@
BASIN_PREFIX: Final[str] = "test-py-sdk"
+def pytest_addoption(parser):
+ parser.addoption(
+ "--compression",
+ action="store",
+ default="none",
+ choices=["none", "zstd", "gzip"],
+ help="Compression codec for E2E tests",
+ )
+
+
+@pytest.fixture(scope="session")
+def compression(request) -> Compression:
+ return Compression(request.config.getoption("--compression"))
+
+
@pytest.fixture(scope="session")
def access_token() -> str:
token = os.getenv("S2_ACCESS_TOKEN")
@@ -26,10 +41,21 @@ def basin_prefix() -> str:
return BASIN_PREFIX
+@pytest.fixture(scope="session")
+def endpoints() -> Endpoints | None:
+ account = os.getenv("S2_ACCOUNT_ENDPOINT")
+ basin = os.getenv("S2_BASIN_ENDPOINT")
+ if account and basin:
+ return Endpoints(account=account, basin=basin)
+ return None
+
+
@pytest_asyncio.fixture(scope="session")
-async def s2(access_token: str) -> AsyncGenerator[S2, None]:
- async with S2(access_token=access_token) as client:
- yield client
+async def s2(
+ access_token: str, compression: Compression, endpoints: Endpoints | None
+) -> AsyncGenerator[S2, None]:
+ async with S2(access_token, endpoints=endpoints, compression=compression) as s2:
+ yield s2
@pytest.fixture
@@ -58,10 +84,8 @@ def token_id() -> str:
@pytest_asyncio.fixture
-async def basin(s2: S2, basin_name: str) -> AsyncGenerator[Basin, None]:
- await s2.create_basin(
- name=basin_name,
- )
+async def basin(s2: S2, basin_name: str) -> AsyncGenerator[S2Basin, None]:
+ await s2.create_basin(name=basin_name)
try:
yield s2.basin(basin_name)
@@ -70,7 +94,7 @@ async def basin(s2: S2, basin_name: str) -> AsyncGenerator[Basin, None]:
@pytest_asyncio.fixture(scope="class")
-async def shared_basin(s2: S2) -> AsyncGenerator[Basin, None]:
+async def shared_basin(s2: S2) -> AsyncGenerator[S2Basin, None]:
basin_name = _basin_name()
await s2.create_basin(name=basin_name)
@@ -81,7 +105,9 @@ async def shared_basin(s2: S2) -> AsyncGenerator[Basin, None]:
@pytest_asyncio.fixture
-async def stream(shared_basin: Basin, stream_name: str) -> AsyncGenerator[Stream, None]:
+async def stream(
+ shared_basin: S2Basin, stream_name: str
+) -> AsyncGenerator[S2Stream, None]:
basin = shared_basin
await basin.create_stream(name=stream_name)
diff --git a/tests/test_account_ops.py b/tests/test_account_ops.py
index 230a657..60fc58b 100644
--- a/tests/test_account_ops.py
+++ b/tests/test_account_ops.py
@@ -1,23 +1,23 @@
-import time
+from datetime import datetime, timedelta, timezone
import pytest
-from streamstore import S2, Basin
-from streamstore.schemas import (
+from s2_sdk import (
+ S2,
AccessTokenScope,
BasinConfig,
- BasinScope,
- BasinState,
Operation,
OperationGroupPermissions,
Permission,
- ResourceMatchOp,
- ResourceMatchRule,
+ PrefixMatch,
+ S2Basin,
+ S2ServerError,
StorageClass,
StreamConfig,
Timestamping,
TimestampingMode,
)
+from tests.conftest import BASIN_PREFIX
@pytest.mark.account
@@ -27,8 +27,7 @@ async def test_create_basin(self, s2: S2, basin_name: str):
try:
assert basin_info.name == basin_name
- assert basin_info.scope == BasinScope.AWS_US_EAST_1
- assert basin_info.state in (BasinState.ACTIVE, BasinState.CREATING)
+ assert basin_info.created_at is not None
finally:
await s2.delete_basin(basin_name)
@@ -52,11 +51,25 @@ async def test_create_basin_with_config(self, s2: S2, basin_name: str):
assert basin_info.name == basin_name
retrieved_config = await s2.get_basin_config(basin_name)
- assert config == retrieved_config
+ assert retrieved_config.default_stream_config is not None
+ assert (
+ retrieved_config.default_stream_config.storage_class
+ == StorageClass.STANDARD
+ )
+ assert retrieved_config.default_stream_config.retention_policy == 86400 * 7
+ assert (
+ retrieved_config.default_stream_config.timestamping.mode
+ == TimestampingMode.CLIENT_REQUIRE
+ )
+ assert retrieved_config.default_stream_config.timestamping.uncapped is True
+ assert (
+ retrieved_config.default_stream_config.delete_on_empty_min_age == 3600
+ )
+ assert retrieved_config.create_stream_on_append is True
finally:
await s2.delete_basin(basin_name)
- async def test_reconfigure_basin(self, s2: S2, basin: Basin):
+ async def test_reconfigure_basin(self, s2: S2, basin: S2Basin):
config = BasinConfig(
default_stream_config=StreamConfig(
storage_class=StorageClass.STANDARD,
@@ -65,32 +78,21 @@ async def test_reconfigure_basin(self, s2: S2, basin: Basin):
create_stream_on_append=True,
)
- updated_config = await s2.reconfigure_basin(basin.name, config)
-
- assert config.default_stream_config is not None
- assert (
- updated_config.default_stream_config.storage_class
- == config.default_stream_config.storage_class
- )
- assert (
- updated_config.default_stream_config.retention_policy
- == config.default_stream_config.retention_policy
- )
- assert updated_config.create_stream_on_append == config.create_stream_on_append
+ updated_config = await s2.reconfigure_basin(basin.name, config=config)
+ assert updated_config.default_stream_config is not None
assert (
- updated_config.default_stream_config.timestamping.mode
- == TimestampingMode.UNSPECIFIED
+ updated_config.default_stream_config.storage_class == StorageClass.STANDARD
)
-
- assert updated_config.default_stream_config.delete_on_empty_min_age == 0
+ assert updated_config.default_stream_config.retention_policy == 3600
+ assert updated_config.create_stream_on_append is True
async def test_list_basins(self, s2: S2, basin_names: list[str]):
basin_infos = []
try:
for basin_name in basin_names:
- stream_info = await s2.create_basin(name=basin_name)
- basin_infos.append(stream_info)
+ basin_info = await s2.create_basin(name=basin_name)
+ basin_infos.append(basin_info)
page = await s2.list_basins()
@@ -105,8 +107,8 @@ async def test_list_basins_with_limit(self, s2: S2, basin_names: list[str]):
basin_infos = []
try:
for basin_name in basin_names:
- stream_info = await s2.create_basin(name=basin_name)
- basin_infos.append(stream_info)
+ basin_info = await s2.create_basin(name=basin_name)
+ basin_infos.append(basin_info)
page = await s2.list_basins(limit=1)
@@ -120,25 +122,24 @@ async def test_list_basins_with_prefix(self, s2: S2, basin_name: str):
await s2.create_basin(name=basin_name)
try:
- prefix = basin_name[:5]
+ prefix = basin_name[:12]
page = await s2.list_basins(prefix=prefix)
- basin_names = [b.name for b in page.items]
- assert basin_name in basin_names
+ names = [b.name for b in page.items]
+ assert basin_name in names
- for name in basin_names:
+ for name in names:
assert name.startswith(prefix)
finally:
await s2.delete_basin(basin_name)
+ @pytest.mark.cloud_only
async def test_issue_access_token(self, s2: S2, token_id: str, basin_prefix: str):
scope = AccessTokenScope(
- basins=ResourceMatchRule(
- match_op=ResourceMatchOp.PREFIX, value=basin_prefix
- ),
- streams=ResourceMatchRule(match_op=ResourceMatchOp.PREFIX, value=""),
- op_group_perms=OperationGroupPermissions(
+ basins=PrefixMatch(basin_prefix),
+ streams=PrefixMatch(""),
+ op_groups=OperationGroupPermissions(
basin=Permission.READ,
stream=Permission.READ,
),
@@ -150,14 +151,14 @@ async def test_issue_access_token(self, s2: S2, token_id: str, basin_prefix: str
assert isinstance(token, str)
assert len(token) > 0
finally:
- token_info = await s2.revoke_access_token(token_id)
- assert token_info.scope == scope
+ await s2.revoke_access_token(token_id)
+ @pytest.mark.cloud_only
async def test_issue_access_token_with_expiry(self, s2: S2, token_id: str):
- expires_at = int(time.time()) + 3600
+ expires_at = (datetime.now(timezone.utc) + timedelta(hours=1)).isoformat()
scope = AccessTokenScope(
- streams=ResourceMatchRule(match_op=ResourceMatchOp.PREFIX, value=""),
+ streams=PrefixMatch(""),
ops=[Operation.READ, Operation.CHECK_TAIL],
)
@@ -175,17 +176,17 @@ async def test_issue_access_token_with_expiry(self, s2: S2, token_id: str):
token_info = next((t for t in page.items if t.id == token_id), None)
assert token_info is not None
- assert token_info.expires_at == expires_at
- assert token_info.scope.streams == scope.streams
- assert set(token_info.scope.ops) == set(scope.ops)
+ assert token_info.expires_at is not None
+ assert token_info.scope.streams is not None
finally:
await s2.revoke_access_token(token_id)
+ @pytest.mark.cloud_only
async def test_issue_access_token_with_auto_prefix(self, s2: S2, token_id: str):
scope = AccessTokenScope(
- streams=ResourceMatchRule(match_op=ResourceMatchOp.PREFIX, value="prefix/"),
- op_group_perms=OperationGroupPermissions(stream=Permission.READ_WRITE),
+ streams=PrefixMatch("prefix/"),
+ op_groups=OperationGroupPermissions(stream=Permission.READ_WRITE),
)
token = await s2.issue_access_token(
@@ -204,8 +205,89 @@ async def test_issue_access_token_with_auto_prefix(self, s2: S2, token_id: str):
token_info = page.items[0]
assert token_info is not None
- assert token_info.scope == scope
assert token_info.auto_prefix_streams is True
finally:
await s2.revoke_access_token(token_id)
+
+ @pytest.mark.cloud_only
+ async def test_get_basin_config(self, s2: S2, basin: S2Basin):
+ config = await s2.get_basin_config(basin.name)
+ assert config is not None
+ assert config.default_stream_config is not None
+
+ async def test_delete_nonexistent_basin_errors(self, s2: S2):
+ with pytest.raises(S2ServerError):
+ await s2.delete_basin("nonexistent-basin-xyz")
+
+ async def test_list_basins_with_prefix_and_start_after(
+ self, s2: S2, basin_names: list[str]
+ ):
+ basin_infos = []
+ try:
+ for basin_name in sorted(basin_names):
+ basin_info = await s2.create_basin(name=basin_name)
+ basin_infos.append(basin_info)
+
+ sorted_names = sorted(basin_names)
+ page = await s2.list_basins(
+ prefix=BASIN_PREFIX, start_after=sorted_names[0], limit=100
+ )
+
+ retrieved = [b.name for b in page.items]
+ # start_after is exclusive, so the first basin should not appear
+ assert sorted_names[0] not in retrieved
+ finally:
+ for basin_info in basin_infos:
+ await s2.delete_basin(basin_info.name)
+
+ @pytest.mark.cloud_only
+ async def test_list_access_tokens_with_limit(self, s2: S2, token_id: str):
+ scope = AccessTokenScope(
+ streams=PrefixMatch(""),
+ op_groups=OperationGroupPermissions(stream=Permission.READ),
+ )
+ await s2.issue_access_token(id=token_id, scope=scope)
+
+ try:
+ page = await s2.list_access_tokens(limit=1)
+ assert len(page.items) <= 1
+ finally:
+ await s2.revoke_access_token(token_id)
+
+ @pytest.mark.cloud_only
+ async def test_list_access_tokens_with_prefix(self, s2: S2, token_id: str):
+ scope = AccessTokenScope(
+ streams=PrefixMatch(""),
+ op_groups=OperationGroupPermissions(stream=Permission.READ),
+ )
+ await s2.issue_access_token(id=token_id, scope=scope)
+
+ try:
+ page = await s2.list_access_tokens(prefix=token_id)
+ names = [t.id for t in page.items]
+ assert token_id in names
+ finally:
+ await s2.revoke_access_token(token_id)
+
+ @pytest.mark.cloud_only
+ async def test_issue_access_token_with_no_permitted_ops_errors(
+ self, s2: S2, token_id: str
+ ):
+ scope = AccessTokenScope()
+
+ with pytest.raises(S2ServerError):
+ await s2.issue_access_token(id=token_id, scope=scope)
+
+ @pytest.mark.cloud_only
+ async def test_issue_access_token_with_auto_prefix_without_prefix_errors(
+ self, s2: S2, token_id: str
+ ):
+ scope = AccessTokenScope(
+ op_groups=OperationGroupPermissions(stream=Permission.READ_WRITE),
+ )
+
+ with pytest.raises(S2ServerError):
+ await s2.issue_access_token(
+ id=token_id, scope=scope, auto_prefix_streams=True
+ )
diff --git a/tests/test_basin_ops.py b/tests/test_basin_ops.py
index aabb6ee..5ee5a42 100644
--- a/tests/test_basin_ops.py
+++ b/tests/test_basin_ops.py
@@ -1,7 +1,11 @@
+import uuid
+
import pytest
-from streamstore import Basin, Stream
-from streamstore.schemas import (
+from s2_sdk import (
+ S2Basin,
+ S2ServerError,
+ S2Stream,
StorageClass,
StreamConfig,
Timestamping,
@@ -11,7 +15,7 @@
@pytest.mark.basin
class TestBasinOperations:
- async def test_create_stream(self, shared_basin: Basin, stream_name: str):
+ async def test_create_stream(self, shared_basin: S2Basin, stream_name: str):
basin = shared_basin
stream_info = await basin.create_stream(name=stream_name)
@@ -24,7 +28,7 @@ async def test_create_stream(self, shared_basin: Basin, stream_name: str):
await basin.delete_stream(stream_name)
async def test_create_stream_with_config(
- self, shared_basin: Basin, stream_name: str
+ self, shared_basin: S2Basin, stream_name: str
):
basin = shared_basin
@@ -43,19 +47,24 @@ async def test_create_stream_with_config(
assert stream_info.name == stream_name
retrieved_config = await basin.get_stream_config(stream_name)
- assert retrieved_config == config
+ assert retrieved_config.storage_class == StorageClass.STANDARD
+ assert retrieved_config.retention_policy == 86400 * 3
+ assert retrieved_config.timestamping is not None
+ assert retrieved_config.timestamping.mode == TimestampingMode.ARRIVAL
+ assert retrieved_config.timestamping.uncapped is False
+ assert retrieved_config.delete_on_empty_min_age == 7200
finally:
await basin.delete_stream(stream_name)
- async def test_default_stream_config(self, shared_basin: Basin, stream: Stream):
+ async def test_default_stream_config(self, shared_basin: S2Basin, stream: S2Stream):
basin = shared_basin
config = await basin.get_stream_config(stream.name)
assert config.storage_class == StorageClass.EXPRESS
assert config.retention_policy == 86400 * 7
- async def test_reconfigure_stream(self, shared_basin: Basin, stream: Stream):
+ async def test_reconfigure_stream(self, shared_basin: S2Basin, stream: S2Stream):
basin = shared_basin
config = StreamConfig(
storage_class=StorageClass.STANDARD,
@@ -66,8 +75,13 @@ async def test_reconfigure_stream(self, shared_basin: Basin, stream: Stream):
delete_on_empty_min_age=1800,
)
- updated_config = await basin.reconfigure_stream(stream.name, config)
- assert updated_config == config
+ updated_config = await basin.reconfigure_stream(stream.name, config=config)
+ assert updated_config.storage_class == StorageClass.STANDARD
+ assert updated_config.retention_policy == "infinite"
+ assert updated_config.timestamping is not None
+ assert updated_config.timestamping.mode == TimestampingMode.CLIENT_REQUIRE
+ assert updated_config.timestamping.uncapped is True
+ assert updated_config.delete_on_empty_min_age == 1800
config = StreamConfig(
storage_class=StorageClass.EXPRESS,
@@ -77,10 +91,14 @@ async def test_reconfigure_stream(self, shared_basin: Basin, stream: Stream):
),
delete_on_empty_min_age=3600,
)
- updated_config = await basin.reconfigure_stream(stream.name, config)
- assert updated_config == config
+ updated_config = await basin.reconfigure_stream(stream.name, config=config)
+ assert updated_config.storage_class == StorageClass.EXPRESS
+ assert updated_config.retention_policy == 86400 * 90
+ assert updated_config.timestamping.mode == TimestampingMode.CLIENT_PREFER
+ assert updated_config.timestamping.uncapped is False
+ assert updated_config.delete_on_empty_min_age == 3600
- async def test_list_streams(self, shared_basin: Basin, stream_names: list[str]):
+ async def test_list_streams(self, shared_basin: S2Basin, stream_names: list[str]):
basin = shared_basin
stream_infos = []
@@ -99,7 +117,7 @@ async def test_list_streams(self, shared_basin: Basin, stream_names: list[str]):
await basin.delete_stream(stream_info.name)
async def test_list_streams_with_limit(
- self, shared_basin: Basin, stream_names: list[str]
+ self, shared_basin: S2Basin, stream_names: list[str]
):
basin = shared_basin
@@ -118,7 +136,7 @@ async def test_list_streams_with_limit(
await basin.delete_stream(stream_info.name)
async def test_list_streams_with_prefix(
- self, shared_basin: Basin, stream_name: str
+ self, shared_basin: S2Basin, stream_name: str
):
basin = shared_basin
@@ -136,3 +154,393 @@ async def test_list_streams_with_prefix(
finally:
await basin.delete_stream(stream_name)
+
+ async def test_create_stream_storage_class_express(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(storage_class=StorageClass.EXPRESS)
+ info = await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ assert info.name == stream_name
+ retrieved = await shared_basin.get_stream_config(stream_name)
+ assert retrieved.storage_class == StorageClass.EXPRESS
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_create_stream_retention_infinite(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(retention_policy="infinite")
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ retrieved = await shared_basin.get_stream_config(stream_name)
+ assert retrieved.retention_policy == "infinite"
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_create_stream_timestamping_modes(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(
+ timestamping=Timestamping(mode=TimestampingMode.CLIENT_PREFER)
+ )
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ retrieved = await shared_basin.get_stream_config(stream_name)
+ assert retrieved.timestamping is not None
+ assert retrieved.timestamping.mode == TimestampingMode.CLIENT_PREFER
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_create_stream_timestamping_uncapped(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(
+ timestamping=Timestamping(
+ mode=TimestampingMode.CLIENT_REQUIRE, uncapped=True
+ )
+ )
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ retrieved = await shared_basin.get_stream_config(stream_name)
+ assert retrieved.timestamping is not None
+ assert retrieved.timestamping.uncapped is True
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_create_stream_delete_on_empty(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(delete_on_empty_min_age=3600)
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ retrieved = await shared_basin.get_stream_config(stream_name)
+ assert retrieved.delete_on_empty_min_age == 3600
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_reconfigure_stream_storage_class(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+ updated = await shared_basin.reconfigure_stream(
+ stream.name, config=StreamConfig(storage_class=StorageClass.STANDARD)
+ )
+ assert updated.storage_class == StorageClass.STANDARD
+
+ async def test_reconfigure_stream_retention(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+ updated = await shared_basin.reconfigure_stream(
+ stream.name, config=StreamConfig(retention_policy="infinite")
+ )
+ assert updated.retention_policy == "infinite"
+
+ async def test_reconfigure_stream_timestamping(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+ updated = await shared_basin.reconfigure_stream(
+ stream.name,
+ config=StreamConfig(
+ timestamping=Timestamping(mode=TimestampingMode.CLIENT_REQUIRE)
+ ),
+ )
+ assert updated.timestamping is not None
+ assert updated.timestamping.mode == TimestampingMode.CLIENT_REQUIRE
+
+ async def test_reconfigure_stream_delete_on_empty(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+ updated = await shared_basin.reconfigure_stream(
+ stream.name, config=StreamConfig(delete_on_empty_min_age=7200)
+ )
+ assert updated.delete_on_empty_min_age == 7200
+
+ async def test_reconfigure_stream_partial_update(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+        # Only change retention; other fields should remain unchanged.
+ before = await shared_basin.get_stream_config(stream.name)
+ await shared_basin.reconfigure_stream(
+ stream.name, config=StreamConfig(retention_policy=86400)
+ )
+ after = await shared_basin.get_stream_config(stream.name)
+ assert after.retention_policy == 86400
+ # Storage class should remain the same
+ assert after.storage_class == before.storage_class
+
+ async def test_reconfigure_stream_empty_no_change(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+ before = await shared_basin.get_stream_config(stream.name)
+ after = await shared_basin.reconfigure_stream(
+ stream.name, config=StreamConfig()
+ )
+ assert after.storage_class == before.storage_class
+
+ async def test_delete_nonexistent_stream_errors(self, shared_basin: S2Basin):
+ with pytest.raises(S2ServerError):
+ await shared_basin.delete_stream("nonexistent-stream-xyz")
+
+ async def test_get_stream_config_nonexistent_errors(self, shared_basin: S2Basin):
+ with pytest.raises(S2ServerError):
+ await shared_basin.get_stream_config("nonexistent-stream-xyz")
+
+ async def test_create_stream_duplicate_name_errors(
+ self, shared_basin: S2Basin, stream: S2Stream
+ ):
+ with pytest.raises(S2ServerError):
+ await shared_basin.create_stream(name=stream.name)
+
+ async def test_list_streams_with_start_after(
+ self, shared_basin: S2Basin, stream_names: list[str]
+ ):
+ basin = shared_basin
+ try:
+ for name in sorted(stream_names):
+ await basin.create_stream(name=name)
+
+ sorted_names = sorted(stream_names)
+ page = await basin.list_streams(start_after=sorted_names[0])
+
+ retrieved = [s.name for s in page.items]
+ assert sorted_names[0] not in retrieved
+ finally:
+ for name in stream_names:
+ try:
+ await basin.delete_stream(name)
+ except Exception:
+ pass
+
+ async def test_list_streams_returns_lexicographic_order(
+ self, shared_basin: S2Basin, stream_names: list[str]
+ ):
+ basin = shared_basin
+ try:
+ for name in stream_names:
+ await basin.create_stream(name=name)
+
+ page = await basin.list_streams(prefix="stream-")
+
+ retrieved = [s.name for s in page.items]
+ assert retrieved == sorted(retrieved)
+ finally:
+ for name in stream_names:
+ try:
+ await basin.delete_stream(name)
+ except Exception:
+ pass
+
+ async def test_deleted_stream_has_deleted_at(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ basin = shared_basin
+ info = await basin.create_stream(name=stream_name)
+ assert info.deleted_at is None
+ await basin.delete_stream(stream_name)
+
+ async def test_create_stream_inherits_basin_defaults(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ """A stream created with no config inherits the basin defaults."""
+ await shared_basin.create_stream(name=stream_name)
+ try:
+ config = await shared_basin.get_stream_config(stream_name)
+ assert config.storage_class is not None
+ assert config.retention_policy is not None
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_list_streams_with_start_after_returns_empty_page(
+ self, shared_basin: S2Basin
+ ):
+ basin = shared_basin
+ prefix = f"safp-{uuid.uuid4().hex[:6]}"
+ names = sorted([f"{prefix}-{i:04}" for i in range(3)])
+ try:
+ for name in names:
+ await basin.create_stream(name=name)
+
+ last_name = names[-1]
+ page = await basin.list_streams(prefix=prefix, start_after=last_name)
+
+ assert len(page.items) == 0
+ assert page.has_more is False
+ finally:
+ for name in names:
+ try:
+ await basin.delete_stream(name)
+ except Exception:
+ pass
+
+ async def test_list_streams_with_start_after_less_than_prefix_errors(
+ self, shared_basin: S2Basin
+ ):
+ basin = shared_basin
+ base = uuid.uuid4().hex[:6]
+ names = [f"{base}-a-a", f"{base}-a-b", f"{base}-b-a"]
+ for name in names:
+ await basin.create_stream(name=name)
+ try:
+ with pytest.raises(S2ServerError):
+ await basin.list_streams(prefix=f"{base}-b", start_after=f"{base}-a")
+ finally:
+ for name in names:
+ try:
+ await basin.delete_stream(name)
+ except Exception:
+ pass
+
+ async def test_list_streams_with_limit_zero(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ try:
+ page = await shared_basin.list_streams(prefix=stream_name[:8], limit=0)
+ assert len(page.items) <= 1000
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_list_streams_with_limit_over_max(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ try:
+ page = await shared_basin.list_streams(prefix=stream_name[:8], limit=1500)
+ assert len(page.items) <= 1000
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_list_streams_with_pagination(self, shared_basin: S2Basin):
+ basin = shared_basin
+ prefix = f"page-{uuid.uuid4().hex[:6]}"
+ names = sorted([f"{prefix}-{i:04}" for i in range(3)])
+ for name in names:
+ await basin.create_stream(name=name)
+ try:
+ page_1 = await basin.list_streams(prefix=prefix, limit=2)
+ assert len(page_1.items) > 0
+
+ last_name = page_1.items[-1].name
+ page_2 = await basin.list_streams(
+ prefix=prefix, start_after=last_name, limit=2
+ )
+
+ assert all(s.name > last_name for s in page_2.items)
+
+ listed = sorted(
+ [s.name for s in page_1.items] + [s.name for s in page_2.items]
+ )
+ assert listed == names
+ finally:
+ for name in names:
+ try:
+ await basin.delete_stream(name)
+ except Exception:
+ pass
+
+ async def test_create_stream_invalid_retention_age_zero(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(retention_policy=0)
+ with pytest.raises(S2ServerError):
+ await shared_basin.create_stream(name=stream_name, config=config)
+
+ async def test_reconfigure_stream_storage_class_express(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ try:
+ updated = await shared_basin.reconfigure_stream(
+ stream_name, config=StreamConfig(storage_class=StorageClass.EXPRESS)
+ )
+ assert updated.storage_class == StorageClass.EXPRESS
+ except S2ServerError:
+ pass # Free tier may not support Express
+ finally:
+ try:
+ await shared_basin.delete_stream(stream_name)
+ except Exception:
+ pass
+
+ async def test_reconfigure_stream_retention_policy_age(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ try:
+ updated = await shared_basin.reconfigure_stream(
+ stream_name, config=StreamConfig(retention_policy=3600)
+ )
+ assert updated.retention_policy == 3600
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_reconfigure_stream_timestamping_uncapped(
+ self, shared_basin: S2Basin
+ ):
+ for uncapped in [True, False]:
+ name = f"stream-{uuid.uuid4().hex[:8]}"
+ await shared_basin.create_stream(name=name)
+ try:
+ updated = await shared_basin.reconfigure_stream(
+ name,
+ config=StreamConfig(timestamping=Timestamping(uncapped=uncapped)),
+ )
+ if updated.timestamping is not None:
+ assert updated.timestamping.uncapped == uncapped
+ finally:
+ await shared_basin.delete_stream(name)
+
+ async def test_reconfigure_stream_disable_delete_on_empty(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(delete_on_empty_min_age=3600)
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ updated = await shared_basin.reconfigure_stream(
+ stream_name, config=StreamConfig(delete_on_empty_min_age=0)
+ )
+ assert (
+ updated.delete_on_empty_min_age is None
+ or updated.delete_on_empty_min_age == 0
+ )
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_reconfigure_stream_invalid_retention_age_zero(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ try:
+ with pytest.raises(S2ServerError):
+ await shared_basin.reconfigure_stream(
+ stream_name, config=StreamConfig(retention_policy=0)
+ )
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_reconfigure_stream_nonexistent_errors(self, shared_basin: S2Basin):
+ with pytest.raises(S2ServerError):
+ await shared_basin.reconfigure_stream(
+ "nonexistent-stream-xyz",
+ config=StreamConfig(storage_class=StorageClass.STANDARD),
+ )
+
+ async def test_delete_stream_already_deleting_is_idempotent(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ await shared_basin.delete_stream(stream_name)
+        # Second delete should be idempotent: either succeed or fail with stream_not_found.
+ try:
+ await shared_basin.delete_stream(stream_name)
+ except S2ServerError:
+ pass # stream_not_found is acceptable
+
+ async def test_get_stream_config_for_deleting_stream_errors(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ await shared_basin.create_stream(name=stream_name)
+ await shared_basin.delete_stream(stream_name)
+
+ with pytest.raises(S2ServerError):
+ await shared_basin.get_stream_config(stream_name)
diff --git a/tests/test_batching.py b/tests/test_batching.py
new file mode 100644
index 0000000..9c8414b
--- /dev/null
+++ b/tests/test_batching.py
@@ -0,0 +1,74 @@
+from datetime import timedelta
+
+import pytest
+
+from s2_sdk import AppendInput, Batching, Record
+from s2_sdk._batching import append_inputs, append_record_batches
+
+
+async def _async_iter(records: list[Record]):
+ for r in records:
+ yield r
+
+
+@pytest.mark.asyncio
+async def test_empty_input():
+ batches = []
+ async for batch in append_record_batches(
+ _async_iter([]), batching=Batching(linger=timedelta(0))
+ ):
+ batches.append(batch)
+ assert batches == []
+
+
+@pytest.mark.asyncio
+async def test_count_limit():
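+    # Five records with max_records=2 should split into batches of 2, 2, and 1.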
+ records = [Record(body=f"r{i}".encode()) for i in range(5)]
+ batches = []
+ async for batch in append_record_batches(
+ _async_iter(records), batching=Batching(max_records=2, linger=timedelta(0))
+ ):
+ batches.append(batch)
+ assert len(batches) == 3
+ assert len(batches[0]) == 2
+ assert len(batches[1]) == 2
+ assert len(batches[2]) == 1
+
+
+@pytest.mark.asyncio
+async def test_bytes_limit():
+ # Each record: 8 bytes overhead + body. Body of 10 bytes → 18 metered bytes.
+ records = [Record(body=b"x" * 10) for _ in range(3)]
+ batches = []
+ async for batch in append_record_batches(
+ _async_iter(records),
+ batching=Batching(max_bytes=36, linger=timedelta(0)),
+ ):
+ batches.append(batch)
+ # 36 bytes limit: first 2 records fit (36 bytes), third goes in next batch
+ assert len(batches) == 2
+ assert len(batches[0]) == 2
+ assert len(batches[1]) == 1
+
+
+@pytest.mark.asyncio
+async def test_oversized_record_passes():
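+    # A record larger than max_bytes should still be emitted as its own single-record batch.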
+ records = [Record(body=b"x" * 100)]
+ batches = []
+ async for batch in append_record_batches(
+ _async_iter(records), batching=Batching(max_bytes=10, linger=timedelta(0))
+ ):
+ batches.append(batch)
+ assert len(batches) == 1
+ assert len(batches[0]) == 1
+
+
+@pytest.mark.asyncio
+async def test_append_inputs_skips_empty_batches():
+ inputs = []
+ async for append_input in append_inputs(
+ _async_iter([Record(body=b"x" * 100)]),
+ batching=Batching(max_bytes=10, linger=timedelta(0)),
+ ):
+ inputs.append(append_input)
+ assert inputs == [AppendInput(records=[Record(body=b"x" * 100)])]
diff --git a/tests/test_client.py b/tests/test_client.py
new file mode 100644
index 0000000..04a3794
--- /dev/null
+++ b/tests/test_client.py
@@ -0,0 +1,677 @@
+import asyncio
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from s2_sdk._client import (
+ IDLE_TIMEOUT,
+ ConnectionPool,
+ HttpClient,
+ Response,
+ _PooledConnection,
+ _raise_for_status,
+ _StreamState,
+)
+from s2_sdk._exceptions import (
+ ConnectionClosedError,
+ ProtocolError,
+ ReadTimeoutError,
+ S2ClientError,
+ S2ServerError,
+)
+from s2_sdk._types import Compression
+
+_DEFAULT_MAX_STREAMS = 100
+
+
+def _mock_connection(
+ open_streams: int = 0,
+ available: bool = True,
+ max_concurrent_streams: int = _DEFAULT_MAX_STREAMS,
+) -> AsyncMock:
+ conn = AsyncMock()
+ conn.is_available = available
+ conn.open_stream_count = open_streams
+ conn.max_concurrent_streams = max_concurrent_streams
+ conn.connect = AsyncMock()
+ conn.close = AsyncMock()
+ conn._streams = {}
+ conn._pending_streams = {}
+ conn._recv_dead = False
+ conn._settings_received = asyncio.Event()
+ conn._settings_received.set()
+
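+    # Mimic Connection.reserve_stream: register a pending stream and keep
+    # open_stream_count in sync so pool capacity checks see realistic counts.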
+ def _reserve_stream():
+ state = MagicMock()
+ conn._pending_streams[id(state)] = state
+ conn.open_stream_count = len(conn._streams) + len(conn._pending_streams)
+ return state
+
+ conn.reserve_stream = _reserve_stream
+ return conn
+
+
+@pytest.fixture
+def pool():
+ return ConnectionPool(connect_timeout=5.0)
+
+
+@pytest.mark.asyncio
+async def test_checkout_creates_connection(pool: ConnectionPool):
+ with patch("s2_sdk._client.Connection") as MockConn:
+ mock_conn = _mock_connection()
+ MockConn.return_value = mock_conn
+ pc, state = await pool.checkout("https://example.com")
+ assert isinstance(pc, _PooledConnection)
+ assert state is not None
+ mock_conn.connect.assert_awaited_once()
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_checkout_reuses_connection_with_capacity(pool: ConnectionPool):
+ with patch("s2_sdk._client.Connection") as MockConn:
+ mock_conn = _mock_connection()
+ MockConn.return_value = mock_conn
+ pc1, _ = await pool.checkout("https://example.com")
+ pc2, _ = await pool.checkout("https://example.com")
+ assert pc1 is pc2
+ # Only one connection created
+ assert MockConn.call_count == 1
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_checkout_scales_when_saturated(pool: ConnectionPool):
+ with patch("s2_sdk._client.Connection") as MockConn:
+ conn1 = _mock_connection()
+ conn2 = _mock_connection()
+ MockConn.side_effect = [conn1, conn2]
+
+ # Fill first connection to capacity
+ for _ in range(_DEFAULT_MAX_STREAMS):
+ await pool.checkout("https://example.com")
+
+ # Next checkout should create a new connection
+ pc, _ = await pool.checkout("https://example.com")
+ assert pc._conn is conn2
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_checkout_different_hosts(pool: ConnectionPool):
+ with patch("s2_sdk._client.Connection") as MockConn:
+ MockConn.return_value = _mock_connection()
+ pc1, _ = await pool.checkout("https://a.example.com")
+ MockConn.return_value = _mock_connection()
+ pc2, _ = await pool.checkout("https://b.example.com")
+ assert pc1 is not pc2
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_close_pool(pool: ConnectionPool):
+ with patch("s2_sdk._client.Connection") as MockConn:
+ MockConn.return_value = _mock_connection()
+ await pool.checkout("https://example.com")
+ await pool.close()
+ assert pool._closed
+
+
+@pytest.mark.asyncio
+async def test_closed_pool_raises(pool: ConnectionPool):
+ await pool.close()
+ with pytest.raises(S2ClientError, match="Pool is closed"):
+ await pool.checkout("https://example.com")
+
+
+@pytest.mark.asyncio
+async def test_dead_connection_not_reused(pool: ConnectionPool):
+ """A connection whose recv_loop has died should not be reused."""
+ with patch("s2_sdk._client.Connection") as MockConn:
+ conn1 = _mock_connection()
+ conn2 = _mock_connection()
+ MockConn.side_effect = [conn1, conn2]
+
+ pc1, state1 = await pool.checkout("https://example.com")
+ # Simulate release so open_stream_count goes back down
+ conn1._pending_streams.pop(id(state1), None)
+ conn1.open_stream_count = 0
+
+ # Mark connection as dead (real is_available checks _recv_dead)
+ conn1._recv_dead = True
+ conn1.is_available = False
+
+ # Next checkout should create a new connection
+ pc2, _ = await pool.checkout("https://example.com")
+ assert pc2._conn is conn2
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_settings_not_received_skips_reuse(pool: ConnectionPool):
+ """Connection without settings should not be reused via _try_checkout."""
+ with patch("s2_sdk._client.Connection") as MockConn:
+ conn1 = _mock_connection()
+ conn2 = _mock_connection()
+ MockConn.side_effect = [conn1, conn2]
+
+ # First checkout creates conn1 and reserves a stream on it.
+ pc1, state1 = await pool.checkout("https://example.com")
+ conn1._pending_streams.pop(id(state1), None)
+ conn1.open_stream_count = 0
+
+ # Clear settings after creation so _try_checkout skips conn1 reuse.
+ conn1._settings_received.clear()
+
+ # Next checkout should skip conn1 and create conn2
+ pc2, _ = await pool.checkout("https://example.com")
+ assert pc2._conn is conn2
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_new_connection_with_zero_stream_capacity_raises(pool: ConnectionPool):
+ with patch("s2_sdk._client.Connection") as MockConn:
+ conn = _mock_connection(max_concurrent_streams=0)
+ MockConn.return_value = conn
+
+ with pytest.raises(ProtocolError, match="no available stream capacity"):
+ await pool.checkout("https://example.com")
+
+ conn.close.assert_awaited_once()
+ assert pool._hosts["https://example.com"] == []
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_new_connection_without_settings_proceeds_with_defaults():
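+    # Even if the server SETTINGS frame never arrives, checkout should succeed
+    # using default stream limits rather than failing.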
+ pool = ConnectionPool(connect_timeout=0.001)
+ try:
+ with patch("s2_sdk._client.Connection") as MockConn:
+ conn = _mock_connection()
+ conn._settings_received.clear()
+ MockConn.return_value = conn
+
+ pc, state = await pool.checkout("https://example.com")
+
+ assert pc._conn is conn
+ assert state is not None
+ finally:
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_unary_request_timeout_acks_and_resets_stream():
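+    # On timeout, the client should ack the unacked flow-control bytes, reset
+    # the stream, release it, and mark the pooled connection idle.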
+ state = _StreamState()
+ state.unacked_flow_bytes = 11
+
+ conn = AsyncMock()
+ conn.send_headers = AsyncMock(return_value=1)
+ conn.release_stream = MagicMock()
+ conn.ack_data = AsyncMock()
+ conn.reset_stream = AsyncMock()
+
+ pc = MagicMock()
+ pc._conn = conn
+ pc.touch_idle = MagicMock()
+
+ pool = MagicMock()
+ pool.checkout = AsyncMock(return_value=(pc, state))
+
+ client = HttpClient(
+ pool=pool, base_url="https://example.com", request_timeout=0.001
+ )
+
+ with pytest.raises(ReadTimeoutError, match="Request timed out"):
+ await client.unary_request("GET", "/v1/test")
+
+ conn.ack_data.assert_awaited_once_with(1, 11)
+ conn.reset_stream.assert_awaited_once_with(1)
+ conn.release_stream.assert_called_once_with(1, state)
+ pc.touch_idle.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_send_headers_allocates_ids_from_h2_under_lock():
+ from s2_sdk._client import Connection
+
+ conn = Connection(
+ host="localhost",
+ port=443,
+ ssl_context=None,
+ connect_timeout=5.0,
+ )
+ conn._h2 = MagicMock()
+ conn._h2.get_next_available_stream_id.side_effect = [1, 3]
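+    # HTTP/2 clients use odd stream IDs, so consecutive allocations are 1 and 3.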
+
+ state1 = conn.reserve_stream()
+ state2 = conn.reserve_stream()
+
+ with patch.object(Connection, "_flush_h2_data_and_drain", new=AsyncMock()):
+ stream1 = await conn.send_headers(state1, [(":method", "GET")], end_stream=True)
+ stream2 = await conn.send_headers(state2, [(":method", "GET")], end_stream=True)
+
+ assert stream1 == 1
+ assert stream2 == 3
+ assert conn.open_stream_count == 2
+
+
+@pytest.mark.asyncio
+async def test_reserve_stream_counts_pending_capacity():
+ from s2_sdk._client import Connection
+
+ conn = Connection(
+ host="localhost",
+ port=443,
+ ssl_context=None,
+ connect_timeout=5.0,
+ )
+ conn._h2 = MagicMock()
+ conn._h2.remote_settings.max_concurrent_streams = 1
+ conn._settings_received.set()
+
+ pc = _PooledConnection(conn)
+ assert pc.has_capacity is True
+
+ state = conn.reserve_stream()
+
+ assert conn.open_stream_count == 1
+ assert pc.has_capacity is False
+
+ conn.release_stream(None, state)
+ assert conn.open_stream_count == 0
+
+
+@pytest.mark.asyncio
+async def test_reaper_keeps_one_idle_connection_per_host():
+ pool = ConnectionPool(connect_timeout=5.0)
+ try:
+ conns = [_PooledConnection(_mock_connection()) for _ in range(3)]
+ past = asyncio.get_running_loop().time() - (IDLE_TIMEOUT + 1)
+ for pc in conns:
+ pc._idle_since = past
+ pool._hosts["https://example.com"] = conns
+
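+        # Patch asyncio.sleep so the reaper performs a single pass and then returns.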
+ async def _sleep(_: float) -> None:
+ pool._closed = True
+
+ with patch("s2_sdk._client.asyncio.sleep", new=_sleep):
+ await pool._reap_idle()
+
+ assert len(pool._hosts["https://example.com"]) == 1
+ finally:
+ pool._closed = True
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_reaper_prunes_unavailable_connections():
+ pool = ConnectionPool(connect_timeout=5.0)
+ try:
+ dead = _PooledConnection(_mock_connection(available=False))
+ live = _PooledConnection(_mock_connection())
+ live._idle_since = asyncio.get_running_loop().time()
+ pool._hosts["https://example.com"] = [dead, live]
+
+ async def _sleep(_: float) -> None:
+ pool._closed = True
+
+ with patch("s2_sdk._client.asyncio.sleep", new=_sleep):
+ await pool._reap_idle()
+
+ assert pool._hosts["https://example.com"] == [live]
+ dead._conn.close.assert_awaited_once() # type: ignore[attr-defined]
+ finally:
+ pool._closed = True
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_reaper_does_not_close_connection_reacquired_mid_reap():
+ pool = ConnectionPool(connect_timeout=5.0)
+ try:
+ conn1 = _mock_connection()
+ conn2 = _mock_connection()
+ pc1 = _PooledConnection(conn1)
+ pc2 = _PooledConnection(conn2)
+ past = asyncio.get_running_loop().time() - (IDLE_TIMEOUT + 1)
+ pc1._idle_since = past
+ pc2._idle_since = past
+ pool._hosts["https://example.com"] = [pc1, pc2]
+
+ async def _close_first() -> None:
+ conn2.open_stream_count = 1
+
+ conn1.close.side_effect = _close_first
+
+ async def _sleep(_: float) -> None:
+ pool._closed = True
+
+ with patch("s2_sdk._client.asyncio.sleep", new=_sleep):
+ await pool._reap_idle()
+
+ assert pc2 in pool._hosts["https://example.com"]
+ conn2.close.assert_not_awaited()
+ finally:
+ pool._closed = True
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_reaper_handles_hosts_added_during_close():
+ pool = ConnectionPool(connect_timeout=5.0)
+ try:
+ dead = _PooledConnection(_mock_connection(available=False))
+ pool._hosts["https://example.com"] = [dead]
+
+ async def _close_and_add_host() -> None:
+ pool._hosts["https://other.example.com"] = []
+
+ dead._conn.close.side_effect = _close_and_add_host # type: ignore[attr-defined]
+
+ async def _sleep(_: float) -> None:
+ pool._closed = True
+
+ with patch("s2_sdk._client.asyncio.sleep", new=_sleep):
+ await pool._reap_idle()
+
+ assert "https://example.com" not in pool._hosts
+ assert "https://other.example.com" in pool._hosts
+ finally:
+ pool._closed = True
+ await pool.close()
+
+
+@pytest.mark.asyncio
+async def test_reaper_prunes_empty_host_lists():
+ pool = ConnectionPool(connect_timeout=5.0)
+ try:
+ dead = _PooledConnection(_mock_connection(available=False))
+ pool._hosts["https://example.com"] = [dead]
+ pool._host_locks["https://example.com"] = asyncio.Lock()
+
+ async def _sleep(_: float) -> None:
+ pool._closed = True
+
+ with patch("s2_sdk._client.asyncio.sleep", new=_sleep):
+ await pool._reap_idle()
+
+ assert "https://example.com" not in pool._hosts
+ assert "https://example.com" not in pool._host_locks
+ finally:
+ pool._closed = True
+ await pool.close()
+
+
+def test_build_headers_omits_accept_encoding_when_compression_disabled():
+ pool = MagicMock()
+
+ client = HttpClient(pool=pool, base_url="https://example.com", request_timeout=30.0)
+
+ headers = client._build_headers("GET", "/v1/test")
+
+ assert "accept-encoding" not in {key for key, _ in headers}
+
+
+@pytest.mark.parametrize(
+ ("compression", "encoding"),
+ [
+ (Compression.GZIP, "gzip"),
+ (Compression.ZSTD, "zstd"),
+ ],
+)
+def test_build_headers_sets_accept_encoding_from_compression(
+ compression: Compression, encoding: str
+):
+ pool = MagicMock()
+
+ client = HttpClient(
+ pool=pool,
+ base_url="https://example.com",
+ request_timeout=30.0,
+ compression=compression,
+ )
+
+ headers = client._build_headers("GET", "/v1/test")
+ accept_encoding = [value for key, value in headers if key == "accept-encoding"]
+
+ assert accept_encoding == [encoding]
+
+
+@pytest.mark.asyncio
+async def test_truncated_body_raises_error():
+ """Receiving None sentinel with state.error set should raise."""
+ from s2_sdk._client import StreamingResponse
+
+ state = _StreamState()
+ q = state.data_queue
+
+ # Simulate partial data then error
+ q.put_nowait(b"partial")
+ error = ConnectionError("stream reset")
+ state.error = error
+ q.put_nowait(None)
+
+ resp = StreamingResponse(
+ status_code=200,
+ data_queue=q,
+ ended=state.ended,
+ stream_state=state,
+ )
+
+ # aread should raise the error
+ with pytest.raises(ConnectionError, match="stream reset"):
+ await resp.aread()
+
+
+@pytest.mark.asyncio
+async def test_truncated_body_raises_in_aiter():
+ """aiter_bytes should raise error on None sentinel with state.error."""
+ from s2_sdk._client import StreamingResponse
+
+ state = _StreamState()
+ q = state.data_queue
+
+ q.put_nowait(b"chunk1")
+ error = ConnectionError("reset")
+ state.error = error
+ q.put_nowait(None)
+
+ resp = StreamingResponse(
+ status_code=200,
+ data_queue=q,
+ ended=state.ended,
+ stream_state=state,
+ )
+
+ chunks = []
+ with pytest.raises(ConnectionError, match="reset"):
+ async for chunk in resp.aiter_bytes():
+ chunks.append(chunk)
+ assert chunks == [b"chunk1"]
+
+
+@pytest.mark.asyncio
+async def test_clean_eof_no_error():
+ """Normal EOF (no error) should return data without raising."""
+ from s2_sdk._client import StreamingResponse
+
+ state = _StreamState()
+ q = state.data_queue
+
+ q.put_nowait(b"hello")
+ q.put_nowait(None)
+ state.ended.set()
+
+ resp = StreamingResponse(
+ status_code=200,
+ data_queue=q,
+ ended=state.ended,
+ stream_state=state,
+ )
+
+ data = await resp.aread()
+ assert data == b"hello"
+
+
+@pytest.mark.asyncio
+async def test_aread_acks_each_consumed_chunk():
+ from s2_sdk._client import StreamingResponse
+
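+    # Queued chunks are (data, flow_bytes) tuples; aread should ack each chunk's
+    # flow-control bytes as it consumes them.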
+ state = _StreamState()
+ q = state.data_queue
+ q.put_nowait((b"a" * 20000, 20000))
+ q.put_nowait((b"b" * 20000, 20000))
+ q.put_nowait(None)
+ state.unacked_flow_bytes = 40000
+
+ ack = AsyncMock()
+ resp = StreamingResponse(
+ status_code=200,
+ data_queue=q,
+ ended=state.ended,
+ stream_state=state,
+ ack=ack,
+ )
+
+ data = await resp.aread()
+
+ assert data == (b"a" * 20000) + (b"b" * 20000)
+ assert state.unacked_flow_bytes == 0
+ assert [call.args for call in ack.await_args_list] == [(20000,), (20000,)]
+
+
+@pytest.mark.asyncio
+async def test_aiter_acks_only_consumed_chunk_on_early_exit():
+ from s2_sdk._client import StreamingResponse
+
+ state = _StreamState()
+ q = state.data_queue
+ q.put_nowait((b"first", 40000))
+ q.put_nowait((b"second", 7))
+ state.unacked_flow_bytes = 40007
+
+ ack = AsyncMock()
+ resp = StreamingResponse(
+ status_code=200,
+ data_queue=q,
+ ended=state.ended,
+ stream_state=state,
+ ack=ack,
+ )
+
+ iterator = resp.aiter_bytes()
+ chunk = await anext(iterator)
+ assert chunk == b"first"
+ await iterator.aclose()
+
+ assert state.unacked_flow_bytes == 7
+ assert [call.args for call in ack.await_args_list] == [(40000,)]
+
+
+@pytest.mark.asyncio
+async def test_drain_body_resets_stream_on_generator_error():
+ from s2_sdk._client import _drain_body
+
+ async def _broken_body():
+ yield b"first"
+ raise RuntimeError("boom")
+
+ conn = AsyncMock()
+
+ with pytest.raises(RuntimeError, match="boom"):
+ await _drain_body(conn, 1, _broken_body(), None)
+
+ conn.send_data.assert_awaited_once_with(1, b"first", on_write=None)
+ conn.reset_stream.assert_awaited_once_with(1)
+ conn.end_stream.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_close_fails_inflight_streams():
+ from s2_sdk._client import Connection
+
+ conn = Connection(
+ host="localhost",
+ port=443,
+ ssl_context=None,
+ connect_timeout=5.0,
+ )
+ state = _StreamState()
+ conn._streams[1] = state
+
+ await conn.close()
+
+ assert isinstance(state.error, ConnectionClosedError)
+ assert state.response_headers.done()
+ assert state.window_updated.is_set()
+ assert state.data_queue.get_nowait() is None
+
+
+@pytest.mark.asyncio
+async def test_fail_stream_is_idempotent_for_queue_sentinel():
+ from s2_sdk._client import Connection
+
+ conn = Connection(
+ host="localhost",
+ port=443,
+ ssl_context=None,
+ connect_timeout=5.0,
+ )
+ state = _StreamState()
+
+ conn._fail_stream(state, ConnectionClosedError("first"))
+ conn._fail_stream(state, ConnectionClosedError("second"))
+
+ assert state.data_queue.qsize() == 1
+ assert state.data_queue.get_nowait() is None
+
+
+@pytest.mark.parametrize("status_code", [412, 416])
+def test_raise_for_status_falls_back_to_text_for_non_json_special_status(
+ status_code: int,
+):
+ response = Response(status_code, b"not-json")
+
+ with pytest.raises(S2ServerError) as exc_info:
+ _raise_for_status(response)
+
+ assert exc_info.value.status_code == status_code
+ assert exc_info.value.code == "unknown"
+ assert str(exc_info.value) == "not-json"
+
+
+@pytest.mark.asyncio
+async def test_remote_settings_changed_wakes_waiters():
+ import h2.events
+
+ from s2_sdk._client import Connection
+
+ conn = Connection(
+ host="localhost",
+ port=443,
+ ssl_context=None,
+ connect_timeout=5.0,
+ )
+ state = _StreamState()
+ conn._streams[1] = state
+
+ conn._handle_event(h2.events.RemoteSettingsChanged())
+
+ assert conn._settings_received.is_set()
+ assert state.window_updated.is_set()
+
+
+def test_origin_rejects_path():
+ from s2_sdk._client import _origin
+
+ with pytest.raises(S2ClientError, match="origin without path or query"):
+ _origin("https://example.com/api")
+
+
+def test_origin_invalid_port_is_client_error():
+ from s2_sdk._client import _origin
+
+ with pytest.raises(S2ClientError, match="Invalid endpoint URL"):
+ _origin("https://example.com:99999")
diff --git a/tests/test_compression.py b/tests/test_compression.py
new file mode 100644
index 0000000..f879b60
--- /dev/null
+++ b/tests/test_compression.py
@@ -0,0 +1,33 @@
+from s2_sdk._compression import compress, decompress
+from s2_sdk._types import Compression
+
+
+class TestCompression:
+ def test_zstd_roundtrip(self):
+ data = b"hello world" * 100
+ compressed = compress(data, Compression.ZSTD)
+ assert compressed != data
+ decompressed = decompress(compressed, Compression.ZSTD)
+ assert decompressed == data
+
+ def test_gzip_roundtrip(self):
+ data = b"hello world" * 100
+ compressed = compress(data, Compression.GZIP)
+ assert compressed != data
+ decompressed = decompress(compressed, Compression.GZIP)
+ assert decompressed == data
+
+ def test_none_passthrough(self):
+ data = b"hello world"
+ assert compress(data, Compression.NONE) == data
+ assert decompress(data, Compression.NONE) == data
+
+ def test_zstd_compresses(self):
+ data = b"aaaa" * 1000
+ compressed = compress(data, Compression.ZSTD)
+ assert len(compressed) < len(data)
+
+ def test_gzip_compresses(self):
+ data = b"aaaa" * 1000
+ compressed = compress(data, Compression.GZIP)
+ assert len(compressed) < len(data)
diff --git a/tests/test_metrics_ops.py b/tests/test_metrics_ops.py
new file mode 100644
index 0000000..3b5873c
--- /dev/null
+++ b/tests/test_metrics_ops.py
@@ -0,0 +1,192 @@
+import time
+
+import pytest
+
+from s2_sdk import (
+ S2,
+ AccountMetricSet,
+ Accumulation,
+ BasinMetricSet,
+ Gauge,
+ Label,
+ S2Basin,
+ S2ServerError,
+ S2Stream,
+ Scalar,
+ StreamMetricSet,
+ TimeseriesInterval,
+)
+
+
+def _default_range() -> tuple[int, int]:
+ """Return a (start, end) range covering the last hour."""
+ now = int(time.time())
+ return now - 3600, now
+
+
+@pytest.mark.metrics
+class TestMetricsOperations:
+ async def test_account_metrics_active_basins(self, s2: S2):
+ start, end = _default_range()
+ metrics = await s2.account_metrics(
+ set=AccountMetricSet.ACTIVE_BASINS, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+ for m in metrics:
+ assert isinstance(m, (Scalar, Accumulation, Gauge, Label))
+
+ async def test_account_metrics_account_ops(self, s2: S2):
+ start, end = _default_range()
+ metrics = await s2.account_metrics(
+ set=AccountMetricSet.ACCOUNT_OPS, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_account_metrics_account_ops_with_interval(self, s2: S2):
+ start, end = _default_range()
+ metrics = await s2.account_metrics(
+ set=AccountMetricSet.ACCOUNT_OPS,
+ start=start,
+ end=end,
+ interval=TimeseriesInterval.HOUR,
+ )
+ assert isinstance(metrics, list)
+
+ async def test_account_metrics_empty_time_range(self, s2: S2):
+ metrics = await s2.account_metrics(
+ set=AccountMetricSet.ACTIVE_BASINS, start=0, end=1
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_storage(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.STORAGE, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_ops(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.BASIN_OPS, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_throughput(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.APPEND_THROUGHPUT, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_empty_time_range(self, s2: S2, basin: S2Basin):
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.STORAGE, start=0, end=1
+ )
+ assert isinstance(metrics, list)
+
+ async def test_stream_metrics_storage(
+ self, s2: S2, shared_basin: S2Basin, stream: S2Stream
+ ):
+ start, end = _default_range()
+ metrics = await s2.stream_metrics(
+ shared_basin.name,
+ stream.name,
+ set=StreamMetricSet.STORAGE,
+ start=start,
+ end=end,
+ )
+ assert isinstance(metrics, list)
+
+ async def test_stream_metrics_empty_time_range(
+ self, s2: S2, shared_basin: S2Basin, stream: S2Stream
+ ):
+ metrics = await s2.stream_metrics(
+ shared_basin.name, stream.name, set=StreamMetricSet.STORAGE, start=0, end=1
+ )
+ assert isinstance(metrics, list)
+
+ async def test_account_metrics_account_ops_minute_interval(self, s2: S2):
+ start, end = _default_range()
+ metrics = await s2.account_metrics(
+ set=AccountMetricSet.ACCOUNT_OPS,
+ start=start,
+ end=end,
+ interval=TimeseriesInterval.MINUTE,
+ )
+ assert isinstance(metrics, list)
+
+ async def test_account_metrics_account_ops_day_interval(self, s2: S2):
+ start, end = _default_range()
+ metrics = await s2.account_metrics(
+ set=AccountMetricSet.ACCOUNT_OPS,
+ start=start,
+ end=end,
+ interval=TimeseriesInterval.DAY,
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_append_ops(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.APPEND_OPS, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_read_ops(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.READ_OPS, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_read_throughput(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ metrics = await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.READ_THROUGHPUT, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_account_metrics_invalid_time_ranges(self, s2: S2):
+ now = int(time.time())
+ # start > end is invalid
+ with pytest.raises(S2ServerError):
+ await s2.account_metrics(
+ set=AccountMetricSet.ACTIVE_BASINS, start=now + 3600, end=now
+ )
+
+ async def test_account_metrics_all_sets(self, s2: S2):
+ start, end = _default_range()
+ for metric_set in AccountMetricSet:
+ metrics = await s2.account_metrics(set=metric_set, start=start, end=end)
+ assert isinstance(metrics, list)
+
+ async def test_basin_metrics_invalid_time_ranges(self, s2: S2, basin: S2Basin):
+ now = int(time.time())
+ # start > end is invalid
+ with pytest.raises(S2ServerError):
+ await s2.basin_metrics(
+ basin.name, set=BasinMetricSet.STORAGE, start=now + 3600, end=now
+ )
+
+ async def test_basin_metrics_all_sets(self, s2: S2, basin: S2Basin):
+ start, end = _default_range()
+ for metric_set in BasinMetricSet:
+ metrics = await s2.basin_metrics(
+ basin.name, set=metric_set, start=start, end=end
+ )
+ assert isinstance(metrics, list)
+
+ async def test_stream_metrics_invalid_time_ranges(
+ self, s2: S2, shared_basin: S2Basin, stream: S2Stream
+ ):
+ now = int(time.time())
+ # start > end is invalid
+ with pytest.raises(S2ServerError):
+ await s2.stream_metrics(
+ shared_basin.name,
+ stream.name,
+ set=StreamMetricSet.STORAGE,
+ start=now + 3600,
+ end=now,
+ )
diff --git a/tests/test_producer.py b/tests/test_producer.py
new file mode 100644
index 0000000..27f7550
--- /dev/null
+++ b/tests/test_producer.py
@@ -0,0 +1,335 @@
+import asyncio
+from datetime import timedelta
+from unittest.mock import AsyncMock, patch
+
+import pytest
+
+from s2_sdk import (
+ AppendAck,
+ AppendInput,
+ Batching,
+ BatchSubmitTicket,
+ Compression,
+ IndexedAppendAck,
+ Producer,
+ Record,
+ RecordSubmitTicket,
+ Retry,
+ S2ClientError,
+ StreamPosition,
+)
+
+_PATCH_TARGET = "s2_sdk._producer.AppendSession"
+
+
+def _ack(start_seq: int = 0, end_seq: int = 1) -> AppendAck:
+ return AppendAck(
+ start=StreamPosition(seq_num=start_seq, timestamp=10),
+ end=StreamPosition(seq_num=end_seq, timestamp=10),
+ tail=StreamPosition(seq_num=end_seq, timestamp=10),
+ )
+
+
+def _mock_session():
+ """Create a mock AppendSession that tracks submit calls and resolves tickets."""
+ session = AsyncMock()
+ session._seq = 0
+
+ async def _submit(input: AppendInput):
+ n = len(input.records)
+ start = session._seq
+ session._seq += n
+ ack = _ack(start_seq=start, end_seq=start + n)
+ future: asyncio.Future[AppendAck] = asyncio.get_running_loop().create_future()
+ future.set_result(ack)
+ return BatchSubmitTicket(future)
+
+ session.submit = AsyncMock(side_effect=_submit)
+ session.close = AsyncMock()
+ session.__aenter__ = AsyncMock(return_value=session)
+ session.__aexit__ = AsyncMock(return_value=False)
+ return session
+
+
+def _producer(
+ batching: Batching | None = None,
+ fencing_token: str | None = None,
+ match_seq_num: int | None = None,
+) -> tuple[Producer, AsyncMock]:
+ mock_session = _mock_session()
+ with patch(_PATCH_TARGET, return_value=mock_session):
+ p = Producer(
+ client=AsyncMock(),
+ stream_name="test-stream",
+ retry=Retry(max_attempts=1),
+ compression=Compression.NONE,
+ fencing_token=fencing_token,
+ match_seq_num=match_seq_num,
+ max_unacked_bytes=5 * 1024 * 1024,
+ batching=batching or Batching(),
+ )
+ return p, mock_session
+
+
+@pytest.mark.asyncio
+async def test_submit_and_await_ticket():
+ producer, _ = _producer()
+ ticket = await producer.submit(Record(body=b"hello"))
+ assert isinstance(ticket, RecordSubmitTicket)
+ ack = await ticket
+ assert isinstance(ack, IndexedAppendAck)
+ assert ack.seq_num == 0
+ assert ack.batch.start.seq_num == 0
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_multiple_records_batched():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=3),
+ )
+ tickets = []
+ for i in range(3):
+ ticket = await producer.submit(Record(body=f"record-{i}".encode()))
+ tickets.append(ticket)
+
+ for i, ticket in enumerate(tickets):
+ ack = await ticket
+ assert ack.seq_num == i
+
+ # All 3 records should be in one batch
+ assert mock_session.submit.call_count == 1
+ submitted_input = mock_session.submit.call_args[0][0]
+ assert len(submitted_input.records) == 3
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_fencing_token_and_match_seq_num():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=2),
+ fencing_token="my-fence",
+ match_seq_num=10,
+ )
+ tickets = []
+ for _ in range(2):
+ tickets.append(await producer.submit(Record(body=b"x")))
+
+ for t in tickets:
+ await t
+
+ submitted_input = mock_session.submit.call_args[0][0]
+ assert submitted_input.fencing_token == "my-fence"
+ assert submitted_input.match_seq_num == 10
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_match_seq_num_auto_increments():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=2),
+ match_seq_num=0,
+ )
+ # First batch of 2
+ for _ in range(2):
+ await producer.submit(Record(body=b"x"))
+ # Second batch of 2
+ for _ in range(2):
+ await producer.submit(Record(body=b"y"))
+
+ await producer.close()
+
+ calls = mock_session.submit.call_args_list
+ assert len(calls) == 2
+ assert calls[0][0][0].match_seq_num == 0
+ assert calls[1][0][0].match_seq_num == 2
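+ # Interpretation of the two assertions above: the producer advances
+ # match_seq_num by the number of records acked in each flushed batch, so the
+ # optimistic-concurrency check stays aligned with the stream tail.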
+
+
+@pytest.mark.asyncio
+async def test_linger_timer_flushes_partial_batch():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=100, linger=timedelta(milliseconds=10)),
+ )
+ ticket = await producer.submit(Record(body=b"hello"))
+ # Wait for linger to fire
+ ack = await asyncio.wait_for(ticket, timeout=1.0)
+ assert ack.seq_num == 0
+ assert mock_session.submit.call_count == 1
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_batch_limit_by_record_count():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=2),
+ )
+ t1 = await producer.submit(Record(body=b"a"))
+ t2 = await producer.submit(Record(body=b"b")) # triggers flush
+
+ ack1 = await t1
+ ack2 = await t2
+ assert ack1.seq_num == 0
+ assert ack2.seq_num == 1
+ assert mock_session.submit.call_count == 1
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_batch_limit_by_bytes():
+ producer, mock_session = _producer(
+ batching=Batching(max_bytes=20),
+ )
+ # Each record: 8 (overhead) + body. Body of 12 bytes → 20 bytes metered.
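+ # With max_bytes=20, that single record already fills the batch budget, so the
+ # second 20-byte record below cannot join the same batch.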
+ t1 = await producer.submit(Record(body=b"x" * 12))
+ # Second record pushes over the limit
+ t2 = await producer.submit(Record(body=b"y" * 12))
+
+ ack1 = await t1
+ await t2
+ assert ack1.seq_num == 0
+ # Second record goes in a new batch
+ assert mock_session.submit.call_count == 2
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_error_propagation():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=2),
+ )
+ error = RuntimeError("session failed")
+
+ async def _failing_submit(input):
+ raise error
+
+ mock_session.submit = AsyncMock(side_effect=_failing_submit)
+
+ t1 = await producer.submit(Record(body=b"a"))
+ with pytest.raises(RuntimeError, match="session failed"):
+ await producer.submit(Record(body=b"b")) # triggers flush which fails
+
+ with pytest.raises(RuntimeError, match="session failed"):
+ await t1
+
+ with pytest.raises(RuntimeError, match="session failed"):
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_submit_after_close_raises():
+ producer, _ = _producer()
+ await producer.close()
+ with pytest.raises(S2ClientError, match="closed"):
+ await producer.submit(Record(body=b"hello"))
+
+
+@pytest.mark.asyncio
+async def test_context_manager():
+ producer, _ = _producer()
+ async with producer as p:
+ ticket = await p.submit(Record(body=b"hello"))
+ ack = await ticket
+ assert ack.seq_num == 0
+
+
+@pytest.mark.asyncio
+async def test_close_flushes_remaining():
+ producer, mock_session = _producer(
+ batching=Batching(max_records=100),
+ )
+ ticket = await producer.submit(Record(body=b"hello"))
+ # Record hasn't been flushed yet (batch limit not met, no linger)
+ assert mock_session.submit.call_count == 0
+ await producer.close()
+ # close() should have flushed
+ assert mock_session.submit.call_count == 1
+ ack = await ticket
+ assert ack.seq_num == 0
+
+
+@pytest.mark.asyncio
+async def test_resolve_batch_error_propagates_to_futures():
+ """Test that an error during batch resolution propagates to individual tickets."""
+ producer, mock_session = _producer(
+ batching=Batching(max_records=2),
+ )
+ error = RuntimeError("ack failed")
+
+ async def _submit_failing_ticket(input):
+ future: asyncio.Future[AppendAck] = asyncio.get_running_loop().create_future()
+ future.set_exception(error)
+ return BatchSubmitTicket(future)
+
+ mock_session.submit = AsyncMock(side_effect=_submit_failing_ticket)
+
+ t1 = await producer.submit(Record(body=b"a"))
+ t2 = await producer.submit(Record(body=b"b")) # triggers flush
+
+ with pytest.raises(RuntimeError, match="ack failed"):
+ await t1
+ with pytest.raises(RuntimeError, match="ack failed"):
+ await t2
+
+ with pytest.raises(RuntimeError, match="ack failed"):
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_drain_resolves_multiple_batches_in_order():
+ """Drain loop resolves batches sequentially in FIFO order."""
+ producer, mock_session = _producer(
+ batching=Batching(max_records=2),
+ )
+ tickets = []
+ # Submit 6 records → 3 batches of 2
+ for i in range(6):
+ tickets.append(await producer.submit(Record(body=f"r{i}".encode())))
+
+ for i, ticket in enumerate(tickets):
+ ack = await ticket
+ assert ack.seq_num == i
+
+ assert mock_session.submit.call_count == 3
+ await producer.close()
+
+
+@pytest.mark.asyncio
+async def test_error_in_drain_fails_remaining_batches():
+ """When drain encounters an error, all remaining pending batches are failed."""
+ producer, mock_session = _producer(
+ batching=Batching(max_records=1),
+ )
+ call_count = 0
+ error = RuntimeError("drain boom")
+
+ async def _submit_nth(input: AppendInput):
+ nonlocal call_count
+ call_count += 1
+ n = len(input.records)
+ future: asyncio.Future[AppendAck] = asyncio.get_running_loop().create_future()
+ if call_count == 1:
+ # First batch succeeds
+ ack = _ack(start_seq=0, end_seq=n)
+ future.set_result(ack)
+ elif call_count == 2:
+ # Second batch fails — drain handles this and fails remaining
+ future.set_exception(error)
+ # else: leave unresolved (orphaned by drain exit, no warning)
+ return BatchSubmitTicket(future)
+
+ mock_session.submit = AsyncMock(side_effect=_submit_nth)
+
+ t1 = await producer.submit(Record(body=b"ok"))
+ t2 = await producer.submit(Record(body=b"fail"))
+ t3 = await producer.submit(Record(body=b"also-fail"))
+
+ ack1 = await t1
+ assert ack1.seq_num == 0
+
+ with pytest.raises(RuntimeError, match="drain boom"):
+ await t2
+ with pytest.raises(RuntimeError, match="drain boom"):
+ await t3
+
+ with pytest.raises(RuntimeError, match="drain boom"):
+ await producer.close()
diff --git a/tests/test_retry.py b/tests/test_retry.py
new file mode 100644
index 0000000..a2d6682
--- /dev/null
+++ b/tests/test_retry.py
@@ -0,0 +1,145 @@
+from s2_sdk._exceptions import ConnectError, ReadTimeoutError, S2ServerError
+from s2_sdk._frame_signal import FrameSignal
+from s2_sdk._retrier import (
+ compute_backoffs,
+ has_no_side_effects,
+ is_safe_to_retry_session,
+ is_safe_to_retry_unary,
+)
+from s2_sdk._types import AppendRetryPolicy
+
+
+class TestComputeBackoffs:
+ def test_backoffs_count(self):
+ backoffs = compute_backoffs(5)
+ assert len(backoffs) == 5
+
+ def test_backoffs_range(self):
+ backoffs = compute_backoffs(5, min_base_delay=0.1, max_base_delay=1.0)
+ for b in backoffs:
+ # Each backoff is base_delay + jitter where jitter in [0, base_delay]
+ # so max is 2 * max_base_delay
+ assert 0 <= b <= 2.0
+
+ def test_backoffs_empty(self):
+ backoffs = compute_backoffs(0)
+ assert backoffs == []
+
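+# For orientation only: a hypothetical backoff shape consistent with the bounds
+# asserted above (not necessarily the SDK's compute_backoffs implementation):
+#
+#   bases = [min(max_base_delay, min_base_delay * 2**i) for i in range(n)]
+#   backoffs = [b + random.uniform(0, b) for b in bases]
+#
+# i.e. each value is a clamped base delay plus jitter in [0, base], hence the
+# 2 * max_base_delay upper bound.
+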
+
+class TestHasNoSideEffects:
+ def test_rate_limited(self):
+ e = S2ServerError("rate_limited", "rate limited", 429)
+ assert has_no_side_effects(e) is True
+
+ def test_hot_server(self):
+ e = S2ServerError("hot_server", "hot server", 502)
+ assert has_no_side_effects(e) is True
+
+ def test_other_server_error(self):
+ e = S2ServerError("internal", "internal", 500)
+ assert has_no_side_effects(e) is False
+
+ def test_429_wrong_code(self):
+ e = S2ServerError("throttled", "throttled", 429)
+ assert has_no_side_effects(e) is False
+
+ def test_connect_error_connection_refused(self):
+ cause = ConnectionRefusedError("connection refused")
+ e = ConnectError("connection refused")
+ e.__cause__ = cause
+ assert has_no_side_effects(e) is True
+
+ def test_connect_error_without_refused_cause(self):
+ e = ConnectError("connection timed out")
+ assert has_no_side_effects(e) is False
+
+ def test_other_transport_error(self):
+ e = ReadTimeoutError("timeout")
+ assert has_no_side_effects(e) is False
+
+ def test_generic_exception(self):
+ e = RuntimeError("something")
+ assert has_no_side_effects(e) is False
+
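+# Summary of the classification exercised above: only rate_limited/429,
+# hot_server/502, and a ConnectError caused by ConnectionRefusedError are
+# treated as side-effect free; every other error is treated as potentially
+# having had effects.
+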
+
+class TestSafeToRetryUnary:
+ def test_no_policy_retries_retryable(self):
+ e = S2ServerError("internal", "error", 500)
+ assert is_safe_to_retry_unary(e, None) is True
+
+ def test_all_policy_retries_retryable(self):
+ e = S2ServerError("internal", "error", 500)
+ assert is_safe_to_retry_unary(e, AppendRetryPolicy.ALL) is True
+
+ def test_all_policy_skips_non_retryable(self):
+ e = S2ServerError("bad_request", "bad request", 400)
+ assert is_safe_to_retry_unary(e, AppendRetryPolicy.ALL) is False
+
+ def test_nse_policy_retries_no_side_effect_error(self):
+ e = S2ServerError("rate_limited", "rate limited", 429)
+ assert is_safe_to_retry_unary(e, AppendRetryPolicy.NO_SIDE_EFFECTS) is True
+
+ def test_nse_policy_retries_connect_error(self):
+ cause = ConnectionRefusedError("connection refused")
+ e = ConnectError("connection refused")
+ e.__cause__ = cause
+ assert is_safe_to_retry_unary(e, AppendRetryPolicy.NO_SIDE_EFFECTS) is True
+
+ def test_nse_policy_skips_ambiguous_error(self):
+ e = S2ServerError("internal", "error", 500)
+ assert is_safe_to_retry_unary(e, AppendRetryPolicy.NO_SIDE_EFFECTS) is False
+
+ def test_nse_policy_skips_timeout(self):
+ e = ReadTimeoutError("timeout")
+ assert is_safe_to_retry_unary(e, AppendRetryPolicy.NO_SIDE_EFFECTS) is False
+
+
+class TestSafeToRetrySession:
+ def test_all_policy_always_retries(self):
+ e = S2ServerError("internal", "error", 500)
+ assert is_safe_to_retry_session(e, AppendRetryPolicy.ALL, True, None) is True
+
+ def test_all_policy_skips_non_retryable(self):
+ e = S2ServerError("bad_request", "bad request", 400)
+ assert is_safe_to_retry_session(e, AppendRetryPolicy.ALL, False, None) is False
+
+ def test_nse_no_inflight_retries(self):
+ e = S2ServerError("internal", "error", 500)
+ assert (
+ is_safe_to_retry_session(e, AppendRetryPolicy.NO_SIDE_EFFECTS, False, None)
+ is True
+ )
+
+ def test_nse_inflight_signal_not_set_retries(self):
+ signal = FrameSignal()
+ e = S2ServerError("internal", "error", 500)
+ assert (
+ is_safe_to_retry_session(e, AppendRetryPolicy.NO_SIDE_EFFECTS, True, signal)
+ is True
+ )
+
+ def test_nse_inflight_signal_set_no_side_effects_retries(self):
+ signal = FrameSignal()
+ signal.signal()
+ e = S2ServerError("rate_limited", "rate limited", 429)
+ assert (
+ is_safe_to_retry_session(e, AppendRetryPolicy.NO_SIDE_EFFECTS, True, signal)
+ is True
+ )
+
+ def test_nse_inflight_signal_set_ambiguous_skips(self):
+ signal = FrameSignal()
+ signal.signal()
+ e = S2ServerError("internal", "error", 500)
+ assert (
+ is_safe_to_retry_session(e, AppendRetryPolicy.NO_SIDE_EFFECTS, True, signal)
+ is False
+ )
+
+ def test_nse_inflight_no_signal_skips(self):
+ """No FrameSignal at all + inflight + ambiguous error -> not safe."""
+ e = S2ServerError("internal", "error", 500)
+ assert (
+ is_safe_to_retry_session(e, AppendRetryPolicy.NO_SIDE_EFFECTS, True, None)
+ is False
+ )
diff --git a/tests/test_s2s_protocol.py b/tests/test_s2s_protocol.py
new file mode 100644
index 0000000..6085f6d
--- /dev/null
+++ b/tests/test_s2s_protocol.py
@@ -0,0 +1,86 @@
+import pytest
+
+from s2_sdk._s2s._protocol import (
+ Message,
+ deframe_data,
+ frame_message,
+ maybe_compress,
+)
+from s2_sdk._types import Compression
+
+
+class TestMessageFraming:
+ def test_frame_and_deframe_no_compression(self):
+ body = b"hello world"
+ data = frame_message(
+ Message(body, terminal=False, compression=Compression.NONE)
+ )
+ msg = deframe_data(data)
+
+ assert msg.body == body
+ assert msg.terminal is False
+ assert msg.compression == Compression.NONE
+
+ def test_frame_and_deframe_with_zstd(self):
+ body = b"hello world" * 100
+ compressed, comp = maybe_compress(body, Compression.ZSTD)
+ data = frame_message(Message(compressed, terminal=False, compression=comp))
+ msg = deframe_data(data)
+
+ # deframe_data returns raw (still-compressed) body
+ assert msg.compression == Compression.ZSTD
+ assert msg.terminal is False
+ from s2_sdk._compression import decompress
+
+ assert decompress(msg.body, msg.compression) == body
+
+ def test_terminal_message(self):
+ body = b"\x00\x00some error"
+ data = frame_message(Message(body, terminal=True, compression=Compression.NONE))
+ msg = deframe_data(data)
+
+ assert msg.terminal is True
+ # Terminal messages are not decompressed
+ assert msg.body == body
+
+ def test_message_length_encoding(self):
+ # Verify 3-byte length prefix covers flag + body
+ body = b"x" * 256
+ data = frame_message(
+ Message(body, terminal=False, compression=Compression.NONE)
+ )
+
+ # First 3 bytes are length (big-endian), includes 1 byte flag + body
+ length = int.from_bytes(data[0:3], "big")
+ assert length == 257 # 1 (flag) + 256 (body)
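+ # Implied wire layout: [3-byte big-endian length][1-byte flag][body]; the
+ # length counts the flag byte plus the body, and the flag presumably encodes
+ # the terminal bit and compression codec.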
+
+ def test_empty_body(self):
+ data = frame_message(Message(b"", terminal=False, compression=Compression.NONE))
+ msg = deframe_data(data)
+
+ assert msg.body == b""
+ assert msg.terminal is False
+
+ def test_message_too_short(self):
+ with pytest.raises(ValueError, match="Message too short"):
+ deframe_data(b"\x00\x00")
+
+
+class TestMaybeCompress:
+ def test_below_threshold(self):
+ body = b"small"
+ compressed, comp_code = maybe_compress(body, Compression.ZSTD)
+ assert compressed == body
+ assert comp_code == Compression.NONE
+
+ def test_above_threshold(self):
+ body = b"x" * 2048
+ compressed, comp_code = maybe_compress(body, Compression.ZSTD)
+ assert comp_code == Compression.ZSTD
+ assert len(compressed) < len(body)
+
+ def test_no_compression_requested(self):
+ body = b"x" * 2048
+ compressed, comp_code = maybe_compress(body, compression=Compression.NONE)
+ assert compressed == body
+ assert comp_code == Compression.NONE
diff --git a/tests/test_session.py b/tests/test_session.py
new file mode 100644
index 0000000..129e61e
--- /dev/null
+++ b/tests/test_session.py
@@ -0,0 +1,723 @@
+import asyncio
+from collections.abc import AsyncIterator
+from typing import cast
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+import s2_sdk._generated.s2.v1.s2_pb2 as pb
+from s2_sdk import (
+ AppendAck,
+ AppendInput,
+ AppendRetryPolicy,
+ AppendSession,
+ BatchSubmitTicket,
+ Compression,
+ Record,
+ Retry,
+ S2ClientError,
+ StreamPosition,
+)
+from s2_sdk._append_session import _AppendPermits, _Semaphore # noqa: PLC2701
+from s2_sdk._client import HttpClient
+from s2_sdk._s2s._append_session import run_append_session
+from s2_sdk._s2s._protocol import Message, deframe_data, frame_message
+
+_PATCH_TARGET = "s2_sdk._append_session.run_append_session"
+
+
+def _ack(start_seq: int = 0, end_seq: int = 1) -> AppendAck:
+ return AppendAck(
+ start=StreamPosition(seq_num=start_seq, timestamp=10),
+ end=StreamPosition(seq_num=end_seq, timestamp=10),
+ tail=StreamPosition(seq_num=end_seq, timestamp=10),
+ )
+
+
+def _input(n_records: int = 1, body: bytes = b"payload") -> AppendInput:
+ return AppendInput(records=[Record(body=body) for _ in range(n_records)])
+
+
+async def _fake_run(client, stream_name, inputs, retry, compression, ack_timeout=None):
+ seq = 0
+ async for inp in inputs:
+ n = len(inp.records)
+ yield _ack(start_seq=seq, end_seq=seq + n)
+ seq += n
+
+
+async def _slow_fake_run(
+ client, stream_name, inputs, retry, compression, ack_timeout=None
+):
+ seq = 0
+ async for inp in inputs:
+ await asyncio.sleep(0.01)
+ n = len(inp.records)
+ yield _ack(start_seq=seq, end_seq=seq + n)
+ seq += n
+
+
+async def _failing_run(
+ client, stream_name, inputs, retry, compression, ack_timeout=None
+):
+ async for _ in inputs:
+ raise RuntimeError("session failed")
+ yield # unreachable; its presence makes this function an async generator
+
+
+def _mock_client() -> MagicMock:
+ client = MagicMock()
+ client._request_timeout = 5.0
+ return client
+
+
+def _session(
+ max_unacked_bytes: int = 5 * 1024 * 1024,
+ max_unacked_batches: int | None = None,
+) -> AppendSession:
+ return AppendSession(
+ client=_mock_client(),
+ stream_name="test-stream",
+ retry=Retry(max_attempts=1),
+ compression=Compression.NONE,
+ max_unacked_bytes=max_unacked_bytes,
+ max_unacked_batches=max_unacked_batches,
+ )
+
+
+@pytest.mark.asyncio
+async def test_submit_and_await_ticket():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ session = _session()
+ ticket = await session.submit(_input())
+ ack = await ticket
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 1
+ await session.close()
+
+
+@pytest.mark.asyncio
+async def test_ticket_is_awaitable():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ session = _session()
+ ticket = await session.submit(_input())
+ assert isinstance(ticket, BatchSubmitTicket)
+ ack = await ticket
+ assert isinstance(ack, AppendAck)
+ await session.close()
+
+
+@pytest.mark.asyncio
+async def test_multiple_submits_ack_in_order():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ session = _session()
+ tickets = []
+ for _ in range(5):
+ ticket = await session.submit(_input(n_records=1))
+ tickets.append(ticket)
+
+ for i, ticket in enumerate(tickets):
+ ack = await ticket
+ assert ack.start.seq_num == i
+ assert ack.end.seq_num == i + 1
+
+ await session.close()
+
+
+@pytest.mark.asyncio
+async def test_backpressure_bytes_limit():
+ with patch(_PATCH_TARGET, side_effect=_slow_fake_run):
+ session = _session(max_unacked_bytes=100)
+ ticket1 = await session.submit(_input(n_records=1, body=b"x" * 50))
+ ticket2 = await session.submit(_input(n_records=1, body=b"x" * 50))
+
+ ack1 = await ticket1
+ ack2 = await ticket2
+ assert ack1.start.seq_num == 0
+ assert ack2.start.seq_num == 1
+ await session.close()
+
+
+@pytest.mark.asyncio
+async def test_backpressure_batch_limit():
+ with patch(_PATCH_TARGET, side_effect=_slow_fake_run):
+ session = _session(max_unacked_batches=2)
+ ticket1 = await session.submit(_input())
+ ticket2 = await session.submit(_input())
+ ticket3 = await session.submit(_input())
+
+ ack1 = await ticket1
+ ack2 = await ticket2
+ ack3 = await ticket3
+ assert ack1.end.seq_num == 1
+ assert ack2.end.seq_num == 2
+ assert ack3.end.seq_num == 3
+ await session.close()
+
+
+@pytest.mark.asyncio
+async def test_close_flushes_pending():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ session = _session()
+ ticket = await session.submit(_input())
+ await session.close()
+ ack = await ticket
+ assert ack.end.seq_num == 1
+
+
+@pytest.mark.asyncio
+async def test_close_is_idempotent():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ session = _session()
+ await session.close()
+ await session.close()
+
+
+@pytest.mark.asyncio
+async def test_error_propagation():
+ with patch(_PATCH_TARGET, side_effect=_failing_run):
+ session = _session()
+ ticket = await session.submit(_input())
+ with pytest.raises(RuntimeError, match="session failed"):
+ await ticket
+
+
+@pytest.mark.asyncio
+async def test_submit_after_close_raises():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ session = _session()
+ await session.close()
+ with pytest.raises(S2ClientError, match="closed"):
+ await session.submit(_input())
+
+
+@pytest.mark.asyncio
+async def test_submit_after_error_raises():
+ with patch(_PATCH_TARGET, side_effect=_failing_run):
+ session = _session()
+ ticket = await session.submit(_input())
+ with pytest.raises(RuntimeError, match="session failed"):
+ await ticket
+ with pytest.raises(RuntimeError, match="session failed"):
+ await session.submit(_input())
+
+
+@pytest.mark.asyncio
+async def test_context_manager():
+ with patch(_PATCH_TARGET, side_effect=_fake_run):
+ async with _session() as session:
+ ticket = await session.submit(_input())
+ ack = await ticket
+ assert ack.end.seq_num == 1
+
+
+@pytest.mark.asyncio
+async def test_close_rejects_submit_blocked_on_backpressure():
+ with patch(_PATCH_TARGET, side_effect=_slow_fake_run):
+ session = _session(max_unacked_batches=1)
+ ticket = await session.submit(_input())
+ blocked_submit = asyncio.create_task(session.submit(_input()))
+ await asyncio.sleep(0.001)
+ await session.close()
+ await ticket
+ with pytest.raises(S2ClientError, match="closed"):
+ await blocked_submit
+
+
+# ── S2S framing tests ──
+
+
+class _StreamResponse:
+ def __init__(
+ self,
+ messages: list[bytes],
+ content: AsyncIterator[bytes] | None,
+ frame_signal=None,
+ ) -> None:
+ self.status_code = 200
+ self._messages = messages
+ self._content = content
+ self._frame_signal = frame_signal
+ self.sent_messages: list[bytes] = []
+
+ async def __aenter__(self):
+ if self._content is not None:
+ async for chunk in self._content:
+ self.sent_messages.append(chunk)
+ if self._frame_signal is not None:
+ self._frame_signal.signal()
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb) -> bool:
+ return False
+
+ async def aread(self) -> bytes:
+ return b""
+
+ async def aiter_bytes(self):
+ for msg in self._messages:
+ yield msg
+
+
+class _StreamingClient:
+ def __init__(self, response_messages: list[bytes]) -> None:
+ self.calls: list[tuple[str, str]] = []
+ self.responses: list[_StreamResponse] = []
+ self._response_messages = response_messages
+
+ def streaming_request(self, method: str, path: str, **kwargs) -> _StreamResponse:
+ self.calls.append((method, path))
+ response = _StreamResponse(
+ self._response_messages,
+ kwargs.get("content"),
+ kwargs.get("frame_signal"),
+ )
+ self.responses.append(response)
+ return response
+
+
+def _ack_message() -> bytes:
+ ack = pb.AppendAck(
+ start=pb.StreamPosition(seq_num=0, timestamp=10),
+ end=pb.StreamPosition(seq_num=1, timestamp=10),
+ tail=pb.StreamPosition(seq_num=1, timestamp=10),
+ )
+ return frame_message(
+ Message(ack.SerializeToString(), terminal=False, compression=Compression.NONE)
+ )
+
+
+def _terminal_message(status_code: int, body: bytes) -> bytes:
+ return frame_message(
+ Message(
+ status_code.to_bytes(2, "big") + body,
+ terminal=True,
+ compression=Compression.NONE,
+ )
+ )
+
+
+async def _s2s_inputs() -> AsyncIterator[AppendInput]:
+ yield AppendInput(records=[Record(body=b"payload")])
+
+
+def _decode_append_input(data: bytes) -> pb.AppendInput:
+ msg = deframe_data(data)
+ assert msg.terminal is False
+ message = pb.AppendInput()
+ message.ParseFromString(msg.body)
+ return message
+
+
+@pytest.mark.asyncio
+async def test_retrying_append_session_encodes_messages():
+ client = _StreamingClient([_ack_message()])
+
+ outputs = []
+ async for output in run_append_session(
+ cast("HttpClient", client),
+ "orders/us-east",
+ _s2s_inputs(),
+ retry=Retry(max_attempts=1),
+ compression=Compression.NONE,
+ ):
+ outputs.append(output)
+
+ assert len(outputs) == 1
+ assert client.calls == [("POST", "/v1/streams/orders%2Fus-east/records")]
+ assert len(client.responses[0].sent_messages) == 1
+ sent = _decode_append_input(client.responses[0].sent_messages[0])
+ assert sent.records[0].body == b"payload"
+
+
+class _PartialFailureStreamingClient:
+ def __init__(self) -> None:
+ self.calls = 0
+
+ def streaming_request(self, method: str, path: str, **kwargs) -> _StreamResponse:
+ self.calls += 1
+ if self.calls == 1:
+ return _StreamResponse(
+ [
+ _ack_message(),
+ _terminal_message(500, b'{"code":"internal","message":"boom"}'),
+ ],
+ kwargs.get("content"),
+ kwargs.get("frame_signal"),
+ )
+ return _StreamResponse([], kwargs.get("content"), kwargs.get("frame_signal"))
+
+
+async def _two_s2s_inputs() -> AsyncIterator[AppendInput]:
+ yield AppendInput(records=[Record(body=b"payload-0")])
+ yield AppendInput(records=[Record(body=b"payload-1")])
+
+
+@pytest.mark.asyncio
+async def test_retrying_append_session_no_side_effects_stops_after_partial_ack():
+ client = _PartialFailureStreamingClient()
+ outputs = []
+
+ with pytest.raises(BaseExceptionGroup) as exc_info:
+ async for output in run_append_session(
+ cast("HttpClient", client),
+ "orders/us-east",
+ _two_s2s_inputs(),
+ retry=Retry(
+ max_attempts=1,
+ append_retry_policy=AppendRetryPolicy.NO_SIDE_EFFECTS,
+ ),
+ compression=Compression.NONE,
+ ):
+ outputs.append(output)
+
+ assert "boom" in str(exc_info.value.exceptions[0])
+ assert len(outputs) == 1
+ assert client.calls == 1
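+ # With NO_SIDE_EFFECTS and a partially acked in-flight batch, the ambiguous
+ # 500 terminal error is not retried (calls stays at 1), presumably because a
+ # blind resend could duplicate the unacked records.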
+
+
+# ── Permits / Semaphore tests ──
+
+
+@pytest.mark.asyncio
+async def test_permits_acquire_release_basic():
+ permits = _AppendPermits(max_bytes=100)
+ await permits.acquire(50)
+ await permits.acquire(50)
+ permits.release(50)
+ permits.release(50)
+
+
+@pytest.mark.asyncio
+async def test_permits_blocks_when_byte_limit_exceeded():
+ permits = _AppendPermits(max_bytes=100)
+ await permits.acquire(60)
+
+ acquired = asyncio.Event()
+
+ async def _acquire():
+ await permits.acquire(60)
+ acquired.set()
+
+ task = asyncio.create_task(_acquire())
+ await asyncio.sleep(0.01)
+ assert not acquired.is_set()
+
+ permits.release(60)
+ await asyncio.wait_for(task, timeout=1.0)
+ assert acquired.is_set()
+
+
+@pytest.mark.asyncio
+async def test_permits_blocks_on_count_limit():
+ permits = _AppendPermits(max_bytes=10_000, max_count=2)
+ await permits.acquire(10)
+ await permits.acquire(10)
+
+ acquired = asyncio.Event()
+
+ async def _acquire():
+ await permits.acquire(10)
+ acquired.set()
+
+ task = asyncio.create_task(_acquire())
+ await asyncio.sleep(0.01)
+ assert not acquired.is_set()
+
+ permits.release(10)
+ await asyncio.wait_for(task, timeout=1.0)
+ assert acquired.is_set()
+
+
+@pytest.mark.asyncio
+async def test_permits_release_unblocks_waiter():
+ permits = _AppendPermits(max_bytes=100)
+ await permits.acquire(100)
+
+ result = []
+
+ async def _waiter():
+ await permits.acquire(50)
+ result.append("acquired")
+
+ task = asyncio.create_task(_waiter())
+ await asyncio.sleep(0.01)
+ assert result == []
+
+ permits.release(100)
+ await asyncio.wait_for(task, timeout=1.0)
+ assert result == ["acquired"]
+
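+# The permit tests above cover both gates of _AppendPermits: total unacked
+# bytes (max_bytes) and, when set, the number of unacked batches (max_count);
+# releasing capacity wakes a blocked acquirer in either case.
+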
+
+@pytest.mark.asyncio
+async def test_semaphore_acquire_release():
+ sem = _Semaphore(10)
+ await sem.acquire(5)
+ await sem.acquire(5)
+ sem.release(10)
+
+
+@pytest.mark.asyncio
+async def test_semaphore_blocks_on_insufficient_permits():
+ sem = _Semaphore(10)
+ await sem.acquire(8)
+
+ acquired = asyncio.Event()
+
+ async def _acquire():
+ await sem.acquire(5)
+ acquired.set()
+
+ task = asyncio.create_task(_acquire())
+ await asyncio.sleep(0.01)
+ assert not acquired.is_set()
+
+ sem.release(8)
+ await asyncio.wait_for(task, timeout=1.0)
+ assert acquired.is_set()
+
+
+# ── Ack monotonicity tests ──
+
+
+def _ack_message_range(start_seq: int, end_seq: int) -> bytes:
+ ack = pb.AppendAck(
+ start=pb.StreamPosition(seq_num=start_seq, timestamp=10),
+ end=pb.StreamPosition(seq_num=end_seq, timestamp=10),
+ tail=pb.StreamPosition(seq_num=end_seq, timestamp=10),
+ )
+ return frame_message(
+ Message(ack.SerializeToString(), terminal=False, compression=Compression.NONE)
+ )
+
+
+@pytest.mark.asyncio
+async def test_ack_monotonicity_rejects_non_increasing():
+ """Acks with non-increasing end seq_num should raise S2ClientError."""
+ messages = [
+ _ack_message_range(0, 2),
+ _ack_message_range(1, 2), # end == previous end — violation
+ ]
+ client = _StreamingClient(messages)
+
+ async def _inputs():
+ yield AppendInput(records=[Record(body=b"a"), Record(body=b"b")])
+ yield AppendInput(records=[Record(body=b"c")])
+
+ with pytest.raises(BaseExceptionGroup) as exc_info:
+ async for _ in run_append_session(
+ cast("HttpClient", client),
+ "test-stream",
+ _inputs(),
+ retry=Retry(max_attempts=1),
+ compression=Compression.NONE,
+ ):
+ pass
+
+ assert "not monotonically increasing" in str(exc_info.value.exceptions[0])
+
+
+@pytest.mark.asyncio
+async def test_ack_rejects_end_less_than_start():
+ """An ack where end < start should raise S2ClientError."""
+ # Craft a bad ack: end.seq_num < start.seq_num
+ ack = pb.AppendAck(
+ start=pb.StreamPosition(seq_num=5, timestamp=10),
+ end=pb.StreamPosition(seq_num=3, timestamp=10),
+ tail=pb.StreamPosition(seq_num=5, timestamp=10),
+ )
+ bad_msg = frame_message(
+ Message(ack.SerializeToString(), terminal=False, compression=Compression.NONE)
+ )
+ client = _StreamingClient([bad_msg])
+
+ async def _inputs():
+ yield AppendInput(records=[Record(body=b"a")])
+
+ with pytest.raises(BaseExceptionGroup) as exc_info:
+ async for _ in run_append_session(
+ cast("HttpClient", client),
+ "test-stream",
+ _inputs(),
+ retry=Retry(max_attempts=1),
+ compression=Compression.NONE,
+ ):
+ pass
+
+ assert "end < start" in str(exc_info.value.exceptions[0])
+
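+# Together, the two tests above pin the ack invariants the session relies on:
+# start <= end within each ack, and end strictly increasing across acks.
+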
+
+# ── Ack timeout test ──
+
+
+@pytest.mark.asyncio
+async def test_ack_timeout_raises_read_timeout_error():
+ """If the server stops sending acks, ReadTimeoutError should be raised."""
+ from s2_sdk._exceptions import ReadTimeoutError as RTE
+
+ class _HangingStreamResponse:
+ def __init__(self, content, frame_signal=None):
+ self.status_code = 200
+ self._content = content
+ self._frame_signal = frame_signal
+
+ async def __aenter__(self):
+ if self._content is not None:
+ async for chunk in self._content:
+ if self._frame_signal is not None:
+ self._frame_signal.signal()
+ return self
+
+ async def __aexit__(self, *args):
+ return False
+
+ async def aread(self):
+ return b""
+
+ async def aiter_bytes(self):
+ # Never yield any data — simulates server hang
+ await asyncio.sleep(100)
+ yield b"" # pragma: no cover
+
+ class _HangingClient:
+ def streaming_request(self, method, path, **kwargs):
+ return _HangingStreamResponse(
+ kwargs.get("content"), kwargs.get("frame_signal")
+ )
+
+ async def _inputs():
+ yield AppendInput(records=[Record(body=b"data")])
+
+ with pytest.raises(BaseExceptionGroup) as exc_info:
+ async for _ in run_append_session(
+ cast("HttpClient", _HangingClient()),
+ "test-stream",
+ _inputs(),
+ retry=Retry(max_attempts=1),
+ compression=Compression.NONE,
+ ack_timeout=0.01,
+ ):
+ pass
+
+ assert isinstance(exc_info.value.exceptions[0], RTE)
+
+
+# ── Frame signal reset after resend test ──
+
+
+@pytest.mark.asyncio
+async def test_frame_signal_reset_after_resend():
+ """Frame signal should be reset after resend acks are consumed in _run_attempt."""
+ from collections import deque
+
+ from s2_sdk._frame_signal import FrameSignal
+ from s2_sdk._retrier import Attempt
+ from s2_sdk._s2s._append_session import _InflightInput, _run_attempt
+
+ # One resend batch + one fresh batch, each with 1 record.
+ resend_encoded = b"resend-data"
+ pending_resend = (_InflightInput(num_records=1, encoded=resend_encoded),)
+ inflight_inputs: deque[_InflightInput] = deque(pending_resend)
+
+ # Server acks: first for resend (0→1), then for fresh (1→2).
+ messages = [
+ _ack_message_range(0, 1),
+ _ack_message_range(1, 2),
+ ]
+
+ input_queue: asyncio.Queue[AppendInput | None] = asyncio.Queue()
+ await input_queue.put(AppendInput(records=[Record(body=b"fresh")]))
+ await input_queue.put(None)
+
+ ack_queue: asyncio.Queue[AppendAck | None] = asyncio.Queue()
+
+ frame_signal = FrameSignal()
+ # Simulate that the signal was set during the previous attempt's body send.
+ frame_signal.signal()
+
+ # Build a client whose streaming_request yields the canned acks.
+ client = _StreamingClient(messages)
+
+ await _run_attempt(
+ cast("HttpClient", client),
+ "test-stream",
+ Attempt(1),
+ inflight_inputs,
+ input_queue,
+ ack_queue,
+ pending_resend,
+ Compression.NONE,
+ frame_signal,
+ )
+
+ # Frame signal should have been reset after consuming resend ack.
+ # The fresh batch ack then signals it again via _StreamResponse, but
+ # the important thing is it WAS reset after resend phase.
+ ack1 = ack_queue.get_nowait()
+ ack2 = ack_queue.get_nowait()
+ assert ack1 is not None and ack1.start.seq_num == 0
+ assert ack2 is not None and ack2.start.seq_num == 1
+
+
+# ── Terminal 416 in S2S protocol ──
+
+
+@pytest.mark.asyncio
+async def test_terminal_416_raises_read_unwritten():
+ """A terminal S2S message with status 416 should raise ReadUnwrittenError."""
+ import json
+
+ from s2_sdk._exceptions import ReadUnwrittenError
+ from s2_sdk._s2s._protocol import read_messages
+
+ error_body = json.dumps(
+ {"code": "read_unwritten", "tail": {"seq_num": 42, "timestamp": 100}}
+ ).encode()
+ terminal_frame = frame_message(
+ Message(
+ (416).to_bytes(2, "big") + error_body,
+ terminal=True,
+ compression=Compression.NONE,
+ )
+ )
+
+ async def _byte_stream():
+ yield terminal_frame
+
+ with pytest.raises(ReadUnwrittenError) as exc_info:
+ async for _ in read_messages(_byte_stream()):
+ pass
+
+ assert exc_info.value.status_code == 416
+ assert exc_info.value.tail.seq_num == 42
+ assert exc_info.value.tail.timestamp == 100
+
+
+# ── ConnectError no-side-effects narrowing ──
+
+
+def test_connect_error_with_connection_refused_is_no_side_effects():
+ from s2_sdk._exceptions import ConnectError
+ from s2_sdk._retrier import has_no_side_effects
+
+ inner = ConnectionRefusedError("refused")
+ err = ConnectError("connect failed")
+ err.__cause__ = inner
+ assert has_no_side_effects(err) is True
+
+
+def test_connect_error_without_connection_refused_has_side_effects():
+ from s2_sdk._exceptions import ConnectError
+ from s2_sdk._retrier import has_no_side_effects
+
+ err = ConnectError("DNS failure")
+ err.__cause__ = OSError("name resolution failed")
+ assert has_no_side_effects(err) is False
+
+
+def test_connect_error_no_cause_has_side_effects():
+ from s2_sdk._exceptions import ConnectError
+ from s2_sdk._retrier import has_no_side_effects
+
+ err = ConnectError("generic")
+ assert has_no_side_effects(err) is False
diff --git a/tests/test_stream_ops.py b/tests/test_stream_ops.py
index a603df7..ca362f0 100644
--- a/tests/test_stream_ops.py
+++ b/tests/test_stream_ops.py
@@ -1,71 +1,151 @@
import asyncio
import time
-from typing import AsyncIterable
+import uuid
+from datetime import timedelta
import pytest
-from streamstore import Stream
-from streamstore.schemas import (
+from s2_sdk import (
+ S2,
AppendInput,
+ BasinConfig,
+ Batching,
+ CommandRecord,
+ Compression,
+ Endpoints,
+ FencingTokenMismatchError,
ReadLimit,
+ ReadUnwrittenError,
Record,
+ S2Basin,
+ S2ServerError,
+ S2Stream,
SeqNum,
- Tail,
+ SeqNumMismatchError,
+ StorageClass,
+ StreamConfig,
TailOffset,
Timestamp,
+ Timestamping,
+ TimestampingMode,
+ metered_bytes,
)
-from streamstore.utils import metered_bytes
+
+
+def now_ms() -> int:
+ return int(time.time() * 1000)
@pytest.mark.stream
class TestStreamOperations:
- async def test_check_tail_empty_stream(self, stream: Stream):
+ async def test_check_tail_empty_stream(self, stream: S2Stream):
+ tail = await stream.check_tail()
+
+ assert tail.seq_num == 0
+ assert tail.timestamp == 0
+
+ async def test_check_tail_after_multiple_appends(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"a")]))
+ await stream.append(AppendInput(records=[Record(body=b"b"), Record(body=b"c")]))
tail = await stream.check_tail()
+ assert tail.seq_num == 3
- assert tail.next_seq_num == 0
- assert tail.last_timestamp == 0
+ async def test_check_tail_nonexistent_stream_errors(self, shared_basin: S2Basin):
+ nonexistent = shared_basin.stream("nonexistent-stream-xyz")
+ with pytest.raises(S2ServerError) as exc_info:
+ await nonexistent.check_tail()
+ assert exc_info.value.code == "stream_not_found"
- async def test_append_single_record(self, stream: Stream):
- input = AppendInput(records=[Record(body=b"record-0")])
- output = await stream.append(input)
+ async def test_append_single_record(self, stream: S2Stream):
+ ack = await stream.append(AppendInput(records=[Record(body=b"record-0")]))
- assert output.start_seq_num == 0
- assert output.end_seq_num == 1
- assert output.next_seq_num == 1
- assert output.start_timestamp > 0
- assert output.end_timestamp > 0
- assert output.last_timestamp > 0
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 1
+ assert ack.tail.seq_num == 1
- async def test_append_multiple_records(self, stream: Stream):
+ async def test_append_multiple_records(self, stream: S2Stream):
input = AppendInput(
records=[
- Record(body=f"record-{i}".encode(), headers=[(b"key", b"value")])
+ Record(body=f"record-{i}".encode(), headers=[(b"k1", b"v1")])
for i in range(3)
]
)
- output = await stream.append(input)
+ ack = await stream.append(input)
+
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 3
+ assert ack.tail.seq_num == 3
+
+ async def test_append_empty_body(self, stream: S2Stream):
+ ack = await stream.append(AppendInput(records=[Record(body=b"")]))
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 1
+
+ async def test_append_with_headers(self, stream: S2Stream):
+ headers = [(b"k1", b"v1"), (b"k2", b"v2")]
+ await stream.append(
+ AppendInput(records=[Record(body=b"data", headers=headers)])
+ )
- assert output.start_seq_num == 0
- assert output.end_seq_num == 3
- assert output.next_seq_num == 3
+ batch = await stream.read(start=SeqNum(0))
+ assert len(batch.records) == 1
+ assert batch.records[0].headers == headers
+
+ async def test_append_with_empty_header_value(self, stream: S2Stream):
+ headers = [(b"k1", b""), (b"k2", b"")]
+ await stream.append(
+ AppendInput(records=[Record(body=b"lorem", headers=headers)])
+ )
- async def test_append_with_match_seq_num(self, stream: Stream):
+ batch = await stream.read(start=SeqNum(0))
+ assert len(batch.records) == 1
+ assert batch.records[0].headers == headers
+
+ async def test_append_mixed_records_with_and_without_headers(
+ self, stream: S2Stream
+ ):
+ records_in = [
+ Record(body=b"lorem", headers=[(b"k1", b"v1")]),
+ Record(body=b"ipsum", headers=[(b"k2", b""), (b"k3", b"v3")]),
+ Record(body=b"dolor"),
+ ]
+ ack = await stream.append(AppendInput(records=records_in))
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 3
+
+ batch = await stream.read(start=SeqNum(0))
+ assert len(batch.records) == 3
+ assert len(batch.records[0].headers) == 1
+ assert len(batch.records[1].headers) == 2
+ assert len(batch.records[2].headers) == 0
+
+ async def test_append_with_match_seq_num(self, stream: S2Stream):
input_0 = AppendInput(records=[Record(body=b"record-0")])
- output_0 = await stream.append(input_0)
+ ack_0 = await stream.append(input_0)
input_1 = AppendInput(
- records=[Record(body=b"record-1")], match_seq_num=output_0.next_seq_num
+ records=[Record(body=b"record-1")],
+ match_seq_num=ack_0.tail.seq_num,
)
- output_1 = await stream.append(input_1)
+ ack_1 = await stream.append(input_1)
+
+ assert ack_1.start.seq_num == 1
+ assert ack_1.end.seq_num == 2
+ assert ack_1.tail.seq_num == 2
- assert output_1.start_seq_num == 1
- assert output_1.end_seq_num == 2
- assert output_1.next_seq_num == 2
+ async def test_append_mismatched_seq_num_errors(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"first")]))
+
+ with pytest.raises(SeqNumMismatchError) as exc_info:
+ await stream.append(
+ AppendInput(records=[Record(body=b"wrong")], match_seq_num=999)
+ )
+ assert exc_info.value.expected_seq_num == 1
- async def test_append_with_timestamp(self, stream: Stream):
- timestamp_0 = int(time.time())
+ async def test_append_with_timestamp(self, stream: S2Stream):
+ timestamp_0 = now_ms()
await asyncio.sleep(0.1)
- timestamp_1 = int(time.time())
+ timestamp_1 = now_ms()
input = AppendInput(
records=[
@@ -73,71 +153,198 @@ async def test_append_with_timestamp(self, stream: Stream):
Record(body=b"record-1", timestamp=timestamp_1),
]
)
- output = await stream.append(input)
+ ack = await stream.append(input)
+
+ assert ack.start.seq_num == 0
+ assert ack.start.timestamp == timestamp_0
+ assert ack.end.seq_num == 2
+ assert ack.end.timestamp == timestamp_1
+ assert ack.tail.seq_num == 2
+ assert ack.tail.timestamp == timestamp_1
+
+ async def test_append_with_past_timestamp_adjusts_monotonic(self, stream: S2Stream):
+ base = now_ms() - 10_000
+ first_timestamp = base + 10
+ past_timestamp = base
+
+ ack_1 = await stream.append(
+ AppendInput(records=[Record(body=b"first", timestamp=first_timestamp)])
+ )
+ ack_2 = await stream.append(
+ AppendInput(records=[Record(body=b"second", timestamp=past_timestamp)])
+ )
+ assert ack_2.start.timestamp >= ack_1.end.timestamp
+
+ async def test_append_without_timestamp_client_require_errors(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(
+ timestamping=Timestamping(mode=TimestampingMode.CLIENT_REQUIRE)
+ )
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ stream = shared_basin.stream(stream_name)
+ with pytest.raises(S2ServerError) as exc_info:
+ await stream.append(AppendInput(records=[Record(body=b"lorem")]))
+ assert exc_info.value.code == "invalid"
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_append_with_future_timestamp_uncapped_false_caps(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(timestamping=Timestamping(uncapped=False))
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ stream = shared_basin.stream(stream_name)
+ now = now_ms()
+ future = now + 3_600_000
+ ack = await stream.append(
+ AppendInput(records=[Record(body=b"lorem", timestamp=future)])
+ )
+ assert ack.start.timestamp < future
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
+ async def test_append_with_future_timestamp_uncapped_true_preserves(
+ self, shared_basin: S2Basin, stream_name: str
+ ):
+ config = StreamConfig(timestamping=Timestamping(uncapped=True))
+ await shared_basin.create_stream(name=stream_name, config=config)
+ try:
+ stream = shared_basin.stream(stream_name)
+ now = now_ms()
+ future = now + 3_600_000
+ ack = await stream.append(
+ AppendInput(records=[Record(body=b"lorem", timestamp=future)])
+ )
+ assert ack.start.timestamp == future
+ finally:
+ await shared_basin.delete_stream(stream_name)
+
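+ # Taken together, the three timestamping tests above cover: CLIENT_REQUIRE
+ # rejecting records without an explicit timestamp, uncapped=False capping a
+ # far-future timestamp, and uncapped=True preserving it as given.
+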
+ async def test_append_matching_fencing_token(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[CommandRecord.fence("my-token")]))
+
+ ack = await stream.append(
+ AppendInput(records=[Record(body=b"data")], fencing_token="my-token")
+ )
+ assert ack.end.seq_num > 0
+
+ async def test_append_mismatched_fencing_token_errors(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[CommandRecord.fence("correct-token")]))
+
+ with pytest.raises(FencingTokenMismatchError) as exc_info:
+ await stream.append(
+ AppendInput(records=[Record(body=b"data")], fencing_token="wrong-token")
+ )
+ assert exc_info.value.expected_fencing_token == "correct-token"
+
+ async def test_fence_set_and_clear(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[CommandRecord.fence("fence-1")]))
+
+ await stream.append(
+ AppendInput(records=[Record(body=b"data")], fencing_token="fence-1")
+ )
+
+ await stream.append(
+ AppendInput(records=[CommandRecord.fence("")], fencing_token="fence-1")
+ )
- assert output.start_seq_num == 0
- assert output.start_timestamp == timestamp_0
- assert output.end_seq_num == 2
- assert output.end_timestamp == timestamp_1
- assert output.next_seq_num == 2
- assert output.last_timestamp == timestamp_1
+ ack = await stream.append(AppendInput(records=[Record(body=b"after-clear")]))
+ assert ack.end.seq_num > 0
- async def test_read_from_seq_num_zero(self, stream: Stream):
+ async def test_trim_command_is_accepted(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=f"r{i}".encode()) for i in range(3)])
+ )
+
+ ack = await stream.append(AppendInput(records=[CommandRecord.trim(2)]))
+ assert ack.start.seq_num == 3
+ assert ack.end.seq_num == 4
+
+ async def test_trim_to_future_seq_num_noop(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"record-1")]))
+ ack = await stream.append(AppendInput(records=[CommandRecord.trim(999_999)]))
+ assert ack.start.seq_num == 1
+ assert ack.end.seq_num == 2
+
+ async def test_append_max_batch_size(self, stream: S2Stream):
+ records = [Record(body=b"a") for _ in range(1000)]
+ ack = await stream.append(AppendInput(records=records))
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 1000
+
+ async def test_append_invalid_command_header_errors(self, stream: S2Stream):
+ record = Record(body=b"lorem", headers=[(b"", b"not-a-command")])
+ with pytest.raises(S2ServerError) as exc_info:
+ await stream.append(AppendInput(records=[record]))
+ assert exc_info.value.code == "invalid"
+
+ async def test_append_invalid_command_header_with_extra_headers_errors(
+ self, stream: S2Stream
+ ):
+ record = Record(body=b"lorem", headers=[(b"", b"fence"), (b"extra", b"value")])
+ with pytest.raises(S2ServerError) as exc_info:
+ await stream.append(AppendInput(records=[record]))
+ assert exc_info.value.code == "invalid"
+
+ async def test_append_nonexistent_stream_errors(self, shared_basin: S2Basin):
+ nonexistent = shared_basin.stream("nonexistent-stream-xyz")
+ with pytest.raises(S2ServerError) as exc_info:
+ await nonexistent.append(AppendInput(records=[Record(body=b"data")]))
+ assert exc_info.value.code == "stream_not_found"
+
+ async def test_read_from_seq_num_zero(self, stream: S2Stream):
await stream.append(
AppendInput(records=[Record(body=f"record-{i}".encode()) for i in range(3)])
)
- records = await stream.read(start=SeqNum(0))
+ batch = await stream.read(start=SeqNum(0))
- assert isinstance(records, list)
- assert len(records) == 3
+ assert len(batch.records) == 3
- for i, record in enumerate(records):
+ for i, record in enumerate(batch.records):
assert record.seq_num == i
assert record.body == f"record-{i}".encode()
- async def test_read_with_limit(self, stream: Stream):
+ async def test_read_with_limit(self, stream: S2Stream):
await stream.append(
AppendInput(records=[Record(body=f"record-{i}".encode()) for i in range(5)])
)
- records = await stream.read(start=SeqNum(0), limit=ReadLimit(count=2))
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(count=2))
- assert isinstance(records, list)
- assert len(records) == 2
+ assert len(batch.records) == 2
- records = await stream.read(start=SeqNum(0), limit=ReadLimit(bytes=20))
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(bytes=20))
- assert isinstance(records, list)
- total_bytes = sum(metered_bytes([r]) for r in records)
+ total_bytes = sum(metered_bytes([r]) for r in batch.records)
assert total_bytes <= 20
- async def test_read_from_timestamp(self, stream: Stream):
- output = await stream.append(AppendInput(records=[Record(body=b"record-0")]))
+ async def test_read_from_timestamp(self, stream: S2Stream):
+ ack = await stream.append(AppendInput(records=[Record(body=b"record-0")]))
- records = await stream.read(start=Timestamp(output.start_timestamp))
+ batch = await stream.read(start=Timestamp(ack.start.timestamp))
- assert isinstance(records, list)
- assert len(records) == 1
+ assert len(batch.records) == 1
- async def test_read_from_tail_offset(self, stream: Stream):
+ async def test_read_from_tail_offset(self, stream: S2Stream):
await stream.append(
AppendInput(records=[Record(body=f"record-{i}".encode()) for i in range(5)])
)
- records = await stream.read(start=TailOffset(2))
+ batch = await stream.read(start=TailOffset(2))
- assert isinstance(records, list)
- assert len(records) == 2
- assert records[0].body == b"record-3"
- assert records[1].body == b"record-4"
+ assert len(batch.records) == 2
+ assert batch.records[0].body == b"record-3"
+ assert batch.records[1].body == b"record-4"
- async def test_read_until_timestamp(self, stream: Stream):
- timestamp_0 = int(time.time() * 1000)
+ async def test_read_until_timestamp(self, stream: S2Stream):
+ timestamp_0 = now_ms()
await asyncio.sleep(0.2)
- timestamp_1 = int(time.time() * 1000)
+ timestamp_1 = now_ms()
await asyncio.sleep(0.2)
- timestamp_2 = int(time.time() * 1000)
+ timestamp_2 = now_ms()
await stream.append(
AppendInput(
@@ -149,72 +356,491 @@ async def test_read_until_timestamp(self, stream: Stream):
)
)
- records = await stream.read(start=Timestamp(timestamp_0), until=timestamp_2)
- assert isinstance(records, list)
- assert len(records) == 2
- assert records[0].timestamp == timestamp_0
- assert records[1].timestamp == timestamp_1
+ batch = await stream.read(
+ start=Timestamp(timestamp_0), until_timestamp=timestamp_2
+ )
+ assert len(batch.records) == 2
+ assert batch.records[0].timestamp == timestamp_0
+ assert batch.records[1].timestamp == timestamp_1
- async def test_read_beyond_tail(self, stream: Stream):
+ async def test_read_unbounded(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=f"r{i}".encode()) for i in range(5)])
+ )
+ batch = await stream.read(start=SeqNum(0))
+ assert len(batch.records) == 5
+
+ async def test_read_count_limit_partial(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=f"r{i}".encode()) for i in range(5)])
+ )
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(count=3))
+ assert len(batch.records) == 3
+
+ async def test_read_count_limit_exact(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=f"r{i}".encode()) for i in range(3)])
+ )
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(count=3))
+ assert len(batch.records) == 3
+
+ async def test_read_count_limit_exceeds(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=f"r{i}".encode()) for i in range(3)])
+ )
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(count=100))
+ assert len(batch.records) == 3
+
+ async def test_read_count_zero_returns_empty(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"data")]))
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(count=0))
+ assert batch.records == []
+
+ async def test_read_bytes_limit_partial(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=b"x" * 100) for _ in range(5)])
+ )
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(bytes=200))
+ total = sum(metered_bytes([r]) for r in batch.records)
+ assert total <= 200
+
+ async def test_read_bytes_limit_exact(self, stream: S2Stream):
+ record = Record(body=b"x" * 100)
+ await stream.append(AppendInput(records=[record]))
+ batch = await stream.read(
+ start=SeqNum(0), limit=ReadLimit(bytes=metered_bytes([record]))
+ )
+ assert len(batch.records) == 1
+
+ async def test_read_bytes_limit_exceeds(self, stream: S2Stream):
+ await stream.append(
+ AppendInput(records=[Record(body=b"x" * 10) for _ in range(3)])
+ )
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(bytes=100000))
+ assert len(batch.records) == 3
+
+ async def test_read_bytes_zero_returns_empty(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"data")]))
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(bytes=0))
+ assert batch.records == []
+
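+ # 1005 records are appended below; a count limit above the per-read maximum is
+ # expected to clamp, so the read returns 1000 records rather than everything.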
+ async def test_read_count_over_max_clamps(self, stream: S2Stream):
+ for batch_start in range(0, 1000, 100):
+ records = [
+ Record(body=f"r{i}".encode())
+ for i in range(batch_start, batch_start + 100)
+ ]
+ await stream.append(AppendInput(records=records))
+ await stream.append(
+ AppendInput(records=[Record(body=f"tail-{i}".encode()) for i in range(5)])
+ )
+
+ batch = await stream.read(start=SeqNum(0), limit=ReadLimit(count=2000))
+ assert len(batch.records) == 1000
+
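+ # Two ~700 KB records exceed the per-read byte cap even with a 2 MiB limit requested,
+ # so only the first record is returned and the metered size stays within 1 MiB.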
+ async def test_read_bytes_over_max_clamps(self, stream: S2Stream):
+ body = b"a" * 700_000
+ await stream.append(AppendInput(records=[Record(body=body)]))
+ await stream.append(AppendInput(records=[Record(body=body)]))
+
+ batch = await stream.read(
+ start=SeqNum(0), limit=ReadLimit(bytes=2 * 1024 * 1024)
+ )
+ total = sum(metered_bytes([r]) for r in batch.records)
+ assert total <= 1024 * 1024
+ assert len(batch.records) == 1
+
+ async def test_read_from_seq_num_with_bytes_limit(self, stream: S2Stream):
+ r1 = Record(body=b"ipsum")
+ r2 = Record(body=b"dolor")
+ ack = await stream.append(
+ AppendInput(records=[Record(body=b"lorem"), r1, r2, Record(body=b"sit")])
+ )
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 4
+
+ bytes_limit = metered_bytes([r1]) + metered_bytes([r2])
+ batch = await stream.read(start=SeqNum(1), limit=ReadLimit(bytes=bytes_limit))
+ assert len(batch.records) == 2
+ assert batch.records[0].seq_num == 1
+ assert batch.records[0].body == b"ipsum"
+ assert batch.records[1].seq_num == 2
+ assert batch.records[1].body == b"dolor"
+
+ async def test_read_from_timestamp_with_count_limit(self, stream: S2Stream):
+ base = now_ms() - 10_000
+ await stream.append(
+ AppendInput(
+ records=[
+ Record(body=b"lorem", timestamp=base),
+ Record(body=b"ipsum", timestamp=base + 1),
+ Record(body=b"dolor", timestamp=base + 2),
+ Record(body=b"sit", timestamp=base + 3),
+ ]
+ )
+ )
+
+ batch = await stream.read(start=Timestamp(base + 2), limit=ReadLimit(count=1))
+ assert len(batch.records) == 1
+ assert batch.records[0].seq_num == 2
+ assert batch.records[0].timestamp == base + 2
+
+ async def test_read_from_timestamp_with_bytes_limit(self, stream: S2Stream):
+ base = now_ms() - 10_000
+ r1 = Record(body=b"ipsum", timestamp=base + 1)
+ await stream.append(
+ AppendInput(
+ records=[
+ Record(body=b"lorem", timestamp=base),
+ r1,
+ Record(body=b"dolor", timestamp=base + 2),
+ Record(body=b"sit", timestamp=base + 3),
+ ]
+ )
+ )
+
+ bytes_limit = metered_bytes([r1]) + 5
+ batch = await stream.read(
+ start=Timestamp(base + 1), limit=ReadLimit(bytes=bytes_limit)
+ )
+ assert len(batch.records) == 1
+ assert batch.records[0].seq_num == 1
+ assert batch.records[0].timestamp == base + 1
+
+ async def test_read_from_timestamp_in_future_errors(self, stream: S2Stream):
+ base = now_ms() - 10_000
+ await stream.append(
+ AppendInput(
+ records=[
+ Record(body=b"lorem", timestamp=base),
+ Record(body=b"ipsum", timestamp=base + 1),
+ ]
+ )
+ )
+
+ with pytest.raises(ReadUnwrittenError) as exc_info:
+ await stream.read(start=Timestamp(base + 100))
+ assert exc_info.value.tail.seq_num == 2
+
+ async def test_read_beyond_tail(self, stream: S2Stream):
await stream.append(
AppendInput(records=[Record(body=f"record-{i}".encode()) for i in range(5)])
)
- tail = await stream.read(start=SeqNum(100))
+ with pytest.raises(ReadUnwrittenError) as exc_info:
+ await stream.read(start=SeqNum(100))
+
+ assert exc_info.value.tail.seq_num == 5
+
+ async def test_read_empty_stream_errors(self, stream: S2Stream):
+ with pytest.raises(ReadUnwrittenError) as exc_info:
+ await stream.read(start=SeqNum(0))
+ assert exc_info.value.tail.seq_num == 0
+ assert exc_info.value.tail.timestamp == 0
+
+ async def test_read_beyond_tail_with_clamp_to_tail_errors(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"lorem")]))
+
+ with pytest.raises(ReadUnwrittenError) as exc_info:
+ await stream.read(start=SeqNum(10), clamp_to_tail=True)
+ assert exc_info.value.tail.seq_num == 1
+
+ async def test_read_beyond_tail_with_clamp_to_tail_and_wait_returns_empty(
+ self, stream: S2Stream
+ ):
+ await stream.append(AppendInput(records=[Record(body=b"lorem")]))
+
+ batch = await stream.read(start=SeqNum(10), clamp_to_tail=True, wait=1)
+ assert batch.records == []
+
+ async def test_read_empty_stream_with_wait_returns_empty(self, stream: S2Stream):
+ batch = await stream.read(start=SeqNum(0), wait=1)
+ assert batch.records == []
+
+ async def test_read_start_timestamp_ge_until_errors(self, stream: S2Stream):
+ base = now_ms() - 10_000
+ await stream.append(
+ AppendInput(
+ records=[
+ Record(body=b"lorem", timestamp=base),
+ Record(body=b"ipsum", timestamp=base + 1),
+ Record(body=b"dolor", timestamp=base + 2),
+ ]
+ )
+ )
+
+ with pytest.raises(S2ServerError):
+ await stream.read(start=Timestamp(base + 2), until_timestamp=base + 2)
- assert isinstance(tail, Tail)
- assert tail.next_seq_num == 5
+ async def test_read_nonexistent_stream_errors(self, shared_basin: S2Basin):
+ nonexistent = shared_basin.stream("nonexistent-stream-xyz")
+ with pytest.raises(S2ServerError) as exc_info:
+ await nonexistent.read(start=SeqNum(0))
+ assert exc_info.value.code == "stream_not_found"
- async def test_append_session(self, stream: Stream):
- async def inputs_gen() -> AsyncIterable[AppendInput]:
+ async def test_append_session(self, stream: S2Stream):
+ acks = []
+ async with stream.append_session() as session:
for i in range(3):
records = [
Record(body=f"batch-{i}-record-{j}".encode()) for j in range(2)
]
- yield AppendInput(records=records)
+ ticket = await session.submit(AppendInput(records=records))
+ ack = await ticket
+ acks.append(ack)
- outputs = []
- async for output in stream.append_session(inputs_gen()):
- outputs.append(output)
-
- assert len(outputs) == 3
+ assert len(acks) == 3
exp_seq_num = 0
- for output in outputs:
- assert output.start_seq_num == exp_seq_num
- exp_seq_num = output.end_seq_num
-
- async def test_read_session_termination(self, stream: Stream):
+ for ack in acks:
+ assert ack.start.seq_num == exp_seq_num
+ exp_seq_num = ack.end.seq_num
+
+ async def test_append_session_mismatched_seq_num_errors(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"first")]))
+
+ with pytest.raises(SeqNumMismatchError) as exc_info:
+ async with stream.append_session() as session:
+ ticket = await session.submit(
+ AppendInput(records=[Record(body=b"wrong")], match_seq_num=999)
+ )
+ await ticket
+ assert exc_info.value.expected_seq_num == 1
+
+ async def test_append_session_mismatched_fencing_token_errors(
+ self, stream: S2Stream
+ ):
+ await stream.append(AppendInput(records=[CommandRecord.fence("correct")]))
+
+ with pytest.raises(FencingTokenMismatchError) as exc_info:
+ async with stream.append_session() as session:
+ ticket = await session.submit(
+ AppendInput(records=[Record(body=b"data")], fencing_token="wrong")
+ )
+ await ticket
+ assert exc_info.value.expected_fencing_token == "correct"
+
+ async def test_append_session_nonexistent_stream_errors(
+ self, shared_basin: S2Basin
+ ):
+ nonexistent = shared_basin.stream("nonexistent-stream-xyz")
+
+ with pytest.raises(S2ServerError) as exc_info:
+ async with nonexistent.append_session() as session:
+ ticket = await session.submit(
+ AppendInput(records=[Record(body=b"data")])
+ )
+ await ticket
+ assert exc_info.value.code == "stream_not_found"
+
+ async def test_producer_delivers_all_acks(self, stream: S2Stream):
+ async with stream.producer() as p:
+ tickets = []
+ for i in range(5):
+ ticket = await p.submit(Record(body=f"record-{i}".encode()))
+ tickets.append(ticket)
+
+ for i, ticket in enumerate(tickets):
+ ack = await ticket
+ assert ack.seq_num == i
+
+ async def test_producer_close_delivers_all_indexed_acks_from_same_ack(
+ self, stream: S2Stream
+ ):
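+ # With default batching, all three records coalesce into a single append,
+ # so every ack refers to the same underlying batch.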
+ async with stream.producer() as p:
+ t0 = await p.submit(Record(body=b"lorem"))
+ t1 = await p.submit(Record(body=b"ipsum"))
+ t2 = await p.submit(Record(body=b"dolor"))
+
+ ack0 = await t0
+ ack1 = await t1
+ ack2 = await t2
+
+ assert ack0.seq_num == 0
+ assert ack1.seq_num == 1
+ assert ack2.seq_num == 2
+
+ assert ack0.batch is ack1.batch
+ assert ack1.batch is ack2.batch
+
+ async def test_producer_close_delivers_all_indexed_acks_from_different_acks(
+ self, stream: S2Stream
+ ):
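+ # max_records=1 with zero linger flushes each record as its own append,
+ # so the acks come from three distinct batches.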
+ async with stream.producer(
+ batching=Batching(max_records=1, linger=timedelta(0))
+ ) as p:
+ t0 = await p.submit(Record(body=b"lorem"))
+ t1 = await p.submit(Record(body=b"ipsum"))
+ t2 = await p.submit(Record(body=b"dolor"))
+
+ ack0 = await t0
+ ack1 = await t1
+ ack2 = await t2
+
+ assert ack0.seq_num == 0
+ assert ack1.seq_num == 1
+ assert ack2.seq_num == 2
+
+ assert ack0.batch is not ack1.batch
+ assert ack1.batch is not ack2.batch
+
+ async def test_producer_nonexistent_stream_errors(self, shared_basin: S2Basin):
+ nonexistent = shared_basin.stream("nonexistent-stream-xyz")
+
+ with pytest.raises(S2ServerError) as exc_info:
+ async with nonexistent.producer() as p:
+ ticket = await p.submit(Record(body=b"data"))
+ await ticket
+ assert exc_info.value.code == "stream_not_found"
+
+ async def test_read_session_termination(self, stream: S2Stream):
await stream.append(
AppendInput(records=[Record(body=f"record-{i}".encode()) for i in range(5)])
)
- outputs = []
- async for output in stream.read_session(
+ batches = []
+ async for batch in stream.read_session(
start=SeqNum(0), limit=ReadLimit(count=2)
):
- outputs.append(output)
+ batches.append(batch)
- assert len(outputs) == 1
+ assert len(batches) >= 1
+ assert len(batches[0].records) == 2
- assert isinstance(outputs[0], list)
- assert len(outputs[0]) == 2
+ async def test_read_session_read_existing_then_tails(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"a"), Record(body=b"b")]))
- async def test_read_session_tailing(self, stream: Stream):
- tail = await stream.check_tail()
+ received = []
+
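+ # An unbounded read session keeps tailing after the existing records are delivered;
+ # bound it with asyncio.timeout and treat the TimeoutError as the expected exit.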
+ with pytest.raises(asyncio.TimeoutError):
+ async with asyncio.timeout(2):
+ async for batch in stream.read_session(start=SeqNum(0)):
+ received.extend(batch.records)
- async def producer():
- await asyncio.sleep(0.5)
- await stream.append(AppendInput(records=[Record(body=b"record-0")]))
+ assert len(received) == 2
+ assert received[0].body == b"a"
+ assert received[1].body == b"b"
- producer_task = asyncio.create_task(producer())
+ async def test_read_session_tail_then_read_new_then_tail(self, stream: S2Stream):
+ async def append_later():
+ await asyncio.sleep(1)
+ await stream.append(AppendInput(records=[Record(body=b"new")]))
+ task = asyncio.create_task(append_later())
+ received = []
try:
- async for output in stream.read_session(
- start=SeqNum(tail.next_seq_num), clamp=True
- ):
- if isinstance(output, list) and len(output) > 0:
- assert output[0].body == b"record-0"
- break
+ with pytest.raises(asyncio.TimeoutError):
+ async with asyncio.timeout(5):
+ async for batch in stream.read_session(start=SeqNum(0)):
+ received.extend(batch.records)
finally:
- producer_task.cancel()
+ task.cancel()
+ assert len(received) == 1
+ assert received[0].body == b"new"
+
+ async def test_read_session_tails(self, stream: S2Stream):
+ with pytest.raises(asyncio.TimeoutError):
+ async with asyncio.timeout(1):
+ async for _ in stream.read_session(start=SeqNum(0)):
+ pass
+
+ async def test_read_session_clamp_to_tail_tails(self, stream: S2Stream):
+ await stream.append(AppendInput(records=[Record(body=b"data")]))
+ received = []
+ with pytest.raises(asyncio.TimeoutError):
+ async with asyncio.timeout(1):
+ async for batch in stream.read_session(
+ start=SeqNum(100), clamp_to_tail=True
+ ):
+ received.extend(batch.records)
+ assert received == []
+
+ async def test_read_session_beyond_tail_errors(self, stream: S2Stream):
+ with pytest.raises(ReadUnwrittenError) as exc_info:
+ async for _ in stream.read_session(start=SeqNum(100)):
+ pass
+ assert exc_info.value.tail.seq_num == 0
+
+
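+# Compression round-trip tests provision a throwaway basin and stream per test so the
+# client-level compression setting can be exercised against both unary appends and
+# append sessions, then verified by reading the records back.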
+@pytest.mark.stream
+@pytest.mark.parametrize("compression", [Compression.GZIP])
+class TestCompression:
+ async def test_compression_roundtrip_unary(
+ self, access_token: str, endpoints: Endpoints | None, compression: Compression
+ ):
+ async with S2(access_token, endpoints=endpoints, compression=compression) as s2:
+ basin_name = f"test-py-sdk-{uuid.uuid4().hex[:8]}"
+ await s2.create_basin(
+ name=basin_name,
+ config=BasinConfig(
+ default_stream_config=StreamConfig(
+ storage_class=StorageClass.STANDARD
+ )
+ ),
+ )
+ try:
+ basin = s2.basin(basin_name)
+ stream_name = f"stream-{uuid.uuid4().hex[:8]}"
+ await basin.create_stream(
+ name=stream_name,
+ config=StreamConfig(timestamping=Timestamping(uncapped=True)),
+ )
+ try:
+ stream = basin.stream(stream_name)
+ ack = await stream.append(
+ AppendInput(
+ records=[
+ Record(body=b"s" * 2048),
+ Record(body=b"2" * 2048),
+ ]
+ )
+ )
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 2
+
+ batch = await stream.read(start=SeqNum(0))
+ assert len(batch.records) == 2
+ finally:
+ await basin.delete_stream(stream_name)
+ finally:
+ await s2.delete_basin(basin_name)
+
+ async def test_compression_roundtrip_session(
+ self, access_token: str, endpoints: Endpoints | None, compression: Compression
+ ):
+ async with S2(access_token, endpoints=endpoints, compression=compression) as s2:
+ basin_name = f"test-py-sdk-{uuid.uuid4().hex[:8]}"
+ await s2.create_basin(
+ name=basin_name,
+ config=BasinConfig(
+ default_stream_config=StreamConfig(
+ timestamping=Timestamping(mode=TimestampingMode.ARRIVAL)
+ )
+ ),
+ )
+ try:
+ basin = s2.basin(basin_name)
+ stream_name = f"stream-{uuid.uuid4().hex[:8]}"
+ await basin.create_stream(
+ name=stream_name,
+ config=StreamConfig(storage_class=StorageClass.STANDARD),
+ )
+ try:
+ stream = basin.stream(stream_name)
+ # Payload >= 1KiB to trigger compression
+ async with stream.append_session() as session:
+ ticket = await session.submit(
+ AppendInput(records=[Record(body=b"s2" * 10240)])
+ )
+ ack = await ticket
+
+ assert ack.start.seq_num == 0
+ assert ack.end.seq_num == 1
+
+ batch = await stream.read(start=SeqNum(0))
+ assert len(batch.records) == 1
+ assert len(batch.records[0].body) == 20480
+ finally:
+ await basin.delete_stream(stream_name)
+ finally:
+ await s2.delete_basin(basin_name)
diff --git a/tests/test_validators.py b/tests/test_validators.py
new file mode 100644
index 0000000..099fabf
--- /dev/null
+++ b/tests/test_validators.py
@@ -0,0 +1,27 @@
+import pytest
+
+from s2_sdk import CommandRecord, Record, S2ClientError
+from s2_sdk._types import metered_bytes
+from s2_sdk._validators import validate_append_input
+
+
+def test_append_record_batch_rejects_empty():
+ with pytest.raises(S2ClientError):
+ validate_append_input(0, 0)
+
+
+def test_append_record_batch_rejects_too_many_records():
+ records = [Record(body=b"a") for _ in range(1001)]
+ with pytest.raises(S2ClientError):
+ validate_append_input(len(records), metered_bytes(records))
+
+
+def test_append_record_rejects_too_large():
+ record = Record(body=b"a" * (1024 * 1024 + 1))
+ with pytest.raises(S2ClientError):
+ validate_append_input(1, metered_bytes([record]))
+
+
+def test_fencing_token_rejects_too_long():
+ with pytest.raises(S2ClientError):
+ CommandRecord.fence("a" * 37)
diff --git a/update_protos b/update_protos
deleted file mode 100755
index e54278b..0000000
--- a/update_protos
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-cd src/streamstore/_lib
-uv run python -m grpc_tools.protoc -I../../../s2-specs --python_out=. --pyi_out=. --grpc_python_out=. ../../../s2-specs/s2/v1alpha/s2.proto
-# workaround for https://github.com/protocolbuffers/protobuf/issues/7061
-find . -name '*.py' | xargs -I{} sed -i '' 's/from s2\.\(v[0-9][a-z0-9]*\) import s2_pb2 as s2_dot_\1_dot_s2__pb2/from streamstore._lib.s2.\1 import s2_pb2 as s2_dot_\1_dot_s2__pb2/' {}
-uv run poe checker
diff --git a/update_specs b/update_specs
new file mode 100755
index 0000000..3b93c04
--- /dev/null
+++ b/update_specs
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -euo pipefail
+
+# Proto → protobuf messages (data plane)
+cd src/s2_sdk/_generated
+uv run python -m grpc_tools.protoc \
+ -I../../../s2-specs \
+ --python_out=. \
+ --pyi_out=. \
+ ../../../s2-specs/s2/v1/s2.proto
+
+# Fix import paths: rewrite generated absolute imports (s2.vX) to s2_sdk._generated-relative imports
+find . -name '*.py' | xargs -I{} sed -i '' 's/from s2\.\(v[0-9][a-z0-9]*\) import s2_pb2/from s2_sdk._generated.s2.\1 import s2_pb2/' {}
+cd ../../..
+
+# Run checks
+uv run poe checker
diff --git a/uv.lock b/uv.lock
index f50440b..509c5b2 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,8 +1,9 @@
version = 1
-revision = 2
+revision = 3
requires-python = ">=3.11"
resolution-markers = [
- "python_full_version >= '3.13'",
+ "python_full_version >= '3.14'",
+ "python_full_version == '3.13.*'",
"python_full_version < '3.13'",
]
@@ -17,16 +18,15 @@ wheels = [
[[package]]
name = "anyio"
-version = "4.8.0"
+version = "4.13.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "idna" },
- { name = "sniffio" },
{ name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/a3/73/199a98fc2dae33535d6b8e8e6ec01f8c1d76c9adb096c6b7d64823038cde/anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a", size = 181126, upload-time = "2025-01-05T13:13:11.095Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/19/14/2c5dd9f512b66549ae92767a9c7b330ae88e1932ca57876909410251fe13/anyio-4.13.0.tar.gz", hash = "sha256:334b70e641fd2221c1505b3890c69882fe4a2df910cba14d97019b90b24439dc", size = 231622, upload-time = "2026-03-24T12:59:09.671Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/46/eb/e7f063ad1fec6b3178a3cd82d1a3c4de82cccf283fc42746168188e1cdd5/anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a", size = 96041, upload-time = "2025-01-05T13:13:07.985Z" },
+ { url = "https://files.pythonhosted.org/packages/da/42/e921fccf5015463e32a3cf6ee7f980a6ed0f395ceeaa45060b61d86486c2/anyio-4.13.0-py3-none-any.whl", hash = "sha256:08b310f9e24a9594186fd75b4f73f4a4152069e3853f1ed8bfbf58369f4ad708", size = 114353, upload-time = "2026-03-24T12:59:08.246Z" },
]
[[package]]
@@ -165,6 +165,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767, upload-time = "2024-12-24T18:12:32.852Z" },
]
+[[package]]
+name = "click"
+version = "8.3.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/57/75/31212c6bf2503fdf920d87fee5d7a86a2e3bcf444984126f13d8e4016804/click-8.3.2.tar.gz", hash = "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5", size = 302856, upload-time = "2026-04-03T19:14:45.118Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e4/20/71885d8b97d4f3dde17b1fdb92dbd4908b00541c5a3379787137285f602e/click-8.3.2-py3-none-any.whl", hash = "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d", size = 108379, upload-time = "2026-04-03T19:14:43.505Z" },
+]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -241,15 +253,6 @@ sphinx = [
{ name = "sphinx-toolbox" },
]
-[[package]]
-name = "execnet"
-version = "2.1.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/bb/ff/b4c0dc78fbe20c3e59c0c7334de0c27eb4001a2b2017999af398bf730817/execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3", size = 166524, upload-time = "2024-04-08T09:04:19.245Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/43/09/2aea36ff60d16dd8879bdb2f5b3ee0ba8d08cbbdcdfe870e695ce3784385/execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc", size = 40612, upload-time = "2024-04-08T09:04:17.414Z" },
-]
-
[[package]]
name = "filelock"
version = "3.16.1"
@@ -274,18 +277,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/27/48/e791a7ed487dbb9729ef32bb5d1af16693d8925f4366befef54119b2e576/furo-2024.8.6-py3-none-any.whl", hash = "sha256:6cd97c58b47813d3619e63e9081169880fbe331f0ca883c871ff1f3f11814f5c", size = 341333, upload-time = "2024-08-06T08:07:54.44Z" },
]
-[[package]]
-name = "grpc-stubs"
-version = "1.53.0.5"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "grpcio" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/55/8d/14c6b8c2fa5d82ffe96aed53b1c38e2a9fb6a57c5836966545f3080e5adc/grpc-stubs-1.53.0.5.tar.gz", hash = "sha256:3e1b642775cbc3e0c6332cfcedfccb022176db87e518757bef3a1241397be406", size = 14259, upload-time = "2023-12-28T02:13:29.19Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e0/86/147d2ccaf9b4b81407734b9abc1152aff39836e8e05be3bf069f9374c021/grpc_stubs-1.53.0.5-py3-none-any.whl", hash = "sha256:04183fb65a1b166a1febb9627e3d9647d3926ccc2dfe049fe7b6af243428dbe1", size = 16497, upload-time = "2023-12-28T02:13:27.556Z" },
-]
-
[[package]]
name = "grpcio"
version = "1.69.0"
@@ -361,6 +352,37 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/27/e2/b419a02b50240143605f77cd50cb07f724caf0fd35a01540a4f044ae9f21/grpcio_tools-1.69.0-cp313-cp313-win_amd64.whl", hash = "sha256:e9bae733654e0eb8ca83aa1d0d6b6c2f4a3525ce70d5ffc07df68d28f6520137", size = 1113616, upload-time = "2025-01-05T05:52:13.089Z" },
]
+[[package]]
+name = "h11"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
+]
+
+[[package]]
+name = "h2"
+version = "4.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "hpack" },
+ { name = "hyperframe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1d/17/afa56379f94ad0fe8defd37d6eb3f89a25404ffc71d4d848893d270325fc/h2-4.3.0.tar.gz", hash = "sha256:6c59efe4323fa18b47a632221a1888bd7fde6249819beda254aeca909f221bf1", size = 2152026, upload-time = "2025-08-23T18:12:19.778Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl", hash = "sha256:c438f029a25f7945c69e0ccf0fb951dc3f73a5f6412981daee861431b70e2bdd", size = 61779, upload-time = "2025-08-23T18:12:17.779Z" },
+]
+
+[[package]]
+name = "hpack"
+version = "4.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" },
+]
+
[[package]]
name = "html5lib"
version = "1.1"
@@ -374,6 +396,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6c/dd/a834df6482147d48e225a49515aabc28974ad5a4ca3215c18a882565b028/html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d", size = 112173, upload-time = "2020-06-22T23:32:36.781Z" },
]
+[[package]]
+name = "hyperframe"
+version = "6.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" },
+]
+
[[package]]
name = "idna"
version = "3.10"
@@ -722,19 +753,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fa/b6/3127540ecdf1464a00e5a01ee60a1b09175f6913f0644ac748494d9c4b21/pytest_timeout-2.4.0-py3-none-any.whl", hash = "sha256:c42667e5cdadb151aeb5b26d114aff6bdf5a907f176a007a30b940d3d865b5c2", size = 14382, upload-time = "2025-05-05T19:44:33.502Z" },
]
-[[package]]
-name = "pytest-xdist"
-version = "3.8.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "execnet" },
- { name = "pytest" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/78/b4/439b179d1ff526791eb921115fca8e44e596a13efeda518b9d845a619450/pytest_xdist-3.8.0.tar.gz", hash = "sha256:7e578125ec9bc6050861aa93f2d59f1d8d085595d6551c2c90b6f4fad8d3a9f1", size = 88069, upload-time = "2025-07-01T13:30:59.346Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/ca/31/d4e37e9e550c2b92a9cbc2e4d0b7420a27224968580b5a447f420847c975/pytest_xdist-3.8.0-py3-none-any.whl", hash = "sha256:202ca578cfeb7370784a8c33d6d05bc6e13b4f25b5053c30a152269fd10f0b88", size = 46396, upload-time = "2025-07-01T13:30:56.632Z" },
-]
-
[[package]]
name = "pyyaml"
version = "6.0.2"
@@ -857,6 +875,67 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b2/94/0498cdb7316ed67a1928300dd87d659c933479f44dec51b4f62bfd1f8028/ruff-0.9.1-py3-none-win_arm64.whl", hash = "sha256:1cd76c7f9c679e6e8f2af8f778367dca82b95009bc7b1a85a47f1521ae524fa7", size = 9145708, upload-time = "2025-01-10T18:57:51.308Z" },
]
+[[package]]
+name = "s2-sdk"
+version = "0.1.0"
+source = { editable = "." }
+dependencies = [
+ { name = "h2" },
+ { name = "protobuf" },
+ { name = "zstandard" },
+]
+
+[package.dev-dependencies]
+dev = [
+ { name = "grpcio-tools" },
+ { name = "mypy" },
+ { name = "poethepoet" },
+ { name = "ruff" },
+ { name = "types-protobuf" },
+]
+docs = [
+ { name = "enum-tools", extra = ["sphinx"] },
+ { name = "furo" },
+ { name = "myst-parser" },
+ { name = "sphinx" },
+ { name = "sphinx-autobuild" },
+ { name = "sphinx-autodoc-typehints" },
+]
+test = [
+ { name = "pytest" },
+ { name = "pytest-asyncio" },
+ { name = "pytest-timeout" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "h2", specifier = ">=4.1.0" },
+ { name = "protobuf", specifier = ">=5.29.0" },
+ { name = "zstandard", specifier = ">=0.23.0" },
+]
+
+[package.metadata.requires-dev]
+dev = [
+ { name = "grpcio-tools", specifier = ">=1.69.0" },
+ { name = "mypy", specifier = ">=1.14.1" },
+ { name = "poethepoet", specifier = ">=0.36.0" },
+ { name = "ruff", specifier = ">=0.9.1" },
+ { name = "types-protobuf", specifier = ">=5.29.1.20241207" },
+]
+docs = [
+ { name = "enum-tools", extras = ["sphinx"], specifier = ">=0.12.0" },
+ { name = "furo", specifier = ">=2024.8.6" },
+ { name = "myst-parser", specifier = ">=4.0.0" },
+ { name = "sphinx", specifier = "==8.1.3" },
+ { name = "sphinx-autobuild", specifier = ">=2024.10.3" },
+ { name = "sphinx-autodoc-typehints", specifier = ">=3.0.0" },
+]
+test = [
+ { name = "pytest", specifier = ">=8.0.0" },
+ { name = "pytest-asyncio", specifier = ">=0.23.0" },
+ { name = "pytest-timeout", specifier = ">=2.3.0" },
+]
+
[[package]]
name = "setuptools"
version = "75.8.0"
@@ -875,15 +954,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" },
]
-[[package]]
-name = "sniffio"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
-]
-
[[package]]
name = "snowballstemmer"
version = "2.2.0"
@@ -929,6 +999,23 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/26/60/1ddff83a56d33aaf6f10ec8ce84b4c007d9368b21008876fceda7e7381ef/sphinx-8.1.3-py3-none-any.whl", hash = "sha256:09719015511837b76bf6e03e42eb7595ac8c2e41eeb9c29c5b755c6b677992a2", size = 3487125, upload-time = "2024-10-13T20:27:10.448Z" },
]
+[[package]]
+name = "sphinx-autobuild"
+version = "2025.8.25"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "colorama" },
+ { name = "sphinx" },
+ { name = "starlette" },
+ { name = "uvicorn" },
+ { name = "watchfiles" },
+ { name = "websockets" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e0/3c/a59a3a453d4133777f7ed2e83c80b7dc817d43c74b74298ca0af869662ad/sphinx_autobuild-2025.8.25.tar.gz", hash = "sha256:9cf5aab32853c8c31af572e4fecdc09c997e2b8be5a07daf2a389e270e85b213", size = 15200, upload-time = "2025-08-25T18:44:55.436Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d7/20/56411b52f917696995f5ad27d2ea7e9492c84a043c5b49a3a3173573cd93/sphinx_autobuild-2025.8.25-py3-none-any.whl", hash = "sha256:b750ac7d5a18603e4665294323fd20f6dcc0a984117026d1986704fa68f0379a", size = 12535, upload-time = "2025-08-25T18:44:54.164Z" },
+]
+
[[package]]
name = "sphinx-autodoc-typehints"
version = "3.0.0"
@@ -1090,64 +1177,16 @@ wheels = [
]
[[package]]
-name = "streamstore"
-version = "5.0.0"
-source = { editable = "." }
+name = "starlette"
+version = "1.0.0"
+source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
- { name = "grpc-stubs" },
- { name = "grpcio" },
- { name = "grpcio-tools" },
- { name = "types-protobuf" },
-]
-
-[package.dev-dependencies]
-dev = [
- { name = "mypy" },
- { name = "poethepoet" },
- { name = "ruff" },
-]
-docs = [
- { name = "enum-tools", extra = ["sphinx"] },
- { name = "furo" },
- { name = "myst-parser" },
- { name = "sphinx" },
- { name = "sphinx-autodoc-typehints" },
-]
-test = [
- { name = "pytest" },
- { name = "pytest-asyncio" },
- { name = "pytest-timeout" },
- { name = "pytest-xdist" },
-]
-
-[package.metadata]
-requires-dist = [
- { name = "anyio", specifier = ">=4.8.0" },
- { name = "grpc-stubs", specifier = ">=1.53.0.5" },
- { name = "grpcio", specifier = ">=1.69.0" },
- { name = "grpcio-tools", specifier = ">=1.69.0" },
- { name = "types-protobuf", specifier = ">=5.29.1.20241207" },
-]
-
-[package.metadata.requires-dev]
-dev = [
- { name = "mypy", specifier = ">=1.14.1" },
- { name = "poethepoet", specifier = ">=0.36.0" },
- { name = "ruff", specifier = ">=0.9.1" },
-]
-docs = [
- { name = "enum-tools", extras = ["sphinx"], specifier = ">=0.12.0" },
- { name = "furo", specifier = ">=2024.8.6" },
- { name = "myst-parser", specifier = ">=4.0.0" },
- { name = "sphinx", specifier = "==8.1.3" },
- { name = "sphinx-autodoc-typehints", specifier = ">=3.0.0" },
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
]
-test = [
- { name = "pytest", specifier = ">=8.0.0" },
- { name = "pytest-asyncio", specifier = ">=0.23.0" },
- { name = "pytest-timeout", specifier = ">=2.3.0" },
- { name = "pytest-xdist", specifier = ">=3.5.0" },
+sdist = { url = "https://files.pythonhosted.org/packages/81/69/17425771797c36cded50b7fe44e850315d039f28b15901ab44839e70b593/starlette-1.0.0.tar.gz", hash = "sha256:6a4beaf1f81bb472fd19ea9b918b50dc3a77a6f2e190a12954b25e6ed5eea149", size = 2655289, upload-time = "2026-03-22T18:29:46.779Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/0b/c9/584bc9651441b4ba60cc4d557d8a547b5aff901af35bda3a4ee30c819b82/starlette-1.0.0-py3-none-any.whl", hash = "sha256:d3ec55e0bb321692d275455ddfd3df75fff145d009685eb40dc91fc66b03d38b", size = 72651, upload-time = "2026-03-22T18:29:45.111Z" },
]
[[package]]
@@ -1161,20 +1200,20 @@ wheels = [
[[package]]
name = "types-protobuf"
-version = "5.29.1.20241207"
+version = "6.32.1.20260221"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/70/89/b661a447139f665ccea8e39bfdd52a92f803df4b5de0e6001a3537feaacb/types_protobuf-5.29.1.20241207.tar.gz", hash = "sha256:2ebcadb8ab3ef2e3e2f067e0882906d64ba0dc65fc5b0fd7a8b692315b4a0be9", size = 59190, upload-time = "2024-12-07T02:54:37.951Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/5f/e2/9aa4a3b2469508bd7b4e2ae11cbedaf419222a09a1b94daffcd5efca4023/types_protobuf-6.32.1.20260221.tar.gz", hash = "sha256:6d5fb060a616bfb076cbb61b4b3c3969f5fc8bec5810f9a2f7e648ee5cbcbf6e", size = 64408, upload-time = "2026-02-21T03:55:13.916Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/7e/6e/cdf152187019d6f6d04066b23e48659d961b527e9c6d43b48459d160e332/types_protobuf-5.29.1.20241207-py3-none-any.whl", hash = "sha256:92893c42083e9b718c678badc0af7a9a1307b92afe1599e5cba5f3d35b668b2f", size = 73902, upload-time = "2024-12-07T02:54:36.069Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/e8/1fd38926f9cf031188fbc5a96694203ea6f24b0e34bd64a225ec6f6291ba/types_protobuf-6.32.1.20260221-py3-none-any.whl", hash = "sha256:da7cdd947975964a93c30bfbcc2c6841ee646b318d3816b033adc2c4eb6448e4", size = 77956, upload-time = "2026-02-21T03:55:12.894Z" },
]
[[package]]
name = "typing-extensions"
-version = "4.12.2"
+version = "4.15.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" },
+ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]
[[package]]
@@ -1186,6 +1225,106 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" },
]
+[[package]]
+name = "uvicorn"
+version = "0.43.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "click" },
+ { name = "h11" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/62/f2/368268300fb8af33743508d738ef7bb4d56afdb46c6d9c0fa3dd515df171/uvicorn-0.43.0.tar.gz", hash = "sha256:ab1652d2fb23abf124f36ccc399828558880def222c3cb3d98d24021520dc6e8", size = 85686, upload-time = "2026-04-03T18:37:48.984Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/55/df/0cf5b0c451602748fdc7a702d4667f6e209bf96aa6e3160d754234445f2a/uvicorn-0.43.0-py3-none-any.whl", hash = "sha256:46fac64f487fd968cd999e5e49efbbe64bd231b5bd8b4a0b482a23ebce499620", size = 68591, upload-time = "2026-04-03T18:37:47.64Z" },
+]
+
+[[package]]
+name = "watchfiles"
+version = "1.1.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c2/c9/8869df9b2a2d6c59d79220a4db37679e74f807c559ffe5265e08b227a210/watchfiles-1.1.1.tar.gz", hash = "sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2", size = 94440, upload-time = "2025-10-14T15:06:21.08Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1f/f8/2c5f479fb531ce2f0564eda479faecf253d886b1ab3630a39b7bf7362d46/watchfiles-1.1.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:f57b396167a2565a4e8b5e56a5a1c537571733992b226f4f1197d79e94cf0ae5", size = 406529, upload-time = "2025-10-14T15:04:32.899Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/cd/f515660b1f32f65df671ddf6f85bfaca621aee177712874dc30a97397977/watchfiles-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:421e29339983e1bebc281fab40d812742268ad057db4aee8c4d2bce0af43b741", size = 394384, upload-time = "2025-10-14T15:04:33.761Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/c3/28b7dc99733eab43fca2d10f55c86e03bd6ab11ca31b802abac26b23d161/watchfiles-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e43d39a741e972bab5d8100b5cdacf69db64e34eb19b6e9af162bccf63c5cc6", size = 448789, upload-time = "2025-10-14T15:04:34.679Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/24/33e71113b320030011c8e4316ccca04194bf0cbbaeee207f00cbc7d6b9f5/watchfiles-1.1.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f537afb3276d12814082a2e9b242bdcf416c2e8fd9f799a737990a1dbe906e5b", size = 460521, upload-time = "2025-10-14T15:04:35.963Z" },
+ { url = "https://files.pythonhosted.org/packages/f4/c3/3c9a55f255aa57b91579ae9e98c88704955fa9dac3e5614fb378291155df/watchfiles-1.1.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b2cd9e04277e756a2e2d2543d65d1e2166d6fd4c9b183f8808634fda23f17b14", size = 488722, upload-time = "2025-10-14T15:04:37.091Z" },
+ { url = "https://files.pythonhosted.org/packages/49/36/506447b73eb46c120169dc1717fe2eff07c234bb3232a7200b5f5bd816e9/watchfiles-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3f58818dc0b07f7d9aa7fe9eb1037aecb9700e63e1f6acfed13e9fef648f5d", size = 596088, upload-time = "2025-10-14T15:04:38.39Z" },
+ { url = "https://files.pythonhosted.org/packages/82/ab/5f39e752a9838ec4d52e9b87c1e80f1ee3ccdbe92e183c15b6577ab9de16/watchfiles-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bb9f66367023ae783551042d31b1d7fd422e8289eedd91f26754a66f44d5cff", size = 472923, upload-time = "2025-10-14T15:04:39.666Z" },
+ { url = "https://files.pythonhosted.org/packages/af/b9/a419292f05e302dea372fa7e6fda5178a92998411f8581b9830d28fb9edb/watchfiles-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aebfd0861a83e6c3d1110b78ad54704486555246e542be3e2bb94195eabb2606", size = 456080, upload-time = "2025-10-14T15:04:40.643Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/c3/d5932fd62bde1a30c36e10c409dc5d54506726f08cb3e1d8d0ba5e2bc8db/watchfiles-1.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5fac835b4ab3c6487b5dbad78c4b3724e26bcc468e886f8ba8cc4306f68f6701", size = 629432, upload-time = "2025-10-14T15:04:41.789Z" },
+ { url = "https://files.pythonhosted.org/packages/f7/77/16bddd9779fafb795f1a94319dc965209c5641db5bf1edbbccace6d1b3c0/watchfiles-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:399600947b170270e80134ac854e21b3ccdefa11a9529a3decc1327088180f10", size = 623046, upload-time = "2025-10-14T15:04:42.718Z" },
+ { url = "https://files.pythonhosted.org/packages/46/ef/f2ecb9a0f342b4bfad13a2787155c6ee7ce792140eac63a34676a2feeef2/watchfiles-1.1.1-cp311-cp311-win32.whl", hash = "sha256:de6da501c883f58ad50db3a32ad397b09ad29865b5f26f64c24d3e3281685849", size = 271473, upload-time = "2025-10-14T15:04:43.624Z" },
+ { url = "https://files.pythonhosted.org/packages/94/bc/f42d71125f19731ea435c3948cad148d31a64fccde3867e5ba4edee901f9/watchfiles-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:35c53bd62a0b885bf653ebf6b700d1bf05debb78ad9292cf2a942b23513dc4c4", size = 287598, upload-time = "2025-10-14T15:04:44.516Z" },
+ { url = "https://files.pythonhosted.org/packages/57/c9/a30f897351f95bbbfb6abcadafbaca711ce1162f4db95fc908c98a9165f3/watchfiles-1.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:57ca5281a8b5e27593cb7d82c2ac927ad88a96ed406aa446f6344e4328208e9e", size = 277210, upload-time = "2025-10-14T15:04:45.883Z" },
+ { url = "https://files.pythonhosted.org/packages/74/d5/f039e7e3c639d9b1d09b07ea412a6806d38123f0508e5f9b48a87b0a76cc/watchfiles-1.1.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:8c89f9f2f740a6b7dcc753140dd5e1ab9215966f7a3530d0c0705c83b401bd7d", size = 404745, upload-time = "2025-10-14T15:04:46.731Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/96/a881a13aa1349827490dab2d363c8039527060cfcc2c92cc6d13d1b1049e/watchfiles-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:bd404be08018c37350f0d6e34676bd1e2889990117a2b90070b3007f172d0610", size = 391769, upload-time = "2025-10-14T15:04:48.003Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/5b/d3b460364aeb8da471c1989238ea0e56bec24b6042a68046adf3d9ddb01c/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8526e8f916bb5b9a0a777c8317c23ce65de259422bba5b31325a6fa6029d33af", size = 449374, upload-time = "2025-10-14T15:04:49.179Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/44/5769cb62d4ed055cb17417c0a109a92f007114a4e07f30812a73a4efdb11/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2edc3553362b1c38d9f06242416a5d8e9fe235c204a4072e988ce2e5bb1f69f6", size = 459485, upload-time = "2025-10-14T15:04:50.155Z" },
+ { url = "https://files.pythonhosted.org/packages/19/0c/286b6301ded2eccd4ffd0041a1b726afda999926cf720aab63adb68a1e36/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30f7da3fb3f2844259cba4720c3fc7138eb0f7b659c38f3bfa65084c7fc7abce", size = 488813, upload-time = "2025-10-14T15:04:51.059Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/2b/8530ed41112dd4a22f4dcfdb5ccf6a1baad1ff6eed8dc5a5f09e7e8c41c7/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8979280bdafff686ba5e4d8f97840f929a87ed9cdf133cbbd42f7766774d2aa", size = 594816, upload-time = "2025-10-14T15:04:52.031Z" },
+ { url = "https://files.pythonhosted.org/packages/ce/d2/f5f9fb49489f184f18470d4f99f4e862a4b3e9ac2865688eb2099e3d837a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dcc5c24523771db3a294c77d94771abcfcb82a0e0ee8efd910c37c59ec1b31bb", size = 475186, upload-time = "2025-10-14T15:04:53.064Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/68/5707da262a119fb06fbe214d82dd1fe4a6f4af32d2d14de368d0349eb52a/watchfiles-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1db5d7ae38ff20153d542460752ff397fcf5c96090c1230803713cf3147a6803", size = 456812, upload-time = "2025-10-14T15:04:55.174Z" },
+ { url = "https://files.pythonhosted.org/packages/66/ab/3cbb8756323e8f9b6f9acb9ef4ec26d42b2109bce830cc1f3468df20511d/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:28475ddbde92df1874b6c5c8aaeb24ad5be47a11f87cde5a28ef3835932e3e94", size = 630196, upload-time = "2025-10-14T15:04:56.22Z" },
+ { url = "https://files.pythonhosted.org/packages/78/46/7152ec29b8335f80167928944a94955015a345440f524d2dfe63fc2f437b/watchfiles-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:36193ed342f5b9842edd3532729a2ad55c4160ffcfa3700e0d54be496b70dd43", size = 622657, upload-time = "2025-10-14T15:04:57.521Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/bf/95895e78dd75efe9a7f31733607f384b42eb5feb54bd2eb6ed57cc2e94f4/watchfiles-1.1.1-cp312-cp312-win32.whl", hash = "sha256:859e43a1951717cc8de7f4c77674a6d389b106361585951d9e69572823f311d9", size = 272042, upload-time = "2025-10-14T15:04:59.046Z" },
+ { url = "https://files.pythonhosted.org/packages/87/0a/90eb755f568de2688cb220171c4191df932232c20946966c27a59c400850/watchfiles-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:91d4c9a823a8c987cce8fa2690923b069966dabb196dd8d137ea2cede885fde9", size = 288410, upload-time = "2025-10-14T15:05:00.081Z" },
+ { url = "https://files.pythonhosted.org/packages/36/76/f322701530586922fbd6723c4f91ace21364924822a8772c549483abed13/watchfiles-1.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:a625815d4a2bdca61953dbba5a39d60164451ef34c88d751f6c368c3ea73d404", size = 278209, upload-time = "2025-10-14T15:05:01.168Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/f4/f750b29225fe77139f7ae5de89d4949f5a99f934c65a1f1c0b248f26f747/watchfiles-1.1.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:130e4876309e8686a5e37dba7d5e9bc77e6ed908266996ca26572437a5271e18", size = 404321, upload-time = "2025-10-14T15:05:02.063Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/f9/f07a295cde762644aa4c4bb0f88921d2d141af45e735b965fb2e87858328/watchfiles-1.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5f3bde70f157f84ece3765b42b4a52c6ac1a50334903c6eaf765362f6ccca88a", size = 391783, upload-time = "2025-10-14T15:05:03.052Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/11/fc2502457e0bea39a5c958d86d2cb69e407a4d00b85735ca724bfa6e0d1a/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e0b1fe858430fc0251737ef3824c54027bedb8c37c38114488b8e131cf8219", size = 449279, upload-time = "2025-10-14T15:05:04.004Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/1f/d66bc15ea0b728df3ed96a539c777acfcad0eb78555ad9efcaa1274688f0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f27db948078f3823a6bb3b465180db8ebecf26dd5dae6f6180bd87383b6b4428", size = 459405, upload-time = "2025-10-14T15:05:04.942Z" },
+ { url = "https://files.pythonhosted.org/packages/be/90/9f4a65c0aec3ccf032703e6db02d89a157462fbb2cf20dd415128251cac0/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:059098c3a429f62fc98e8ec62b982230ef2c8df68c79e826e37b895bc359a9c0", size = 488976, upload-time = "2025-10-14T15:05:05.905Z" },
+ { url = "https://files.pythonhosted.org/packages/37/57/ee347af605d867f712be7029bb94c8c071732a4b44792e3176fa3c612d39/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfb5862016acc9b869bb57284e6cb35fdf8e22fe59f7548858e2f971d045f150", size = 595506, upload-time = "2025-10-14T15:05:06.906Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/78/cc5ab0b86c122047f75e8fc471c67a04dee395daf847d3e59381996c8707/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:319b27255aacd9923b8a276bb14d21a5f7ff82564c744235fc5eae58d95422ae", size = 474936, upload-time = "2025-10-14T15:05:07.906Z" },
+ { url = "https://files.pythonhosted.org/packages/62/da/def65b170a3815af7bd40a3e7010bf6ab53089ef1b75d05dd5385b87cf08/watchfiles-1.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c755367e51db90e75b19454b680903631d41f9e3607fbd941d296a020c2d752d", size = 456147, upload-time = "2025-10-14T15:05:09.138Z" },
+ { url = "https://files.pythonhosted.org/packages/57/99/da6573ba71166e82d288d4df0839128004c67d2778d3b566c138695f5c0b/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c22c776292a23bfc7237a98f791b9ad3144b02116ff10d820829ce62dff46d0b", size = 630007, upload-time = "2025-10-14T15:05:10.117Z" },
+ { url = "https://files.pythonhosted.org/packages/a8/51/7439c4dd39511368849eb1e53279cd3454b4a4dbace80bab88feeb83c6b5/watchfiles-1.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3a476189be23c3686bc2f4321dd501cb329c0a0469e77b7b534ee10129ae6374", size = 622280, upload-time = "2025-10-14T15:05:11.146Z" },
+ { url = "https://files.pythonhosted.org/packages/95/9c/8ed97d4bba5db6fdcdb2b298d3898f2dd5c20f6b73aee04eabe56c59677e/watchfiles-1.1.1-cp313-cp313-win32.whl", hash = "sha256:bf0a91bfb5574a2f7fc223cf95eeea79abfefa404bf1ea5e339c0c1560ae99a0", size = 272056, upload-time = "2025-10-14T15:05:12.156Z" },
+ { url = "https://files.pythonhosted.org/packages/1f/f3/c14e28429f744a260d8ceae18bf58c1d5fa56b50d006a7a9f80e1882cb0d/watchfiles-1.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:52e06553899e11e8074503c8e716d574adeeb7e68913115c4b3653c53f9bae42", size = 288162, upload-time = "2025-10-14T15:05:13.208Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/61/fe0e56c40d5cd29523e398d31153218718c5786b5e636d9ae8ae79453d27/watchfiles-1.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac3cc5759570cd02662b15fbcd9d917f7ecd47efe0d6b40474eafd246f91ea18", size = 277909, upload-time = "2025-10-14T15:05:14.49Z" },
+ { url = "https://files.pythonhosted.org/packages/79/42/e0a7d749626f1e28c7108a99fb9bf524b501bbbeb9b261ceecde644d5a07/watchfiles-1.1.1-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:563b116874a9a7ce6f96f87cd0b94f7faf92d08d0021e837796f0a14318ef8da", size = 403389, upload-time = "2025-10-14T15:05:15.777Z" },
+ { url = "https://files.pythonhosted.org/packages/15/49/08732f90ce0fbbc13913f9f215c689cfc9ced345fb1bcd8829a50007cc8d/watchfiles-1.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3ad9fe1dae4ab4212d8c91e80b832425e24f421703b5a42ef2e4a1e215aff051", size = 389964, upload-time = "2025-10-14T15:05:16.85Z" },
+ { url = "https://files.pythonhosted.org/packages/27/0d/7c315d4bd5f2538910491a0393c56bf70d333d51bc5b34bee8e68e8cea19/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce70f96a46b894b36eba678f153f052967a0d06d5b5a19b336ab0dbbd029f73e", size = 448114, upload-time = "2025-10-14T15:05:17.876Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/24/9e096de47a4d11bc4df41e9d1e61776393eac4cb6eb11b3e23315b78b2cc/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cb467c999c2eff23a6417e58d75e5828716f42ed8289fe6b77a7e5a91036ca70", size = 460264, upload-time = "2025-10-14T15:05:18.962Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/0f/e8dea6375f1d3ba5fcb0b3583e2b493e77379834c74fd5a22d66d85d6540/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:836398932192dae4146c8f6f737d74baeac8b70ce14831a239bdb1ca882fc261", size = 487877, upload-time = "2025-10-14T15:05:20.094Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/5b/df24cfc6424a12deb41503b64d42fbea6b8cb357ec62ca84a5a3476f654a/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:743185e7372b7bc7c389e1badcc606931a827112fbbd37f14c537320fca08620", size = 595176, upload-time = "2025-10-14T15:05:21.134Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/b5/853b6757f7347de4e9b37e8cc3289283fb983cba1ab4d2d7144694871d9c/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afaeff7696e0ad9f02cbb8f56365ff4686ab205fcf9c4c5b6fdfaaa16549dd04", size = 473577, upload-time = "2025-10-14T15:05:22.306Z" },
+ { url = "https://files.pythonhosted.org/packages/e1/f7/0a4467be0a56e80447c8529c9fce5b38eab4f513cb3d9bf82e7392a5696b/watchfiles-1.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f7eb7da0eb23aa2ba036d4f616d46906013a68caf61b7fdbe42fc8b25132e77", size = 455425, upload-time = "2025-10-14T15:05:23.348Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/e0/82583485ea00137ddf69bc84a2db88bd92ab4a6e3c405e5fb878ead8d0e7/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:831a62658609f0e5c64178211c942ace999517f5770fe9436be4c2faeba0c0ef", size = 628826, upload-time = "2025-10-14T15:05:24.398Z" },
+ { url = "https://files.pythonhosted.org/packages/28/9a/a785356fccf9fae84c0cc90570f11702ae9571036fb25932f1242c82191c/watchfiles-1.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:f9a2ae5c91cecc9edd47e041a930490c31c3afb1f5e6d71de3dc671bfaca02bf", size = 622208, upload-time = "2025-10-14T15:05:25.45Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/f4/0872229324ef69b2c3edec35e84bd57a1289e7d3fe74588048ed8947a323/watchfiles-1.1.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:d1715143123baeeaeadec0528bb7441103979a1d5f6fd0e1f915383fea7ea6d5", size = 404315, upload-time = "2025-10-14T15:05:26.501Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/22/16d5331eaed1cb107b873f6ae1b69e9ced582fcf0c59a50cd84f403b1c32/watchfiles-1.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:39574d6370c4579d7f5d0ad940ce5b20db0e4117444e39b6d8f99db5676c52fd", size = 390869, upload-time = "2025-10-14T15:05:27.649Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/7e/5643bfff5acb6539b18483128fdc0ef2cccc94a5b8fbda130c823e8ed636/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7365b92c2e69ee952902e8f70f3ba6360d0d596d9299d55d7d386df84b6941fb", size = 449919, upload-time = "2025-10-14T15:05:28.701Z" },
+ { url = "https://files.pythonhosted.org/packages/51/2e/c410993ba5025a9f9357c376f48976ef0e1b1aefb73b97a5ae01a5972755/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfff9740c69c0e4ed32416f013f3c45e2ae42ccedd1167ef2d805c000b6c71a5", size = 460845, upload-time = "2025-10-14T15:05:30.064Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/a4/2df3b404469122e8680f0fcd06079317e48db58a2da2950fb45020947734/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b27cf2eb1dda37b2089e3907d8ea92922b673c0c427886d4edc6b94d8dfe5db3", size = 489027, upload-time = "2025-10-14T15:05:31.064Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/84/4587ba5b1f267167ee715b7f66e6382cca6938e0a4b870adad93e44747e6/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:526e86aced14a65a5b0ec50827c745597c782ff46b571dbfe46192ab9e0b3c33", size = 595615, upload-time = "2025-10-14T15:05:32.074Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/0f/c6988c91d06e93cd0bb3d4a808bcf32375ca1904609835c3031799e3ecae/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04e78dd0b6352db95507fd8cb46f39d185cf8c74e4cf1e4fbad1d3df96faf510", size = 474836, upload-time = "2025-10-14T15:05:33.209Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/36/ded8aebea91919485b7bbabbd14f5f359326cb5ec218cd67074d1e426d74/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c85794a4cfa094714fb9c08d4a218375b2b95b8ed1666e8677c349906246c05", size = 455099, upload-time = "2025-10-14T15:05:34.189Z" },
+ { url = "https://files.pythonhosted.org/packages/98/e0/8c9bdba88af756a2fce230dd365fab2baf927ba42cd47521ee7498fd5211/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:74d5012b7630714b66be7b7b7a78855ef7ad58e8650c73afc4c076a1f480a8d6", size = 630626, upload-time = "2025-10-14T15:05:35.216Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/84/a95db05354bf2d19e438520d92a8ca475e578c647f78f53197f5a2f17aaf/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:8fbe85cb3201c7d380d3d0b90e63d520f15d6afe217165d7f98c9c649654db81", size = 622519, upload-time = "2025-10-14T15:05:36.259Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/ce/d8acdc8de545de995c339be67711e474c77d643555a9bb74a9334252bd55/watchfiles-1.1.1-cp314-cp314-win32.whl", hash = "sha256:3fa0b59c92278b5a7800d3ee7733da9d096d4aabcfabb9a928918bd276ef9b9b", size = 272078, upload-time = "2025-10-14T15:05:37.63Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/c9/a74487f72d0451524be827e8edec251da0cc1fcf111646a511ae752e1a3d/watchfiles-1.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:c2047d0b6cea13b3316bdbafbfa0c4228ae593d995030fda39089d36e64fc03a", size = 287664, upload-time = "2025-10-14T15:05:38.95Z" },
+ { url = "https://files.pythonhosted.org/packages/df/b8/8ac000702cdd496cdce998c6f4ee0ca1f15977bba51bdf07d872ebdfc34c/watchfiles-1.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:842178b126593addc05acf6fce960d28bc5fae7afbaa2c6c1b3a7b9460e5be02", size = 277154, upload-time = "2025-10-14T15:05:39.954Z" },
+ { url = "https://files.pythonhosted.org/packages/47/a8/e3af2184707c29f0f14b1963c0aace6529f9d1b8582d5b99f31bbf42f59e/watchfiles-1.1.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:88863fbbc1a7312972f1c511f202eb30866370ebb8493aef2812b9ff28156a21", size = 403820, upload-time = "2025-10-14T15:05:40.932Z" },
+ { url = "https://files.pythonhosted.org/packages/c0/ec/e47e307c2f4bd75f9f9e8afbe3876679b18e1bcec449beca132a1c5ffb2d/watchfiles-1.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:55c7475190662e202c08c6c0f4d9e345a29367438cf8e8037f3155e10a88d5a5", size = 390510, upload-time = "2025-10-14T15:05:41.945Z" },
+ { url = "https://files.pythonhosted.org/packages/d5/a0/ad235642118090f66e7b2f18fd5c42082418404a79205cdfca50b6309c13/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f53fa183d53a1d7a8852277c92b967ae99c2d4dcee2bfacff8868e6e30b15f7", size = 448408, upload-time = "2025-10-14T15:05:43.385Z" },
+ { url = "https://files.pythonhosted.org/packages/df/85/97fa10fd5ff3332ae17e7e40e20784e419e28521549780869f1413742e9d/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6aae418a8b323732fa89721d86f39ec8f092fc2af67f4217a2b07fd3e93c6101", size = 458968, upload-time = "2025-10-14T15:05:44.404Z" },
+ { url = "https://files.pythonhosted.org/packages/47/c2/9059c2e8966ea5ce678166617a7f75ecba6164375f3b288e50a40dc6d489/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f096076119da54a6080e8920cbdaac3dbee667eb91dcc5e5b78840b87415bd44", size = 488096, upload-time = "2025-10-14T15:05:45.398Z" },
+ { url = "https://files.pythonhosted.org/packages/94/44/d90a9ec8ac309bc26db808a13e7bfc0e4e78b6fc051078a554e132e80160/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00485f441d183717038ed2e887a7c868154f216877653121068107b227a2f64c", size = 596040, upload-time = "2025-10-14T15:05:46.502Z" },
+ { url = "https://files.pythonhosted.org/packages/95/68/4e3479b20ca305cfc561db3ed207a8a1c745ee32bf24f2026a129d0ddb6e/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a55f3e9e493158d7bfdb60a1165035f1cf7d320914e7b7ea83fe22c6023b58fc", size = 473847, upload-time = "2025-10-14T15:05:47.484Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/55/2af26693fd15165c4ff7857e38330e1b61ab8c37d15dc79118cdba115b7a/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c91ed27800188c2ae96d16e3149f199d62f86c7af5f5f4d2c61a3ed8cd3666c", size = 455072, upload-time = "2025-10-14T15:05:48.928Z" },
+ { url = "https://files.pythonhosted.org/packages/66/1d/d0d200b10c9311ec25d2273f8aad8c3ef7cc7ea11808022501811208a750/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:311ff15a0bae3714ffb603e6ba6dbfba4065ab60865d15a6ec544133bdb21099", size = 629104, upload-time = "2025-10-14T15:05:49.908Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/bd/fa9bb053192491b3867ba07d2343d9f2252e00811567d30ae8d0f78136fe/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:a916a2932da8f8ab582f242c065f5c81bed3462849ca79ee357dd9551b0e9b01", size = 622112, upload-time = "2025-10-14T15:05:50.941Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/8e/e500f8b0b77be4ff753ac94dc06b33d8f0d839377fee1b78e8c8d8f031bf/watchfiles-1.1.1-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:db476ab59b6765134de1d4fe96a1a9c96ddf091683599be0f26147ea1b2e4b88", size = 408250, upload-time = "2025-10-14T15:06:10.264Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/95/615e72cd27b85b61eec764a5ca51bd94d40b5adea5ff47567d9ebc4d275a/watchfiles-1.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:89eef07eee5e9d1fda06e38822ad167a044153457e6fd997f8a858ab7564a336", size = 396117, upload-time = "2025-10-14T15:06:11.28Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/81/e7fe958ce8a7fb5c73cc9fb07f5aeaf755e6aa72498c57d760af760c91f8/watchfiles-1.1.1-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ce19e06cbda693e9e7686358af9cd6f5d61312ab8b00488bc36f5aabbaf77e24", size = 450493, upload-time = "2025-10-14T15:06:12.321Z" },
+ { url = "https://files.pythonhosted.org/packages/6e/d4/ed38dd3b1767193de971e694aa544356e63353c33a85d948166b5ff58b9e/watchfiles-1.1.1-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e6f39af2eab0118338902798b5aa6664f46ff66bc0280de76fca67a7f262a49", size = 457546, upload-time = "2025-10-14T15:06:13.372Z" },
+]
+
[[package]]
name = "webencodings"
version = "0.5.1"
@@ -1194,3 +1333,136 @@ sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda5308
wheels = [
{ url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" },
]
+
+[[package]]
+name = "websockets"
+version = "16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f2/db/de907251b4ff46ae804ad0409809504153b3f30984daf82a1d84a9875830/websockets-16.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:31a52addea25187bde0797a97d6fc3d2f92b6f72a9370792d65a6e84615ac8a8", size = 177340, upload-time = "2026-01-10T09:22:34.539Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/fa/abe89019d8d8815c8781e90d697dec52523fb8ebe308bf11664e8de1877e/websockets-16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:417b28978cdccab24f46400586d128366313e8a96312e4b9362a4af504f3bbad", size = 175022, upload-time = "2026-01-10T09:22:36.332Z" },
+ { url = "https://files.pythonhosted.org/packages/58/5d/88ea17ed1ded2079358b40d31d48abe90a73c9e5819dbcde1606e991e2ad/websockets-16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af80d74d4edfa3cb9ed973a0a5ba2b2a549371f8a741e0800cb07becdd20f23d", size = 175319, upload-time = "2026-01-10T09:22:37.602Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/ae/0ee92b33087a33632f37a635e11e1d99d429d3d323329675a6022312aac2/websockets-16.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:08d7af67b64d29823fed316505a89b86705f2b7981c07848fb5e3ea3020c1abe", size = 184631, upload-time = "2026-01-10T09:22:38.789Z" },
+ { url = "https://files.pythonhosted.org/packages/c8/c5/27178df583b6c5b31b29f526ba2da5e2f864ecc79c99dae630a85d68c304/websockets-16.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7be95cfb0a4dae143eaed2bcba8ac23f4892d8971311f1b06f3c6b78952ee70b", size = 185870, upload-time = "2026-01-10T09:22:39.893Z" },
+ { url = "https://files.pythonhosted.org/packages/87/05/536652aa84ddc1c018dbb7e2c4cbcd0db884580bf8e95aece7593fde526f/websockets-16.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d6297ce39ce5c2e6feb13c1a996a2ded3b6832155fcfc920265c76f24c7cceb5", size = 185361, upload-time = "2026-01-10T09:22:41.016Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/e2/d5332c90da12b1e01f06fb1b85c50cfc489783076547415bf9f0a659ec19/websockets-16.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1c1b30e4f497b0b354057f3467f56244c603a79c0d1dafce1d16c283c25f6e64", size = 184615, upload-time = "2026-01-10T09:22:42.442Z" },
+ { url = "https://files.pythonhosted.org/packages/77/fb/d3f9576691cae9253b51555f841bc6600bf0a983a461c79500ace5a5b364/websockets-16.0-cp311-cp311-win32.whl", hash = "sha256:5f451484aeb5cafee1ccf789b1b66f535409d038c56966d6101740c1614b86c6", size = 178246, upload-time = "2026-01-10T09:22:43.654Z" },
+ { url = "https://files.pythonhosted.org/packages/54/67/eaff76b3dbaf18dcddabc3b8c1dba50b483761cccff67793897945b37408/websockets-16.0-cp311-cp311-win_amd64.whl", hash = "sha256:8d7f0659570eefb578dacde98e24fb60af35350193e4f56e11190787bee77dac", size = 178684, upload-time = "2026-01-10T09:22:44.941Z" },
+ { url = "https://files.pythonhosted.org/packages/84/7b/bac442e6b96c9d25092695578dda82403c77936104b5682307bd4deb1ad4/websockets-16.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:71c989cbf3254fbd5e84d3bff31e4da39c43f884e64f2551d14bb3c186230f00", size = 177365, upload-time = "2026-01-10T09:22:46.787Z" },
+ { url = "https://files.pythonhosted.org/packages/b0/fe/136ccece61bd690d9c1f715baaeefd953bb2360134de73519d5df19d29ca/websockets-16.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8b6e209ffee39ff1b6d0fa7bfef6de950c60dfb91b8fcead17da4ee539121a79", size = 175038, upload-time = "2026-01-10T09:22:47.999Z" },
+ { url = "https://files.pythonhosted.org/packages/40/1e/9771421ac2286eaab95b8575b0cb701ae3663abf8b5e1f64f1fd90d0a673/websockets-16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:86890e837d61574c92a97496d590968b23c2ef0aeb8a9bc9421d174cd378ae39", size = 175328, upload-time = "2026-01-10T09:22:49.809Z" },
+ { url = "https://files.pythonhosted.org/packages/18/29/71729b4671f21e1eaa5d6573031ab810ad2936c8175f03f97f3ff164c802/websockets-16.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:9b5aca38b67492ef518a8ab76851862488a478602229112c4b0d58d63a7a4d5c", size = 184915, upload-time = "2026-01-10T09:22:51.071Z" },
+ { url = "https://files.pythonhosted.org/packages/97/bb/21c36b7dbbafc85d2d480cd65df02a1dc93bf76d97147605a8e27ff9409d/websockets-16.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e0334872c0a37b606418ac52f6ab9cfd17317ac26365f7f65e203e2d0d0d359f", size = 186152, upload-time = "2026-01-10T09:22:52.224Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/34/9bf8df0c0cf88fa7bfe36678dc7b02970c9a7d5e065a3099292db87b1be2/websockets-16.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a0b31e0b424cc6b5a04b8838bbaec1688834b2383256688cf47eb97412531da1", size = 185583, upload-time = "2026-01-10T09:22:53.443Z" },
+ { url = "https://files.pythonhosted.org/packages/47/88/4dd516068e1a3d6ab3c7c183288404cd424a9a02d585efbac226cb61ff2d/websockets-16.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:485c49116d0af10ac698623c513c1cc01c9446c058a4e61e3bf6c19dff7335a2", size = 184880, upload-time = "2026-01-10T09:22:55.033Z" },
+ { url = "https://files.pythonhosted.org/packages/91/d6/7d4553ad4bf1c0421e1ebd4b18de5d9098383b5caa1d937b63df8d04b565/websockets-16.0-cp312-cp312-win32.whl", hash = "sha256:eaded469f5e5b7294e2bdca0ab06becb6756ea86894a47806456089298813c89", size = 178261, upload-time = "2026-01-10T09:22:56.251Z" },
+ { url = "https://files.pythonhosted.org/packages/c3/f0/f3a17365441ed1c27f850a80b2bc680a0fa9505d733fe152fdf5e98c1c0b/websockets-16.0-cp312-cp312-win_amd64.whl", hash = "sha256:5569417dc80977fc8c2d43a86f78e0a5a22fee17565d78621b6bb264a115d4ea", size = 178693, upload-time = "2026-01-10T09:22:57.478Z" },
+ { url = "https://files.pythonhosted.org/packages/cc/9c/baa8456050d1c1b08dd0ec7346026668cbc6f145ab4e314d707bb845bf0d/websockets-16.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:878b336ac47938b474c8f982ac2f7266a540adc3fa4ad74ae96fea9823a02cc9", size = 177364, upload-time = "2026-01-10T09:22:59.333Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/0c/8811fc53e9bcff68fe7de2bcbe75116a8d959ac699a3200f4847a8925210/websockets-16.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:52a0fec0e6c8d9a784c2c78276a48a2bdf099e4ccc2a4cad53b27718dbfd0230", size = 175039, upload-time = "2026-01-10T09:23:01.171Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/82/39a5f910cb99ec0b59e482971238c845af9220d3ab9fa76dd9162cda9d62/websockets-16.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e6578ed5b6981005df1860a56e3617f14a6c307e6a71b4fff8c48fdc50f3ed2c", size = 175323, upload-time = "2026-01-10T09:23:02.341Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/28/0a25ee5342eb5d5f297d992a77e56892ecb65e7854c7898fb7d35e9b33bd/websockets-16.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:95724e638f0f9c350bb1c2b0a7ad0e83d9cc0c9259f3ea94e40d7b02a2179ae5", size = 184975, upload-time = "2026-01-10T09:23:03.756Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/66/27ea52741752f5107c2e41fda05e8395a682a1e11c4e592a809a90c6a506/websockets-16.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c0204dc62a89dc9d50d682412c10b3542d748260d743500a85c13cd1ee4bde82", size = 186203, upload-time = "2026-01-10T09:23:05.01Z" },
+ { url = "https://files.pythonhosted.org/packages/37/e5/8e32857371406a757816a2b471939d51c463509be73fa538216ea52b792a/websockets-16.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:52ac480f44d32970d66763115edea932f1c5b1312de36df06d6b219f6741eed8", size = 185653, upload-time = "2026-01-10T09:23:06.301Z" },
+ { url = "https://files.pythonhosted.org/packages/9b/67/f926bac29882894669368dc73f4da900fcdf47955d0a0185d60103df5737/websockets-16.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6e5a82b677f8f6f59e8dfc34ec06ca6b5b48bc4fcda346acd093694cc2c24d8f", size = 184920, upload-time = "2026-01-10T09:23:07.492Z" },
+ { url = "https://files.pythonhosted.org/packages/3c/a1/3d6ccdcd125b0a42a311bcd15a7f705d688f73b2a22d8cf1c0875d35d34a/websockets-16.0-cp313-cp313-win32.whl", hash = "sha256:abf050a199613f64c886ea10f38b47770a65154dc37181bfaff70c160f45315a", size = 178255, upload-time = "2026-01-10T09:23:09.245Z" },
+ { url = "https://files.pythonhosted.org/packages/6b/ae/90366304d7c2ce80f9b826096a9e9048b4bb760e44d3b873bb272cba696b/websockets-16.0-cp313-cp313-win_amd64.whl", hash = "sha256:3425ac5cf448801335d6fdc7ae1eb22072055417a96cc6b31b3861f455fbc156", size = 178689, upload-time = "2026-01-10T09:23:10.483Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" },
+ { url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" },
+ { url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" },
+ { url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" },
+ { url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" },
+ { url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" },
+ { url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" },
+ { url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" },
+ { url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" },
+ { url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" },
+ { url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" },
+ { url = "https://files.pythonhosted.org/packages/72/07/c98a68571dcf256e74f1f816b8cc5eae6eb2d3d5cfa44d37f801619d9166/websockets-16.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:349f83cd6c9a415428ee1005cadb5c2c56f4389bc06a9af16103c3bc3dcc8b7d", size = 174947, upload-time = "2026-01-10T09:23:36.166Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/52/93e166a81e0305b33fe416338be92ae863563fe7bce446b0f687b9df5aea/websockets-16.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a1aba3340a8dca8db6eb5a7986157f52eb9e436b74813764241981ca4888f03", size = 175260, upload-time = "2026-01-10T09:23:37.409Z" },
+ { url = "https://files.pythonhosted.org/packages/56/0c/2dbf513bafd24889d33de2ff0368190a0e69f37bcfa19009ef819fe4d507/websockets-16.0-pp311-pypy311_pp73-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f4a32d1bd841d4bcbffdcb3d2ce50c09c3909fbead375ab28d0181af89fd04da", size = 176071, upload-time = "2026-01-10T09:23:39.158Z" },
+ { url = "https://files.pythonhosted.org/packages/a5/8f/aea9c71cc92bf9b6cc0f7f70df8f0b420636b6c96ef4feee1e16f80f75dd/websockets-16.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0298d07ee155e2e9fda5be8a9042200dd2e3bb0b8a38482156576f863a9d457c", size = 176968, upload-time = "2026-01-10T09:23:41.031Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/3f/f70e03f40ffc9a30d817eef7da1be72ee4956ba8d7255c399a01b135902a/websockets-16.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a653aea902e0324b52f1613332ddf50b00c06fdaf7e92624fbf8c77c78fa5767", size = 178735, upload-time = "2026-01-10T09:23:42.259Z" },
+ { url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" },
+]
+
+[[package]]
+name = "zstandard"
+version = "0.25.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fd/aa/3e0508d5a5dd96529cdc5a97011299056e14c6505b678fd58938792794b1/zstandard-0.25.0.tar.gz", hash = "sha256:7713e1179d162cf5c7906da876ec2ccb9c3a9dcbdffef0cc7f70c3667a205f0b", size = 711513, upload-time = "2025-09-14T22:15:54.002Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2a/83/c3ca27c363d104980f1c9cee1101cc8ba724ac8c28a033ede6aab89585b1/zstandard-0.25.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:933b65d7680ea337180733cf9e87293cc5500cc0eb3fc8769f4d3c88d724ec5c", size = 795254, upload-time = "2025-09-14T22:16:26.137Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/4d/e66465c5411a7cf4866aeadc7d108081d8ceba9bc7abe6b14aa21c671ec3/zstandard-0.25.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3f79487c687b1fc69f19e487cd949bf3aae653d181dfb5fde3bf6d18894706f", size = 640559, upload-time = "2025-09-14T22:16:27.973Z" },
+ { url = "https://files.pythonhosted.org/packages/12/56/354fe655905f290d3b147b33fe946b0f27e791e4b50a5f004c802cb3eb7b/zstandard-0.25.0-cp311-cp311-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:0bbc9a0c65ce0eea3c34a691e3c4b6889f5f3909ba4822ab385fab9057099431", size = 5348020, upload-time = "2025-09-14T22:16:29.523Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/13/2b7ed68bd85e69a2069bcc72141d378f22cae5a0f3b353a2c8f50ef30c1b/zstandard-0.25.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:01582723b3ccd6939ab7b3a78622c573799d5d8737b534b86d0e06ac18dbde4a", size = 5058126, upload-time = "2025-09-14T22:16:31.811Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/dd/fdaf0674f4b10d92cb120ccff58bbb6626bf8368f00ebfd2a41ba4a0dc99/zstandard-0.25.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5f1ad7bf88535edcf30038f6919abe087f606f62c00a87d7e33e7fc57cb69fcc", size = 5405390, upload-time = "2025-09-14T22:16:33.486Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/67/354d1555575bc2490435f90d67ca4dd65238ff2f119f30f72d5cde09c2ad/zstandard-0.25.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:06acb75eebeedb77b69048031282737717a63e71e4ae3f77cc0c3b9508320df6", size = 5452914, upload-time = "2025-09-14T22:16:35.277Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/1f/e9cfd801a3f9190bf3e759c422bbfd2247db9d7f3d54a56ecde70137791a/zstandard-0.25.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9300d02ea7c6506f00e627e287e0492a5eb0371ec1670ae852fefffa6164b072", size = 5559635, upload-time = "2025-09-14T22:16:37.141Z" },
+ { url = "https://files.pythonhosted.org/packages/21/88/5ba550f797ca953a52d708c8e4f380959e7e3280af029e38fbf47b55916e/zstandard-0.25.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfd06b1c5584b657a2892a6014c2f4c20e0db0208c159148fa78c65f7e0b0277", size = 5048277, upload-time = "2025-09-14T22:16:38.807Z" },
+ { url = "https://files.pythonhosted.org/packages/46/c0/ca3e533b4fa03112facbe7fbe7779cb1ebec215688e5df576fe5429172e0/zstandard-0.25.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f373da2c1757bb7f1acaf09369cdc1d51d84131e50d5fa9863982fd626466313", size = 5574377, upload-time = "2025-09-14T22:16:40.523Z" },
+ { url = "https://files.pythonhosted.org/packages/12/9b/3fb626390113f272abd0799fd677ea33d5fc3ec185e62e6be534493c4b60/zstandard-0.25.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6c0e5a65158a7946e7a7affa6418878ef97ab66636f13353b8502d7ea03c8097", size = 4961493, upload-time = "2025-09-14T22:16:43.3Z" },
+ { url = "https://files.pythonhosted.org/packages/cb/d3/23094a6b6a4b1343b27ae68249daa17ae0651fcfec9ed4de09d14b940285/zstandard-0.25.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:c8e167d5adf59476fa3e37bee730890e389410c354771a62e3c076c86f9f7778", size = 5269018, upload-time = "2025-09-14T22:16:45.292Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/a7/bb5a0c1c0f3f4b5e9d5b55198e39de91e04ba7c205cc46fcb0f95f0383c1/zstandard-0.25.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:98750a309eb2f020da61e727de7d7ba3c57c97cf6213f6f6277bb7fb42a8e065", size = 5443672, upload-time = "2025-09-14T22:16:47.076Z" },
+ { url = "https://files.pythonhosted.org/packages/27/22/503347aa08d073993f25109c36c8d9f029c7d5949198050962cb568dfa5e/zstandard-0.25.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:22a086cff1b6ceca18a8dd6096ec631e430e93a8e70a9ca5efa7561a00f826fa", size = 5822753, upload-time = "2025-09-14T22:16:49.316Z" },
+ { url = "https://files.pythonhosted.org/packages/e2/be/94267dc6ee64f0f8ba2b2ae7c7a2df934a816baaa7291db9e1aa77394c3c/zstandard-0.25.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:72d35d7aa0bba323965da807a462b0966c91608ef3a48ba761678cb20ce5d8b7", size = 5366047, upload-time = "2025-09-14T22:16:51.328Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/a3/732893eab0a3a7aecff8b99052fecf9f605cf0fb5fb6d0290e36beee47a4/zstandard-0.25.0-cp311-cp311-win32.whl", hash = "sha256:f5aeea11ded7320a84dcdd62a3d95b5186834224a9e55b92ccae35d21a8b63d4", size = 436484, upload-time = "2025-09-14T22:16:55.005Z" },
+ { url = "https://files.pythonhosted.org/packages/43/a3/c6155f5c1cce691cb80dfd38627046e50af3ee9ddc5d0b45b9b063bfb8c9/zstandard-0.25.0-cp311-cp311-win_amd64.whl", hash = "sha256:daab68faadb847063d0c56f361a289c4f268706b598afbf9ad113cbe5c38b6b2", size = 506183, upload-time = "2025-09-14T22:16:52.753Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/3e/8945ab86a0820cc0e0cdbf38086a92868a9172020fdab8a03ac19662b0e5/zstandard-0.25.0-cp311-cp311-win_arm64.whl", hash = "sha256:22a06c5df3751bb7dc67406f5374734ccee8ed37fc5981bf1ad7041831fa1137", size = 462533, upload-time = "2025-09-14T22:16:53.878Z" },
+ { url = "https://files.pythonhosted.org/packages/82/fc/f26eb6ef91ae723a03e16eddb198abcfce2bc5a42e224d44cc8b6765e57e/zstandard-0.25.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7b3c3a3ab9daa3eed242d6ecceead93aebbb8f5f84318d82cee643e019c4b73b", size = 795738, upload-time = "2025-09-14T22:16:56.237Z" },
+ { url = "https://files.pythonhosted.org/packages/aa/1c/d920d64b22f8dd028a8b90e2d756e431a5d86194caa78e3819c7bf53b4b3/zstandard-0.25.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:913cbd31a400febff93b564a23e17c3ed2d56c064006f54efec210d586171c00", size = 640436, upload-time = "2025-09-14T22:16:57.774Z" },
+ { url = "https://files.pythonhosted.org/packages/53/6c/288c3f0bd9fcfe9ca41e2c2fbfd17b2097f6af57b62a81161941f09afa76/zstandard-0.25.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:011d388c76b11a0c165374ce660ce2c8efa8e5d87f34996aa80f9c0816698b64", size = 5343019, upload-time = "2025-09-14T22:16:59.302Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/15/efef5a2f204a64bdb5571e6161d49f7ef0fffdbca953a615efbec045f60f/zstandard-0.25.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6dffecc361d079bb48d7caef5d673c88c8988d3d33fb74ab95b7ee6da42652ea", size = 5063012, upload-time = "2025-09-14T22:17:01.156Z" },
+ { url = "https://files.pythonhosted.org/packages/b7/37/a6ce629ffdb43959e92e87ebdaeebb5ac81c944b6a75c9c47e300f85abdf/zstandard-0.25.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:7149623bba7fdf7e7f24312953bcf73cae103db8cae49f8154dd1eadc8a29ecb", size = 5394148, upload-time = "2025-09-14T22:17:03.091Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/79/2bf870b3abeb5c070fe2d670a5a8d1057a8270f125ef7676d29ea900f496/zstandard-0.25.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:6a573a35693e03cf1d67799fd01b50ff578515a8aeadd4595d2a7fa9f3ec002a", size = 5451652, upload-time = "2025-09-14T22:17:04.979Z" },
+ { url = "https://files.pythonhosted.org/packages/53/60/7be26e610767316c028a2cbedb9a3beabdbe33e2182c373f71a1c0b88f36/zstandard-0.25.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5a56ba0db2d244117ed744dfa8f6f5b366e14148e00de44723413b2f3938a902", size = 5546993, upload-time = "2025-09-14T22:17:06.781Z" },
+ { url = "https://files.pythonhosted.org/packages/85/c7/3483ad9ff0662623f3648479b0380d2de5510abf00990468c286c6b04017/zstandard-0.25.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:10ef2a79ab8e2974e2075fb984e5b9806c64134810fac21576f0668e7ea19f8f", size = 5046806, upload-time = "2025-09-14T22:17:08.415Z" },
+ { url = "https://files.pythonhosted.org/packages/08/b3/206883dd25b8d1591a1caa44b54c2aad84badccf2f1de9e2d60a446f9a25/zstandard-0.25.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aaf21ba8fb76d102b696781bddaa0954b782536446083ae3fdaa6f16b25a1c4b", size = 5576659, upload-time = "2025-09-14T22:17:10.164Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/31/76c0779101453e6c117b0ff22565865c54f48f8bd807df2b00c2c404b8e0/zstandard-0.25.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1869da9571d5e94a85a5e8d57e4e8807b175c9e4a6294e3b66fa4efb074d90f6", size = 4953933, upload-time = "2025-09-14T22:17:11.857Z" },
+ { url = "https://files.pythonhosted.org/packages/18/e1/97680c664a1bf9a247a280a053d98e251424af51f1b196c6d52f117c9720/zstandard-0.25.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:809c5bcb2c67cd0ed81e9229d227d4ca28f82d0f778fc5fea624a9def3963f91", size = 5268008, upload-time = "2025-09-14T22:17:13.627Z" },
+ { url = "https://files.pythonhosted.org/packages/1e/73/316e4010de585ac798e154e88fd81bb16afc5c5cb1a72eeb16dd37e8024a/zstandard-0.25.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f27662e4f7dbf9f9c12391cb37b4c4c3cb90ffbd3b1fb9284dadbbb8935fa708", size = 5433517, upload-time = "2025-09-14T22:17:16.103Z" },
+ { url = "https://files.pythonhosted.org/packages/5b/60/dd0f8cfa8129c5a0ce3ea6b7f70be5b33d2618013a161e1ff26c2b39787c/zstandard-0.25.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99c0c846e6e61718715a3c9437ccc625de26593fea60189567f0118dc9db7512", size = 5814292, upload-time = "2025-09-14T22:17:17.827Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/5f/75aafd4b9d11b5407b641b8e41a57864097663699f23e9ad4dbb91dc6bfe/zstandard-0.25.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:474d2596a2dbc241a556e965fb76002c1ce655445e4e3bf38e5477d413165ffa", size = 5360237, upload-time = "2025-09-14T22:17:19.954Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/8d/0309daffea4fcac7981021dbf21cdb2e3427a9e76bafbcdbdf5392ff99a4/zstandard-0.25.0-cp312-cp312-win32.whl", hash = "sha256:23ebc8f17a03133b4426bcc04aabd68f8236eb78c3760f12783385171b0fd8bd", size = 436922, upload-time = "2025-09-14T22:17:24.398Z" },
+ { url = "https://files.pythonhosted.org/packages/79/3b/fa54d9015f945330510cb5d0b0501e8253c127cca7ebe8ba46a965df18c5/zstandard-0.25.0-cp312-cp312-win_amd64.whl", hash = "sha256:ffef5a74088f1e09947aecf91011136665152e0b4b359c42be3373897fb39b01", size = 506276, upload-time = "2025-09-14T22:17:21.429Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/6b/8b51697e5319b1f9ac71087b0af9a40d8a6288ff8025c36486e0c12abcc4/zstandard-0.25.0-cp312-cp312-win_arm64.whl", hash = "sha256:181eb40e0b6a29b3cd2849f825e0fa34397f649170673d385f3598ae17cca2e9", size = 462679, upload-time = "2025-09-14T22:17:23.147Z" },
+ { url = "https://files.pythonhosted.org/packages/35/0b/8df9c4ad06af91d39e94fa96cc010a24ac4ef1378d3efab9223cc8593d40/zstandard-0.25.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec996f12524f88e151c339688c3897194821d7f03081ab35d31d1e12ec975e94", size = 795735, upload-time = "2025-09-14T22:17:26.042Z" },
+ { url = "https://files.pythonhosted.org/packages/3f/06/9ae96a3e5dcfd119377ba33d4c42a7d89da1efabd5cb3e366b156c45ff4d/zstandard-0.25.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a1a4ae2dec3993a32247995bdfe367fc3266da832d82f8438c8570f989753de1", size = 640440, upload-time = "2025-09-14T22:17:27.366Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/14/933d27204c2bd404229c69f445862454dcc101cd69ef8c6068f15aaec12c/zstandard-0.25.0-cp313-cp313-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:e96594a5537722fdfb79951672a2a63aec5ebfb823e7560586f7484819f2a08f", size = 5343070, upload-time = "2025-09-14T22:17:28.896Z" },
+ { url = "https://files.pythonhosted.org/packages/6d/db/ddb11011826ed7db9d0e485d13df79b58586bfdec56e5c84a928a9a78c1c/zstandard-0.25.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bfc4e20784722098822e3eee42b8e576b379ed72cca4a7cb856ae733e62192ea", size = 5063001, upload-time = "2025-09-14T22:17:31.044Z" },
+ { url = "https://files.pythonhosted.org/packages/db/00/87466ea3f99599d02a5238498b87bf84a6348290c19571051839ca943777/zstandard-0.25.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:457ed498fc58cdc12fc48f7950e02740d4f7ae9493dd4ab2168a47c93c31298e", size = 5394120, upload-time = "2025-09-14T22:17:32.711Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/95/fc5531d9c618a679a20ff6c29e2b3ef1d1f4ad66c5e161ae6ff847d102a9/zstandard-0.25.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:fd7a5004eb1980d3cefe26b2685bcb0b17989901a70a1040d1ac86f1d898c551", size = 5451230, upload-time = "2025-09-14T22:17:34.41Z" },
+ { url = "https://files.pythonhosted.org/packages/63/4b/e3678b4e776db00f9f7b2fe58e547e8928ef32727d7a1ff01dea010f3f13/zstandard-0.25.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8e735494da3db08694d26480f1493ad2cf86e99bdd53e8e9771b2752a5c0246a", size = 5547173, upload-time = "2025-09-14T22:17:36.084Z" },
+ { url = "https://files.pythonhosted.org/packages/4e/d5/ba05ed95c6b8ec30bd468dfeab20589f2cf709b5c940483e31d991f2ca58/zstandard-0.25.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3a39c94ad7866160a4a46d772e43311a743c316942037671beb264e395bdd611", size = 5046736, upload-time = "2025-09-14T22:17:37.891Z" },
+ { url = "https://files.pythonhosted.org/packages/50/d5/870aa06b3a76c73eced65c044b92286a3c4e00554005ff51962deef28e28/zstandard-0.25.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:172de1f06947577d3a3005416977cce6168f2261284c02080e7ad0185faeced3", size = 5576368, upload-time = "2025-09-14T22:17:40.206Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/35/398dc2ffc89d304d59bc12f0fdd931b4ce455bddf7038a0a67733a25f550/zstandard-0.25.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3c83b0188c852a47cd13ef3bf9209fb0a77fa5374958b8c53aaa699398c6bd7b", size = 4954022, upload-time = "2025-09-14T22:17:41.879Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/5c/36ba1e5507d56d2213202ec2b05e8541734af5f2ce378c5d1ceaf4d88dc4/zstandard-0.25.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1673b7199bbe763365b81a4f3252b8e80f44c9e323fc42940dc8843bfeaf9851", size = 5267889, upload-time = "2025-09-14T22:17:43.577Z" },
+ { url = "https://files.pythonhosted.org/packages/70/e8/2ec6b6fb7358b2ec0113ae202647ca7c0e9d15b61c005ae5225ad0995df5/zstandard-0.25.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:0be7622c37c183406f3dbf0cba104118eb16a4ea7359eeb5752f0794882fc250", size = 5433952, upload-time = "2025-09-14T22:17:45.271Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/01/b5f4d4dbc59ef193e870495c6f1275f5b2928e01ff5a81fecb22a06e22fb/zstandard-0.25.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:5f5e4c2a23ca271c218ac025bd7d635597048b366d6f31f420aaeb715239fc98", size = 5814054, upload-time = "2025-09-14T22:17:47.08Z" },
+ { url = "https://files.pythonhosted.org/packages/b2/e5/fbd822d5c6f427cf158316d012c5a12f233473c2f9c5fe5ab1ae5d21f3d8/zstandard-0.25.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f187a0bb61b35119d1926aee039524d1f93aaf38a9916b8c4b78ac8514a0aaf", size = 5360113, upload-time = "2025-09-14T22:17:48.893Z" },
+ { url = "https://files.pythonhosted.org/packages/8e/e0/69a553d2047f9a2c7347caa225bb3a63b6d7704ad74610cb7823baa08ed7/zstandard-0.25.0-cp313-cp313-win32.whl", hash = "sha256:7030defa83eef3e51ff26f0b7bfb229f0204b66fe18e04359ce3474ac33cbc09", size = 436936, upload-time = "2025-09-14T22:17:52.658Z" },
+ { url = "https://files.pythonhosted.org/packages/d9/82/b9c06c870f3bd8767c201f1edbdf9e8dc34be5b0fbc5682c4f80fe948475/zstandard-0.25.0-cp313-cp313-win_amd64.whl", hash = "sha256:1f830a0dac88719af0ae43b8b2d6aef487d437036468ef3c2ea59c51f9d55fd5", size = 506232, upload-time = "2025-09-14T22:17:50.402Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/57/60c3c01243bb81d381c9916e2a6d9e149ab8627c0c7d7abb2d73384b3c0c/zstandard-0.25.0-cp313-cp313-win_arm64.whl", hash = "sha256:85304a43f4d513f5464ceb938aa02c1e78c2943b29f44a750b48b25ac999a049", size = 462671, upload-time = "2025-09-14T22:17:51.533Z" },
+ { url = "https://files.pythonhosted.org/packages/3d/5c/f8923b595b55fe49e30612987ad8bf053aef555c14f05bb659dd5dbe3e8a/zstandard-0.25.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e29f0cf06974c899b2c188ef7f783607dbef36da4c242eb6c82dcd8b512855e3", size = 795887, upload-time = "2025-09-14T22:17:54.198Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/09/d0a2a14fc3439c5f874042dca72a79c70a532090b7ba0003be73fee37ae2/zstandard-0.25.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:05df5136bc5a011f33cd25bc9f506e7426c0c9b3f9954f056831ce68f3b6689f", size = 640658, upload-time = "2025-09-14T22:17:55.423Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/7c/8b6b71b1ddd517f68ffb55e10834388d4f793c49c6b83effaaa05785b0b4/zstandard-0.25.0-cp314-cp314-manylinux2010_i686.manylinux_2_12_i686.manylinux_2_28_i686.whl", hash = "sha256:f604efd28f239cc21b3adb53eb061e2a205dc164be408e553b41ba2ffe0ca15c", size = 5379849, upload-time = "2025-09-14T22:17:57.372Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/86/a48e56320d0a17189ab7a42645387334fba2200e904ee47fc5a26c1fd8ca/zstandard-0.25.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223415140608d0f0da010499eaa8ccdb9af210a543fac54bce15babbcfc78439", size = 5058095, upload-time = "2025-09-14T22:17:59.498Z" },
+ { url = "https://files.pythonhosted.org/packages/f8/ad/eb659984ee2c0a779f9d06dbfe45e2dc39d99ff40a319895df2d3d9a48e5/zstandard-0.25.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e54296a283f3ab5a26fc9b8b5d4978ea0532f37b231644f367aa588930aa043", size = 5551751, upload-time = "2025-09-14T22:18:01.618Z" },
+ { url = "https://files.pythonhosted.org/packages/61/b3/b637faea43677eb7bd42ab204dfb7053bd5c4582bfe6b1baefa80ac0c47b/zstandard-0.25.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ca54090275939dc8ec5dea2d2afb400e0f83444b2fc24e07df7fdef677110859", size = 6364818, upload-time = "2025-09-14T22:18:03.769Z" },
+ { url = "https://files.pythonhosted.org/packages/31/dc/cc50210e11e465c975462439a492516a73300ab8caa8f5e0902544fd748b/zstandard-0.25.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e09bb6252b6476d8d56100e8147b803befa9a12cea144bbe629dd508800d1ad0", size = 5560402, upload-time = "2025-09-14T22:18:05.954Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/ae/56523ae9c142f0c08efd5e868a6da613ae76614eca1305259c3bf6a0ed43/zstandard-0.25.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a9ec8c642d1ec73287ae3e726792dd86c96f5681eb8df274a757bf62b750eae7", size = 4955108, upload-time = "2025-09-14T22:18:07.68Z" },
+ { url = "https://files.pythonhosted.org/packages/98/cf/c899f2d6df0840d5e384cf4c4121458c72802e8bda19691f3b16619f51e9/zstandard-0.25.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:a4089a10e598eae6393756b036e0f419e8c1d60f44a831520f9af41c14216cf2", size = 5269248, upload-time = "2025-09-14T22:18:09.753Z" },
+ { url = "https://files.pythonhosted.org/packages/1b/c0/59e912a531d91e1c192d3085fc0f6fb2852753c301a812d856d857ea03c6/zstandard-0.25.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:f67e8f1a324a900e75b5e28ffb152bcac9fbed1cc7b43f99cd90f395c4375344", size = 5430330, upload-time = "2025-09-14T22:18:11.966Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/1d/7e31db1240de2df22a58e2ea9a93fc6e38cc29353e660c0272b6735d6669/zstandard-0.25.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:9654dbc012d8b06fc3d19cc825af3f7bf8ae242226df5f83936cb39f5fdc846c", size = 5811123, upload-time = "2025-09-14T22:18:13.907Z" },
+ { url = "https://files.pythonhosted.org/packages/f6/49/fac46df5ad353d50535e118d6983069df68ca5908d4d65b8c466150a4ff1/zstandard-0.25.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4203ce3b31aec23012d3a4cf4a2ed64d12fea5269c49aed5e4c3611b938e4088", size = 5359591, upload-time = "2025-09-14T22:18:16.465Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/38/f249a2050ad1eea0bb364046153942e34abba95dd5520af199aed86fbb49/zstandard-0.25.0-cp314-cp314-win32.whl", hash = "sha256:da469dc041701583e34de852d8634703550348d5822e66a0c827d39b05365b12", size = 444513, upload-time = "2025-09-14T22:18:20.61Z" },
+ { url = "https://files.pythonhosted.org/packages/3a/43/241f9615bcf8ba8903b3f0432da069e857fc4fd1783bd26183db53c4804b/zstandard-0.25.0-cp314-cp314-win_amd64.whl", hash = "sha256:c19bcdd826e95671065f8692b5a4aa95c52dc7a02a4c5a0cac46deb879a017a2", size = 516118, upload-time = "2025-09-14T22:18:17.849Z" },
+ { url = "https://files.pythonhosted.org/packages/f0/ef/da163ce2450ed4febf6467d77ccb4cd52c4c30ab45624bad26ca0a27260c/zstandard-0.25.0-cp314-cp314-win_arm64.whl", hash = "sha256:d7541afd73985c630bafcd6338d2518ae96060075f9463d7dc14cfb33514383d", size = 476940, upload-time = "2025-09-14T22:18:19.088Z" },
+]