diff --git a/.changelog/unreleased/bug-fixes/1687-consensus-fix-block-validation.md b/.changelog/unreleased/bug-fixes/1687-consensus-fix-block-validation.md new file mode 100644 index 0000000000..778f0b538b --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1687-consensus-fix-block-validation.md @@ -0,0 +1,3 @@ +- `[mempool]` The calculation method of tx size returned by calling proxyapp should be consistent with that of mempool + ([\#1687](https://github.com/cometbft/cometbft/pull/1687)) + diff --git a/.changelog/unreleased/bug-fixes/1749-light-client-attack-verify-all-sigs.md b/.changelog/unreleased/bug-fixes/1749-light-client-attack-verify-all-sigs.md new file mode 100644 index 0000000000..1115c4d195 --- /dev/null +++ b/.changelog/unreleased/bug-fixes/1749-light-client-attack-verify-all-sigs.md @@ -0,0 +1,4 @@ +- `[evidence]` When `VerifyCommitLight` & `VerifyCommitLightTrusting` are called as part + of evidence verification, all signatures present in the evidence must be verified + ([\#1749](https://github.com/cometbft/cometbft/pull/1749)) + diff --git a/.changelog/unreleased/improvements/1715-validate-validator-address.md b/.changelog/unreleased/improvements/1715-validate-validator-address.md new file mode 100644 index 0000000000..ec7f2c7da6 --- /dev/null +++ b/.changelog/unreleased/improvements/1715-validate-validator-address.md @@ -0,0 +1 @@ +- `[types]` Validate `Validator#Address` in `ValidateBasic` ([\#1715](https://github.com/cometbft/cometbft/pull/1715)) diff --git a/.changelog/unreleased/improvements/1730-increase-abci-socket-message-size-limit.md b/.changelog/unreleased/improvements/1730-increase-abci-socket-message-size-limit.md new file mode 100644 index 0000000000..5246eb57f0 --- /dev/null +++ b/.changelog/unreleased/improvements/1730-increase-abci-socket-message-size-limit.md @@ -0,0 +1 @@ +- `[abci]` Increase ABCI socket message size limit to 2GB ([\#1730](https://github.com/cometbft/cometbft/pull/1730): @troykessler) diff --git 
a/.changelog/v0.37.3/breaking-changes/1113-rm-upnp.md b/.changelog/v0.37.3/breaking-changes/1113-rm-upnp.md new file mode 100644 index 0000000000..5526319ae5 --- /dev/null +++ b/.changelog/v0.37.3/breaking-changes/1113-rm-upnp.md @@ -0,0 +1,2 @@ +- `[p2p]` Remove unused UPnP functionality + ([\#1113](https://github.com/cometbft/cometbft/issues/1113)) diff --git a/.changelog/v0.37.3/bug-fixes/1529-indexer-respect-height-params-on-query.md b/.changelog/v0.37.3/bug-fixes/1529-indexer-respect-height-params-on-query.md new file mode 100644 index 0000000000..d12f3eda53 --- /dev/null +++ b/.changelog/v0.37.3/bug-fixes/1529-indexer-respect-height-params-on-query.md @@ -0,0 +1,2 @@ +- `[state/indexer]` Respect both height params while querying for events + ([\#1529](https://github.com/cometbft/cometbft/pull/1529)) diff --git a/.changelog/v0.37.3/features/1057-bootstrap-state-api.md b/.changelog/v0.37.3/features/1057-bootstrap-state-api.md new file mode 100644 index 0000000000..ff3dcb6820 --- /dev/null +++ b/.changelog/v0.37.3/features/1057-bootstrap-state-api.md @@ -0,0 +1,2 @@ +- `[node/state]` Add Go API to bootstrap block store and state store to a height + ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang) \ No newline at end of file diff --git a/.changelog/v0.37.3/features/1512-metric-mempool-size-bytes.md b/.changelog/v0.37.3/features/1512-metric-mempool-size-bytes.md new file mode 100644 index 0000000000..b935dc4084 --- /dev/null +++ b/.changelog/v0.37.3/features/1512-metric-mempool-size-bytes.md @@ -0,0 +1,2 @@ +- `[metrics]` Add metric for mempool size in bytes `SizeBytes`. 
+ ([\#1512](https://github.com/cometbft/cometbft/pull/1512)) \ No newline at end of file diff --git a/.changelog/v0.37.3/improvements/1210-close-evidence-db.md b/.changelog/v0.37.3/improvements/1210-close-evidence-db.md new file mode 100644 index 0000000000..e32bc87dbe --- /dev/null +++ b/.changelog/v0.37.3/improvements/1210-close-evidence-db.md @@ -0,0 +1 @@ +- `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee) diff --git a/.changelog/v0.37.3/improvements/1558-experimental-gossip-limiting.md b/.changelog/v0.37.3/improvements/1558-experimental-gossip-limiting.md new file mode 100644 index 0000000000..f3d84dddc7 --- /dev/null +++ b/.changelog/v0.37.3/improvements/1558-experimental-gossip-limiting.md @@ -0,0 +1,9 @@ +- `[mempool]` Add experimental feature to limit the number of persistent peers and non-persistent + peers to which the node gossip transactions (only for "v0" mempool). + ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) +- `[config]` Add mempool parameters `experimental_max_gossip_connections_to_persistent_peers` and + `experimental_max_gossip_connections_to_non_persistent_peers` for limiting the number of peers to + which the node gossip transactions. 
+ ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) diff --git a/.changelog/v0.37.3/improvements/475-upgrade-go-schnorrkel.md b/.changelog/v0.37.3/improvements/475-upgrade-go-schnorrkel.md new file mode 100644 index 0000000000..bdaf96c14c --- /dev/null +++ b/.changelog/v0.37.3/improvements/475-upgrade-go-schnorrkel.md @@ -0,0 +1 @@ +- `[crypto/sr25519]` Upgrade to go-schnorrkel@v1.0.0 ([\#475](https://github.com/cometbft/cometbft/issues/475)) diff --git a/.changelog/v0.37.3/improvements/857-make-handshake-cancelable.md b/.changelog/v0.37.3/improvements/857-make-handshake-cancelable.md new file mode 100644 index 0000000000..16b447f6d2 --- /dev/null +++ b/.changelog/v0.37.3/improvements/857-make-handshake-cancelable.md @@ -0,0 +1 @@ +- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) diff --git a/.changelog/v0.37.3/summary.md b/.changelog/v0.37.3/summary.md new file mode 100644 index 0000000000..f1e5c7f755 --- /dev/null +++ b/.changelog/v0.37.3/summary.md @@ -0,0 +1,5 @@ +*November 17, 2023* + +This release contains, among other things, an opt-in, experimental feature to +help reduce the bandwidth consumption associated with the mempool's transaction +gossip. 
diff --git a/.changelog/v0.37.4/bug-fixes/1654-semaphore-wait.md b/.changelog/v0.37.4/bug-fixes/1654-semaphore-wait.md new file mode 100644 index 0000000000..9d0fb80adc --- /dev/null +++ b/.changelog/v0.37.4/bug-fixes/1654-semaphore-wait.md @@ -0,0 +1,3 @@ +- `[mempool]` Avoid infinite wait in transaction sending routine when + using experimental parameters to limiting transaction gossiping to peers + ([\#1654](https://github.com/cometbft/cometbft/pull/1654)) \ No newline at end of file diff --git a/.changelog/v0.37.4/features/1643-nop-mempool.md b/.changelog/v0.37.4/features/1643-nop-mempool.md new file mode 100644 index 0000000000..e12ec43fc1 --- /dev/null +++ b/.changelog/v0.37.4/features/1643-nop-mempool.md @@ -0,0 +1,17 @@ +- `[mempool]` Add `nop` mempool ([\#1643](https://github.com/cometbft/cometbft/pull/1643)) + + If you want to use it, change mempool's `type` to `nop`: + + ```toml + [mempool] + + # The type of mempool for this node to use. + # + # Possible types: + # - "flood" : concurrent linked list mempool with flooding gossip protocol + # (default) + # - "nop" : nop-mempool (short for no operation; the ABCI app is responsible + # for storing, disseminating and proposing txs). "create_empty_blocks=false" + # is not supported. + type = "nop" + ``` \ No newline at end of file diff --git a/.changelog/v0.37.4/summary.md b/.changelog/v0.37.4/summary.md new file mode 100644 index 0000000000..a391feeea2 --- /dev/null +++ b/.changelog/v0.37.4/summary.md @@ -0,0 +1,9 @@ +*November 27, 2023* + +This release provides the **nop** mempool for applications that want to build +their own mempool. Using this mempool effectively disables all mempool +functionality in CometBFT, including transaction dissemination and the +`broadcast_tx_*` endpoints. + +Also fixes a small bug in the mempool for an experimental feature, and reverts +the change from v0.37.3 that bumped the minimum Go version to v1.21. 
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c95d625629..b4d4200820 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,10 +19,10 @@ jobs: goos: ["linux"] timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.21" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -41,10 +41,10 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.21" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -63,10 +63,10 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.21" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index 70943e9aef..026d1d1da6 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -16,11 +16,11 @@ jobs: check-mocks: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" + go-version: "1.21" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Check generated mocks" run: | @@ -40,11 +40,11 @@ jobs: check-proto: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" + go-version: "1.21" - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 1 # we need a .git directory to run git diff diff --git a/.github/workflows/cometbft-docker.yml b/.github/workflows/cometbft-docker.yml index eb47108838..99cb523d7d 100644 --- 
a/.github/workflows/cometbft-docker.yml +++ b/.github/workflows/cometbft-docker.yml @@ -15,7 +15,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.7.0 + uses: docker/setup-buildx-action@v3.0.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.2.0 + uses: docker/login-action@v3.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.1.1 + uses: docker/build-push-action@v5.1.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/docs-toc.yml b/.github/workflows/docs-toc.yml index e4ad7f03e0..225b33bbdb 100644 --- a/.github/workflows/docs-toc.yml +++ b/.github/workflows/docs-toc.yml @@ -10,7 +10,7 @@ jobs: check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/e2e-long-37x.yml b/.github/workflows/e2e-long-37x.yml index 51c38f1cda..845d4df93f 100644 --- a/.github/workflows/e2e-long-37x.yml +++ b/.github/workflows/e2e-long-37x.yml @@ -15,11 +15,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: 'v0.37.x' diff --git a/.github/workflows/e2e-manual-multiversion.yml b/.github/workflows/e2e-manual-multiversion.yml index 164ee3b9e2..1d0d6cbc27 100644 --- a/.github/workflows/e2e-manual-multiversion.yml +++ b/.github/workflows/e2e-manual-multiversion.yml @@ -15,11 +15,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 
with: - go-version: '1.20' + go-version: '1.21' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index d69cdf892c..405b6ae048 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -15,11 +15,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index d232806afe..a7fc8ca6c4 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -19,11 +19,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: '1.18' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: 'v0.34.x' diff --git a/.github/workflows/e2e-nightly-37x.yml b/.github/workflows/e2e-nightly-37x.yml index 0ac1f50d31..cf0f154ad3 100644 --- a/.github/workflows/e2e-nightly-37x.yml +++ b/.github/workflows/e2e-nightly-37x.yml @@ -19,11 +19,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: 'v0.37.x' diff --git a/.github/workflows/e2e-nightly-main.yml b/.github/workflows/e2e-nightly-main.yml index 6fa05d90b9..bf33933c1f 100644 --- a/.github/workflows/e2e-nightly-main.yml +++ b/.github/workflows/e2e-nightly-main.yml @@ -20,11 +20,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - - uses: 
actions/checkout@v3 + - uses: actions/checkout@v4 - name: Build working-directory: test/e2e diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 494b9513ff..fdab004940 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -13,10 +13,10 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 15 steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' - - uses: actions/checkout@v3 + go-version: '1.21' + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 4af0978741..a2fe5c16cf 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -13,11 +13,11 @@ jobs: fuzz-nightly-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install go-fuzz working-directory: test/fuzz @@ -49,14 +49,14 @@ jobs: continue-on-error: true - name: Archive crashers - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: crashers path: test/fuzz/**/crashers retention-days: 3 - name: Archive suppressions - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: suppressions path: test/fuzz/**/suppressions diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml index 4e39bb302d..4c262bbcc5 100644 --- a/.github/workflows/govulncheck.yml +++ b/.github/workflows/govulncheck.yml @@ -14,11 +14,11 @@ jobs: govulncheck: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" + go-version: "1.21" check-latest: true - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | diff --git a/.github/workflows/janitor.yml 
b/.github/workflows/janitor.yml index 22cba4a93a..9c28eb4fd3 100644 --- a/.github/workflows/janitor.yml +++ b/.github/workflows/janitor.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.11.0 + - uses: styfle/cancel-workflow-action@0.12.0 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 7b932c3c8c..01e04903bf 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -20,19 +20,21 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v3 - - uses: actions/setup-go@v4 + - uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go go.mod go.sum + .github/** + Makefile - uses: golangci/golangci-lint-action@v3 with: - version: v1.51 + version: latest args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index c98da6e2f7..bd22d4bb8c 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Lint Code Base uses: docker://github/super-linter:v4 env: diff --git a/.github/workflows/pre-release.yml b/.github/workflows/pre-release.yml index 4cf6a83cb8..b910d27771 100644 --- a/.github/workflows/pre-release.yml +++ b/.github/workflows/pre-release.yml @@ -12,13 +12,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' # Similar check to ./release-version.yml, but enforces this when pushing # tags. 
The ./release-version.yml check can be bypassed and is mainly @@ -44,7 +44,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for changes available in this pre-release, but not yet officially released." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --clean --release-notes ../release_notes.md diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index a3404dd73e..998aa305db 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -14,8 +14,8 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 5 steps: - - uses: actions/checkout@v3 - - uses: bufbuild/buf-setup-action@v1.21.0 + - uses: actions/checkout@v4 + - uses: bufbuild/buf-setup-action@v1.28.1 - uses: bufbuild/buf-lint-action@v1 with: input: 'proto' diff --git a/.github/workflows/release-version.yml b/.github/workflows/release-version.yml index 773e9339ab..96cc598edb 100644 --- a/.github/workflows/release-version.yml +++ b/.github/workflows/release-version.yml @@ -11,11 +11,11 @@ jobs: check-version: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' - name: Check version run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 343818356e..5cc48eb265 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,13 +10,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: '1.20' + go-version: '1.21' # Similar check to ./release-version.yml, but enforces this when pushing # tags. 
The ./release-version.yml check can be bypassed and is mainly @@ -43,7 +43,7 @@ jobs: echo "See the [CHANGELOG](${CHANGELOG_URL}) for this release." > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v5 with: version: latest args: release --clean --release-notes ../release_notes.md diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 396f41b1ab..35bfb53d77 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v8 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had diff --git a/.github/workflows/testapp-docker.yml b/.github/workflows/testapp-docker.yml index d4b6126d74..e426397657 100644 --- a/.github/workflows/testapp-docker.yml +++ b/.github/workflows/testapp-docker.yml @@ -15,7 +15,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Prepare id: prep run: | @@ -41,17 +41,17 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.7.0 + uses: docker/setup-buildx-action@v3.0.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.2.0 + uses: docker/login-action@v3.0.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v4.1.1 + uses: docker/build-push-action@v5.1.0 with: context: . 
file: ./test/e2e/docker/Dockerfile diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 46a2358a77..71a9dc8557 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -14,11 +14,12 @@ jobs: fail-fast: false matrix: part: ["00", "01", "02", "03", "04", "05"] + repeat: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40] steps: - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: - go-version: "1.20" - - uses: actions/checkout@v3 + go-version: "1.21" + - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: PATTERNS: | @@ -30,4 +31,3 @@ jobs: - name: Run Go Tests run: | make test-group-${{ matrix.part }} NUM_SPLIT=6 - if: env.GIT_DIFF diff --git a/.golangci.yml b/.golangci.yml index 80e7214b2c..d7db8f833c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,4 +1,5 @@ linters: + disable-all: true enable: - asciicheck - bodyclose @@ -7,16 +8,16 @@ linters: - dupl - errcheck - exportloopref - - goconst + # - goconst - gofmt - goimports - - revive + # - revive - gosec - gosimple - govet - ineffassign - misspell - - nakedret + # - nakedret - nolintlint - prealloc - staticcheck @@ -42,3 +43,59 @@ linters-settings: suggest-new: true misspell: locale: US + depguard: + rules: + main: + files: + - $all + - "!$test" + allow: + - $gostd + - github.com/cometbft + - github.com/cosmos + - github.com/btcsuite/btcd/btcec/v2 + - github.com/BurntSushi/toml + - github.com/go-git/go-git/v5 + - github.com/go-kit + - github.com/go-logfmt/logfmt + - github.com/gofrs/uuid + - github.com/google + - github.com/gorilla/websocket + - github.com/informalsystems/tm-load-test/pkg/loadtest + - github.com/lib/pq + - github.com/libp2p/go-buffer-pool + - github.com/Masterminds/semver/v3 + - github.com/minio/highwayhash + - github.com/oasisprotocol/curve25519-voi + - github.com/pkg/errors + - github.com/prometheus + - 
github.com/rcrowley/go-metrics + - github.com/rs/cors + - github.com/snikch/goodman + - github.com/spf13 + - github.com/stretchr/testify/require + - github.com/syndtr/goleveldb + - github.com/ChainSafe/go-schnorrkel + - github.com/creachadair/taskgroup + - github.com/gtank/merlin + test: + files: + - "$test" + allow: + - $gostd + - github.com/cosmos + - github.com/cometbft + - github.com/adlio/schema + - github.com/btcsuite/btcd + - github.com/fortytw2/leaktest + - github.com/go-kit + - github.com/google/uuid + - github.com/gorilla/websocket + - github.com/lib/pq + - github.com/oasisprotocol/curve25519-voi/primitives/merlin + - github.com/ory/dockertest + - github.com/pkg/errors + - github.com/prometheus/client_golang/prometheus/promhttp + - github.com/spf13 + - github.com/stretchr/testify + - github.com/gtank/merlin diff --git a/CHANGELOG.md b/CHANGELOG.md index e26955a191..cc1eac434d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,83 @@ # CHANGELOG +## v0.37.4 + +*November 27, 2023* + +This release provides the **nop** mempool for applications that want to build +their own mempool. Using this mempool effectively disables all mempool +functionality in CometBFT, including transaction dissemination and the +`broadcast_tx_*` endpoints. + +Also fixes a small bug in the mempool for an experimental feature, and reverts +the change from v0.37.3 that bumped the minimum Go version to v1.21. + +### BUG FIXES + +- `[mempool]` Avoid infinite wait in transaction sending routine when + using experimental parameters to limiting transaction gossiping to peers + ([\#1654](https://github.com/cometbft/cometbft/pull/1654)) + +### FEATURES + +- `[mempool]` Add `nop` mempool ([\#1643](https://github.com/cometbft/cometbft/pull/1643)) + + If you want to use it, change mempool's `type` to `nop`: + + ```toml + [mempool] + + # The type of mempool for this node to use. 
+ # + # Possible types: + # - "flood" : concurrent linked list mempool with flooding gossip protocol + # (default) + # - "nop" : nop-mempool (short for no operation; the ABCI app is responsible + # for storing, disseminating and proposing txs). "create_empty_blocks=false" + # is not supported. + type = "nop" + ``` + +## v0.37.3 + +*November 17, 2023* + +This release contains, among other things, an opt-in, experimental feature to +help reduce the bandwidth consumption associated with the mempool's transaction +gossip. + +### BREAKING CHANGES + +- `[p2p]` Remove unused UPnP functionality + ([\#1113](https://github.com/cometbft/cometbft/issues/1113)) + +### BUG FIXES + +- `[state/indexer]` Respect both height params while querying for events + ([\#1529](https://github.com/cometbft/cometbft/pull/1529)) + +### FEATURES + +- `[node/state]` Add Go API to bootstrap block store and state store to a height + ([\#1057](https://github.com/tendermint/tendermint/pull/1057)) (@yihuang) +- `[metrics]` Add metric for mempool size in bytes `SizeBytes`. + ([\#1512](https://github.com/cometbft/cometbft/pull/1512)) + +### IMPROVEMENTS + +- `[crypto/sr25519]` Upgrade to go-schnorrkel@v1.0.0 ([\#475](https://github.com/cometbft/cometbft/issues/475)) +- `[node]` Make handshake cancelable ([cometbft/cometbft\#857](https://github.com/cometbft/cometbft/pull/857)) +- `[node]` Close evidence.db OnStop ([cometbft/cometbft\#1210](https://github.com/cometbft/cometbft/pull/1210): @chillyvee) +- `[mempool]` Add experimental feature to limit the number of persistent peers and non-persistent + peers to which the node gossip transactions (only for "v0" mempool). 
+ ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) +- `[config]` Add mempool parameters `experimental_max_gossip_connections_to_persistent_peers` and + `experimental_max_gossip_connections_to_non_persistent_peers` for limiting the number of peers to + which the node gossip transactions. + ([\#1558](https://github.com/cometbft/cometbft/pull/1558)) + ([\#1584](https://github.com/cometbft/cometbft/pull/1584)) + ## v0.37.2 *June 14, 2023* @@ -9,14 +87,14 @@ security issues. ### BUG FIXES -- `[state/kvindex]` Querying event attributes that are bigger than int64 is now - enabled. We are not supporting reading floats from the db into the indexer - nor parsing them into BigFloats to not introduce breaking changes in minor - releases. ([\#771](https://github.com/cometbft/cometbft/pull/771)) - `[pubsub]` Pubsub queries are now able to parse big integers (larger than int64). Very big floats are also properly parsed into very big integers instead of being truncated to int64. ([\#771](https://github.com/cometbft/cometbft/pull/771)) +- `[state/kvindex]` Querying event attributes that are bigger than int64 is now + enabled. We are not supporting reading floats from the db into the indexer + nor parsing them into BigFloats to not introduce breaking changes in minor + releases. ([\#771](https://github.com/cometbft/cometbft/pull/771)) ### IMPROVEMENTS diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index c25964180e..3f93f1e5e8 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,13 +1,13 @@ # The CometBFT Code of Conduct -This code of conduct applies to all projects run by the CometBFT/Cosmos team and +This code of conduct applies to all projects run by the CometBFT team and hence to CometBFT. 
---- # Conduct -## Contact: conduct@interchain.io +## Contact: conduct@informal.systems * We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and @@ -35,7 +35,7 @@ hence to CometBFT. * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community - member, please contact one of the channel admins or the person mentioned above + member, please get in touch with one of the channel admins or the contact address above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back. diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index cf99713757..3649cc5a0b 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -1,6 +1,6 @@ # Use a build arg to ensure that both stages use the same, # hopefully current, go version. -ARG GOLANG_BASE_IMAGE=golang:1.20-alpine +ARG GOLANG_BASE_IMAGE=golang:1.21-alpine # stage 1 Generate CometBFT Binary FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder diff --git a/Makefile b/Makefile index 5f69579f5b..287561f247 100644 --- a/Makefile +++ b/Makefile @@ -271,7 +271,7 @@ format: lint: @echo "--> Running linter" - @go run github.com/golangci/golangci-lint/cmd/golangci-lint run + @go run github.com/golangci/golangci-lint/cmd/golangci-lint@latest run .PHONY: lint vulncheck: diff --git a/README.md b/README.md index c5c37a5aa9..00a2f5be67 100644 --- a/README.md +++ b/README.md @@ -39,14 +39,15 @@ Complete documentation can be found on the Please do not depend on `main` as your production branch. Use [releases](https://github.com/cometbft/cometbft/releases) instead. -We haven't released v1.0 yet -since we are making breaking changes to the protocol and the APIs. See below for -more details about [versioning](#versioning). 
- -In any case, if you intend to run CometBFT in production, we're happy to help. - -To contact us, you can also -[join the chat](https://discord.com/channels/669268347736686612/669283915743232011). +If you intend to run CometBFT in production, we're happy to help. To contact +us, in order of preference: + +- [Create a new discussion on + GitHub](https://github.com/cometbft/cometbft/discussions) +- Reach out to us via [Telegram](https://t.me/CometBFT) +- [Join the Cosmos Network Discord](https://discord.gg/cosmosnetwork) and + discuss in + [`#cometbft`](https://discord.com/channels/669268347736686612/1069933855307472906) More on how releases are conducted can be found [here](./RELEASES.md). diff --git a/SECURITY.md b/SECURITY.md index 01b989c6b1..2a5c566641 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,208 +1,33 @@ -# Security +# How to Report a Security Bug -## Reporting a Bug +If you believe you have found a security vulnerability in the Interchain Stack, +you can report it to our primary vulnerability disclosure channel, the [Cosmos +HackerOne Bug Bounty program][h1]. -As part of our Coordinated Vulnerability Disclosure Policy (link will be added -once this policy is finalized for CometBFT), we operate a [bug -bounty][hackerone]. See the policy for more details on submissions and rewards, -and see "Example Vulnerabilities" (below) for examples of the kinds of bugs -we're most interested in. +If you prefer to report an issue via email, you may send a bug report to + with the issue details, reproduction, impact, and other +information. Please submit only one unique email thread per vulnerability. Any +issues reported via email are ineligible for bounty rewards. -### Guidelines +Artifacts from an email report are saved at the time the email is triaged. +Please note: our team is not able to monitor dynamic content (e.g. a Google Docs +link that is edited after receipt) throughout the lifecycle of a report. 
If you +would like to share additional information or modify previous information, +please include it in an additional reply as an additional attachment. -We require that all researchers: +Please **DO NOT** file a public issue in this repository to report a security +vulnerability. -* Use the bug bounty to disclose all vulnerabilities, and avoid posting - vulnerability information in public places, including GitHub Issues, Discord - channels, and Telegram groups -* Make every effort to avoid privacy violations, degradation of user experience, - disruption to production systems (including but not limited to the Cosmos - Hub), and destruction of data -* Keep any information about vulnerabilities that you’ve discovered confidential - between yourself and the CometBFT engineering team until the issue has been - resolved and disclosed -* Avoid posting personally identifiable information, privately or publicly +## Coordinated Vulnerability Disclosure Policy and Safe Harbor -If you follow these guidelines when reporting an issue to us, we commit to: +For the most up-to-date version of the policies that govern vulnerability +disclosure, please consult the [HackerOne program page][h1-policy]. -* Not pursue or support any legal action related to your research on this - vulnerability -* Work with you to understand, resolve and ultimately disclose the issue in a - timely fashion +The policy hosted on HackerOne is the official Coordinated Vulnerability +Disclosure policy and Safe Harbor for the Interchain Stack, and the teams and +infrastructure it supports, and it supersedes previous security policies that +have been used in the past by individual teams and projects with targets in +scope of the program. -## Disclosure Process - -CometBFT uses the following disclosure process: - -1. Once a security report is received, the CometBFT team works to verify the - issue and confirm its severity level using CVSS. -2. 
The CometBFT team collaborates with the Gaia team to determine the - vulnerability’s potential impact on the Cosmos Hub. -3. Patches are prepared for eligible releases of CometBFT in private - repositories. See “Supported Releases” below for more information on which - releases are considered eligible. -4. If it is determined that a CVE-ID is required, we request a CVE through a CVE - Numbering Authority. -5. We notify the community that a security release is coming, to give users time - to prepare their systems for the update. Notifications can include forum - posts, tweets, and emails to partners and validators. -6. 24 hours following this notification, the fixes are applied publicly and new - releases are issued. -7. Cosmos SDK and Gaia update their CometBFT dependencies to use these releases, - and then themselves issue new releases. -8. Once releases are available for CometBFT, Cosmos SDK and Gaia, we notify the - community, again, through the same channels as above. We also publish a - Security Advisory on GitHub and publish the CVE, as long as neither the - Security Advisory nor the CVE include any information on how to exploit these - vulnerabilities beyond what information is already available in the patch - itself. -9. Once the community is notified, we will pay out any relevant bug bounties to - submitters. -10. One week after the releases go out, we will publish a post with further - details on the vulnerability as well as our response to it. - -This process can take some time. Every effort will be made to handle the bug in -as timely a manner as possible, however it's important that we follow the -process described above to ensure that disclosures are handled consistently and -to keep CometBFT and its downstream dependent projects--including but not -limited to Gaia and the Cosmos Hub--as secure as possible. - -### Example Timeline - -The following is an example timeline for the triage and response. 
The required -roles and team members are described in parentheses after each task; however, -multiple people can play each role and each person may play multiple roles. - -#### 24+ Hours Before Release Time - -1. Request CVE number (ADMIN) -2. Gather emails and other contact info for validators (COMMS LEAD) -3. Create patches in a private security repo, and ensure that PRs are open - targeting all relevant release branches (CometBFT ENG, CometBFT LEAD) -4. Test fixes on a testnet (CometBFT ENG, COSMOS SDK ENG) -5. Write “Security Advisory” for forum (CometBFT LEAD) - -#### 24 Hours Before Release Time - -1. Post “Security Advisory” pre-notification on forum (CometBFT LEAD) -2. Post Tweet linking to forum post (COMMS LEAD) -3. Announce security advisory/link to post in various other social channels - (Telegram, Discord) (COMMS LEAD) -4. Send emails to validators or other users (PARTNERSHIPS LEAD) - -#### Release Time - -1. Cut CometBFT releases for eligible versions (CometBFT ENG, CometBFT - LEAD) -2. Cut Cosmos SDK release for eligible versions (COSMOS ENG) -3. Cut Gaia release for eligible versions (GAIA ENG) -4. Post “Security releases” on forum (CometBFT LEAD) -5. Post new Tweet linking to forum post (COMMS LEAD) -6. Remind everyone via social channels (Telegram, Discord) that the release is - out (COMMS LEAD) -7. Send emails to validators or other users (COMMS LEAD) -8. Publish Security Advisory and CVE, if CVE has no sensitive information - (ADMIN) - -#### After Release Time - -1. Write forum post with exploit details (CometBFT LEAD) -2. Approve pay-out on HackerOne for submitter (ADMIN) - -#### 7 Days After Release Time - -1. Publish CVE if it has not yet been published (ADMIN) -2. Publish forum post with exploit details (CometBFT ENG, CometBFT LEAD) - -## Supported Releases - -The CometBFT team commits to releasing security patch releases for both -the latest minor release as well for the major/minor release that the Cosmos Hub -is running. 
- -If you are running older versions of CometBFT, we encourage you to -upgrade at your earliest opportunity so that you can receive security patches -directly from the CometBFT repo. While you are welcome to backport security -patches to older versions for your own use, we will not publish or promote these -backports. - -## Scope - -The full scope of our bug bounty program is outlined on our -[Hacker One program page][hackerone]. Please also note that, in the interest of -the safety of our users and staff, a few things are explicitly excluded from -scope: - -* Any third-party services -* Findings from physical testing, such as office access -* Findings derived from social engineering (e.g., phishing) - -## Example Vulnerabilities - -The following is a list of examples of the kinds of vulnerabilities that we’re -most interested in. It is not exhaustive: there are other kinds of issues we may -also be interested in! - -### Specification - -* Conceptual flaws -* Ambiguities, inconsistencies, or incorrect statements -* Mis-match between specification and implementation of any component - -### Consensus - -Assuming less than 1/3 of the voting power is Byzantine (malicious): - -* Validation of blockchain data structures, including blocks, block parts, - votes, and so on -* Execution of blocks -* Validator set changes -* Proposer round robin -* Two nodes committing conflicting blocks for the same height (safety failure) -* A correct node signing conflicting votes -* A node halting (liveness failure) -* Syncing new and old nodes - -Assuming more than 1/3 the voting power is Byzantine: - -* Attacks that go unpunished (unhandled evidence) - -### Networking - -* Authenticated encryption (MITM, information leakage) -* Eclipse attacks -* Sybil attacks -* Long-range attacks -* Denial-of-Service - -### RPC - -* Write-access to anything besides sending transactions -* Denial-of-Service -* Leakage of secrets - -### Denial-of-Service - -Attacks may come through the P2P network or 
the RPC layer: - -* Amplification attacks -* Resource abuse -* Deadlocks and race conditions - -### Libraries - -* Serialization -* Reading/Writing files and databases - -### Cryptography - -* Elliptic curves for validator signatures -* Hash algorithms and Merkle trees for block validation -* Authenticated encryption for P2P connections - -### Light Client - -* Core verification -* Bisection/sequential algorithms - -[hackerone]: https://hackerone.com/cosmos +[h1]: https://hackerone.com/cosmos?type=team +[h1-policy]: https://hackerone.com/cosmos?type=team&view_policy=true diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 3508495b6b..373a0dedc2 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -1,6 +1,9 @@ package kvstore import ( + "fmt" + "strings" + "github.com/cometbft/cometbft/abci/types" cmtrand "github.com/cometbft/cometbft/libs/rand" ) @@ -34,3 +37,27 @@ func InitKVStore(app *PersistentKVStoreApplication) { Validators: RandVals(1), }) } + +// Create a new transaction +func NewTx(key, value string) []byte { + return []byte(strings.Join([]string{key, value}, "=")) +} + +func NewRandomTx(size int) []byte { + if size < 4 { + panic("random tx size must be greater than 3") + } + return NewTx(cmtrand.Str(2), cmtrand.Str(size-3)) +} + +func NewRandomTxs(n int) [][]byte { + txs := make([][]byte, n) + for i := 0; i < n; i++ { + txs[i] = NewRandomTx(10) + } + return txs +} + +func NewTxFromID(i int) []byte { + return []byte(fmt.Sprintf("%d=%d", i, i)) +} diff --git a/abci/types/messages.go b/abci/types/messages.go index 74c8757851..94b49e1db0 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -2,13 +2,14 @@ package types import ( "io" + "math" "github.com/cometbft/cometbft/libs/protoio" "github.com/cosmos/gogoproto/proto" ) const ( - maxMsgSize = 104857600 // 100MB + maxMsgSize = math.MaxInt32 // 2GB ) // WriteMessage writes a varint length-delimited protobuf message. 
diff --git a/blocksync/reactor.go b/blocksync/reactor.go index cd5a811688..91477ceeb6 100644 --- a/blocksync/reactor.go +++ b/blocksync/reactor.go @@ -60,21 +60,29 @@ type Reactor struct { errorsCh <-chan peerError } -// NewReactor returns new reactor instance. -func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, - blockSync bool) *Reactor { - - if state.LastBlockHeight != store.Height() { - panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, - store.Height())) +func NewReactorWithOfflineStateSync(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, + blockSync bool, offlineStateSyncHeight int64) *Reactor { + + storeHeight := store.Height() + if storeHeight == 0 { + // If state sync was performed offline and the stores were bootstrapped to height H + // the state store's lastHeight will be H while blockstore's Height and Base are still 0 + // 1. This scenario should not lead to a panic in this case, which is indicated by + // having a OfflineStateSyncHeight > 0 + // 2. We need to instruct the blocksync reactor to start fetching blocks from H+1 + // instead of 0. + storeHeight = offlineStateSyncHeight + } + if state.LastBlockHeight != storeHeight { + panic(fmt.Sprintf("state (%v) and store (%v) height mismatch, stores were left in an inconsistent state", state.LastBlockHeight, + storeHeight)) } - requestsCh := make(chan BlockRequest, maxTotalRequesters) const capacity = 1000 // must be bigger than peers count errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock - startHeight := store.Height() + 1 + startHeight := storeHeight + 1 if startHeight == 1 { startHeight = state.InitialHeight } @@ -93,6 +101,14 @@ func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockS return bcR } +// NewReactor returns new reactor instance. 
+func NewReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore, + blockSync bool) *Reactor { + + return NewReactorWithOfflineStateSync(state, blockExec, store, blockSync, 0) + +} + // SetLogger implements service.Service by setting the logger on reactor and pool. func (bcR *Reactor) SetLogger(l log.Logger) { bcR.BaseService.Logger = l diff --git a/cmd/cometbft/commands/probe_upnp.go b/cmd/cometbft/commands/probe_upnp.go deleted file mode 100644 index c2c4609a13..0000000000 --- a/cmd/cometbft/commands/probe_upnp.go +++ /dev/null @@ -1,33 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - cmtjson "github.com/cometbft/cometbft/libs/json" - "github.com/cometbft/cometbft/p2p/upnp" -) - -// ProbeUpnpCmd adds capabilities to test the UPnP functionality. -var ProbeUpnpCmd = &cobra.Command{ - Use: "probe-upnp", - Aliases: []string{"probe_upnp"}, - Short: "Test UPnP functionality", - RunE: probeUpnp, -} - -func probeUpnp(cmd *cobra.Command, args []string) error { - capabilities, err := upnp.Probe(logger) - if err != nil { - fmt.Println("Probe failed: ", err) - } else { - fmt.Println("Probe success!") - jsonBytes, err := cmtjson.Marshal(capabilities) - if err != nil { - return err - } - fmt.Println(string(jsonBytes)) - } - return nil -} diff --git a/cmd/cometbft/commands/root_test.go b/cmd/cometbft/commands/root_test.go index 40909d86eb..31c90ac868 100644 --- a/cmd/cometbft/commands/root_test.go +++ b/cmd/cometbft/commands/root_test.go @@ -17,30 +17,12 @@ import ( cmtos "github.com/cometbft/cometbft/libs/os" ) -var ( - defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") -) - // clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("CMTHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("CMT_HOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TMHOME"); err != nil { - //XXX: Deprecated. 
- panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - //XXX: Deprecated. - panic(err) - } +func clearConfig(t *testing.T, dir string) { + os.Clearenv() + err := os.RemoveAll(dir) + require.NoError(t, err) - if err := os.RemoveAll(dir); err != nil { - panic(err) - } viper.Reset() config = cfg.DefaultConfig() } @@ -58,11 +40,11 @@ func testRootCmd() *cobra.Command { return rootCmd } -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(defaultRoot) +func testSetup(t *testing.T, root string, args []string, env map[string]string) error { + clearConfig(t, root) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env args = append([]string{rootCmd.Use}, args...) @@ -70,22 +52,27 @@ func testSetup(rootDir string, args []string, env map[string]string) error { } func TestRootHome(t *testing.T) { - newRoot := filepath.Join(defaultRoot, "something-else") + tmpDir := os.TempDir() + root := filepath.Join(tmpDir, "adir") + newRoot := filepath.Join(tmpDir, "something-else") + defer clearConfig(t, root) + defer clearConfig(t, newRoot) + cases := []struct { args []string env map[string]string root string }{ - {nil, nil, defaultRoot}, + {nil, nil, root}, {[]string{"--home", newRoot}, nil, newRoot}, {nil, map[string]string{"TMHOME": newRoot}, newRoot}, //XXX: Deprecated. 
{nil, map[string]string{"CMTHOME": newRoot}, newRoot}, } for i, tc := range cases { - idxString := strconv.Itoa(i) + idxString := "idx: " + strconv.Itoa(i) - err := testSetup(defaultRoot, tc.args, tc.env) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.root, config.RootDir, idxString) @@ -118,8 +105,10 @@ func TestRootFlagsEnv(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - - err := testSetup(defaultRoot, tc.args, tc.env) + root := filepath.Join(os.TempDir(), "adir2_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) + err := testSetup(t, root, tc.args, tc.env) require.Nil(t, err, idxString) assert.Equal(t, tc.logLevel, config.LogLevel, idxString) @@ -148,11 +137,12 @@ func TestRootConfig(t *testing.T) { for i, tc := range cases { idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - + root := filepath.Join(os.TempDir(), "adir3_"+idxString) + idxString = "idx: " + idxString + defer clearConfig(t, root) // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") - err := cmtos.EnsureDir(configFilePath, 0700) + configFilePath := filepath.Join(root, "config") + err := cmtos.EnsureDir(configFilePath, 0o700) require.Nil(t, err) // write the non-defaults to a different path @@ -161,7 +151,7 @@ func TestRootConfig(t *testing.T) { require.Nil(t, err) rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "CMT", defaultRoot) + cmd := cli.PrepareBaseCmd(rootCmd, "CMT", root) // run with the args and env tc.args = append([]string{rootCmd.Use}, tc.args...) 
diff --git a/cmd/cometbft/commands/run_node.go b/cmd/cometbft/commands/run_node.go index b8fa35f26d..c25e9e448c 100644 --- a/cmd/cometbft/commands/run_node.go +++ b/cmd/cometbft/commands/run_node.go @@ -68,7 +68,6 @@ func AddNodeFlags(cmd *cobra.Command) { cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") cmd.Flags().String("p2p.unconditional_peer_ids", config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") - cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "enable/disable seed mode") cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") diff --git a/cmd/cometbft/main.go b/cmd/cometbft/main.go index b7af0ebbb8..e35c078867 100644 --- a/cmd/cometbft/main.go +++ b/cmd/cometbft/main.go @@ -16,7 +16,6 @@ func main() { rootCmd.AddCommand( cmd.GenValidatorCmd, cmd.InitFilesCmd, - cmd.ProbeUpnpCmd, cmd.LightCmd, cmd.ReplayCmd, cmd.ReplayConsoleCmd, diff --git a/config/config.go b/config/config.go index 9f46752074..e8a18f02d6 100644 --- a/config/config.go +++ b/config/config.go @@ -28,6 +28,9 @@ const ( // Default is v0. MempoolV0 = "v0" MempoolV1 = "v1" + + MempoolTypeFlood = "flood" + MempoolTypeNop = "nop" ) // NOTE: Most of the structs & relevant comments + the @@ -73,7 +76,7 @@ type Config struct { Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` BlockSync *BlockSyncConfig `mapstructure:"blocksync"` - //TODO(williambanfield): remove this field once v0.37 is released. + // TODO(williambanfield): remove this field once v0.37 is released. 
// https://github.com/tendermint/tendermint/issues/9279 DeprecatedFastSyncConfig map[interface{}]interface{} `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` @@ -151,6 +154,9 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.Instrumentation.ValidateBasic(); err != nil { return fmt.Errorf("error in [instrumentation] section: %w", err) } + if !cfg.Consensus.CreateEmptyBlocks && cfg.Mempool.Type == MempoolTypeNop { + return fmt.Errorf("`nop` mempool does not support create_empty_blocks = false") + } return nil } @@ -174,6 +180,9 @@ func (cfg *Config) CheckDeprecated() []string { if cfg.BaseConfig.DeprecatedFastSyncMode != nil { warnings = append(warnings, "fast_sync key detected. This key has been renamed to block_sync. The value of this deprecated key will be disregarded.") } + if cfg.P2P.UPNP != nil { + warnings = append(warnings, "unused and deprecated upnp field detected in P2P config.") + } return warnings } @@ -202,7 +211,7 @@ type BaseConfig struct { //nolint: maligned // Deprecated: BlockSync will be enabled unconditionally in the next major release. BlockSyncMode bool `mapstructure:"block_sync"` - //TODO(williambanfield): remove this field once v0.37 is released. + // TODO(williambanfield): remove this field once v0.37 is released. // https://github.com/tendermint/tendermint/issues/9279 DeprecatedFastSyncMode interface{} `mapstructure:"fast_sync"` @@ -561,8 +570,8 @@ type P2PConfig struct { //nolint: maligned // Comma separated list of nodes to keep persistent connections to PersistentPeers string `mapstructure:"persistent_peers"` - // UPNP port forwarding - UPNP bool `mapstructure:"upnp"` + // Deprecated and unused. 
+ UPNP interface{} `mapstructure:"upnp"` // Path to address book AddrBook string `mapstructure:"addr_book_file"` @@ -628,7 +637,6 @@ func DefaultP2PConfig() *P2PConfig { return &P2PConfig{ ListenAddress: "tcp://0.0.0.0:26656", ExternalAddress: "", - UPNP: false, AddrBook: defaultAddrBookPath, AddrBookStrict: true, MaxNumInboundPeers: 40, @@ -719,6 +727,17 @@ type MempoolConfig struct { // 1) "v0" - (default) FIFO mempool. // 2) "v1" - prioritized mempool (deprecated; will be removed in the next release). Version string `mapstructure:"version"` + + // The type of mempool for this node to use. + // + // Possible types: + // - "flood" : concurrent linked list mempool with flooding gossip protocol + // (default) + // - "nop" : nop-mempool (short for no operation; the ABCI app is + // responsible for storing, disseminating and proposing txs). + // "create_empty_blocks=false" is not supported. + Type string `mapstructure:"type"` + // RootDir is the root directory for all data. This should be configured via // the $CMTHOME env variable or --home cmd flag rather than overriding this // struct field. @@ -759,6 +778,21 @@ type MempoolConfig struct { // Including space needed by encoding (one varint per transaction). // XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 MaxBatchBytes int `mapstructure:"max_batch_bytes"` + // Experimental parameters to limit gossiping txs to up to the specified number of peers. + // This feature is only available for the default mempool (version config set to "v0"). + // We use two independent upper values for persistent and non-persistent peers. + // Unconditional peers are not affected by this feature. + // If we are connected to more than the specified number of persistent peers, only send txs to + // ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those + // persistent peers disconnects, activate another persistent peer. 
+ // Similarly for non-persistent peers, with an upper limit of + // ExperimentalMaxGossipConnectionsToNonPersistentPeers. + // If set to 0, the feature is disabled for the corresponding group of peers, that is, the + // number of active connections to that group of peers is not bounded. + // For non-persistent peers, if enabled, a value of 10 is recommended based on experimental + // performance results using the default P2P configuration. + ExperimentalMaxGossipConnectionsToPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_persistent_peers"` + ExperimentalMaxGossipConnectionsToNonPersistentPeers int `mapstructure:"experimental_max_gossip_connections_to_non_persistent_peers"` // TTLDuration, if non-zero, defines the maximum amount of time a transaction // can exist for in the mempool. @@ -786,16 +820,19 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the CometBFT mempool func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ + Type: MempoolTypeFlood, Version: MempoolV0, Recheck: true, Broadcast: true, WalPath: "", // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck - Size: 5000, - MaxTxsBytes: 1024 * 1024 * 1024, // 1GB - CacheSize: 10000, - MaxTxBytes: 1024 * 1024, // 1MB + Size: 5000, + MaxTxsBytes: 1024 * 1024 * 1024, // 1GB + CacheSize: 10000, + MaxTxBytes: 1024 * 1024, // 1MB + ExperimentalMaxGossipConnectionsToNonPersistentPeers: 0, + ExperimentalMaxGossipConnectionsToPersistentPeers: 0, TTLDuration: 0 * time.Second, TTLNumBlocks: 0, } @@ -821,6 +858,12 @@ func (cfg *MempoolConfig) WalEnabled() bool { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
func (cfg *MempoolConfig) ValidateBasic() error { + switch cfg.Type { + case MempoolTypeFlood, MempoolTypeNop: + case "": // allow empty string to be backwards compatible + default: + return fmt.Errorf("unknown mempool type: %q", cfg.Type) + } if cfg.Size < 0 { return errors.New("size can't be negative") } @@ -833,6 +876,12 @@ func (cfg *MempoolConfig) ValidateBasic() error { if cfg.MaxTxBytes < 0 { return errors.New("max_tx_bytes can't be negative") } + if cfg.ExperimentalMaxGossipConnectionsToPersistentPeers < 0 { + return errors.New("experimental_max_gossip_connections_to_persistent_peers can't be negative") + } + if cfg.ExperimentalMaxGossipConnectionsToNonPersistentPeers < 0 { + return errors.New("experimental_max_gossip_connections_to_non_persistent_peers can't be negative") + } return nil } diff --git a/config/config_test.go b/config/config_test.go index 86b32c7687..ac455a5c45 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -37,6 +37,11 @@ func TestConfigValidateBasic(t *testing.T) { // tamper with timeout_propose cfg.Consensus.TimeoutPropose = -10 * time.Second assert.Error(t, cfg.ValidateBasic()) + cfg.Consensus.TimeoutPropose = 3 * time.Second + + cfg.Consensus.CreateEmptyBlocks = false + cfg.Mempool.Type = MempoolTypeNop + assert.Error(t, cfg.ValidateBasic()) } func TestTLSConfiguration(t *testing.T) { @@ -121,6 +126,9 @@ func TestMempoolConfigValidateBasic(t *testing.T) { assert.Error(t, cfg.ValidateBasic()) reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) } + + reflect.ValueOf(cfg).Elem().FieldByName("Type").SetString("invalid") + assert.Error(t, cfg.ValidateBasic()) } func TestStateSyncConfigValidateBasic(t *testing.T) { diff --git a/config/toml.go b/config/toml.go index ad560960af..20fd733fa0 100644 --- a/config/toml.go +++ b/config/toml.go @@ -274,11 +274,9 @@ pprof_laddr = "{{ .RPC.PprofListenAddress }}" # Address to listen for incoming connections laddr = "{{ .P2P.ListenAddress }}" -# Address to advertise to peers for 
them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. ip and port are required -# example: 159.89.10.97:26656 +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 external_address = "{{ .P2P.ExternalAddress }}" # Comma separated list of seed nodes to connect to @@ -287,9 +285,6 @@ seeds = "{{ .P2P.Seeds }}" # Comma separated list of nodes to keep persistent connections to persistent_peers = "{{ .P2P.PersistentPeers }}" -# UPNP port forwarding -upnp = {{ .P2P.UPNP }} - # Path to address book addr_book_file = "{{ js .P2P.AddrBook }}" @@ -350,6 +345,16 @@ dial_timeout = "{{ .P2P.DialTimeout }}" # 2) "v1" - prioritized mempool (deprecated; will be removed in the next release). version = "{{ .Mempool.Version }}" +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} wal_dir = "{{ js .Mempool.WalPath }}" @@ -395,6 +400,22 @@ ttl-duration = "{{ .Mempool.TTLDuration }}" # it's insertion time into the mempool is beyond ttl-duration. ttl-num-blocks = {{ .Mempool.TTLNumBlocks }} +# Experimental parameters to limit gossiping txs to up to the specified number of peers. +# This feature is only available for the default mempool (version config set to "v0"). +# We use two independent upper values for persistent and non-persistent peers. +# Unconditional peers are not affected by this feature. 
+# If we are connected to more than the specified number of persistent peers, only send txs to +# ExperimentalMaxGossipConnectionsToPersistentPeers of them. If one of those +# persistent peers disconnects, activate another persistent peer. +# Similarly for non-persistent peers, with an upper limit of +# ExperimentalMaxGossipConnectionsToNonPersistentPeers. +# If set to 0, the feature is disabled for the corresponding group of peers, that is, the +# number of active connections to that group of peers is not bounded. +# For non-persistent peers, if enabled, a value of 10 is recommended based on experimental +# performance results using the default P2P configuration. +experimental_max_gossip_connections_to_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers }} +experimental_max_gossip_connections_to_non_persistent_peers = {{ .Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers }} + ####################################################### ### State Sync Configuration Options ### ####################################################### diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 793f4e29e4..9d36ff35fa 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -621,12 +621,11 @@ func waitForBlockWithUpdatedValsAndValidateIt( if newBlock.LastCommit.Size() == len(updatedVals) { css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) break LOOP - } else { - css[j].Logger.Debug( - "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", - "height", - newBlock.Height) } + css[j].Logger.Debug( + "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. 
Skipping", + "height", + newBlock.Height) } err := validateBlock(newBlock, updatedVals) diff --git a/consensus/replay.go b/consensus/replay.go index 43930f631c..7a9d07cb78 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -2,6 +2,7 @@ package consensus import ( "bytes" + "context" "fmt" "hash/crc32" "io" @@ -239,6 +240,11 @@ func (h *Handshaker) NBlocks() int { // TODO: retry the handshake/replay if it fails ? func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { + return h.HandshakeWithContext(context.TODO(), proxyApp) +} + +// HandshakeWithContext is cancellable version of Handshake +func (h *Handshaker) HandshakeWithContext(ctx context.Context, proxyApp proxy.AppConns) error { // Handshake is done via ABCI Info on the query conn. res, err := proxyApp.Query().InfoSync(proxy.RequestInfo) @@ -265,7 +271,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { } // Replay blocks up to the latest in the blockstore. - _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + appHash, err = h.ReplayBlocksWithContext(ctx, h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("error on replay: %v", err) } @@ -286,6 +292,17 @@ func (h *Handshaker) ReplayBlocks( appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns, +) ([]byte, error) { + return h.ReplayBlocksWithContext(context.TODO(), state, appHash, appBlockHeight, proxyApp) +} + +// ReplayBlocksWithContext is cancellable version of ReplayBlocks. +func (h *Handshaker) ReplayBlocksWithContext( + ctx context.Context, + state sm.State, + appHash []byte, + appBlockHeight int64, + proxyApp proxy.AppConns, ) ([]byte, error) { storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() @@ -390,7 +407,7 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. 
if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! @@ -405,7 +422,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(ctx, state, proxyApp, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), @@ -435,6 +452,7 @@ func (h *Handshaker) ReplayBlocks( } func (h *Handshaker) replayBlocks( + ctx context.Context, state sm.State, proxyApp proxy.AppConns, appBlockHeight, @@ -461,6 +479,12 @@ func (h *Handshaker) replayBlocks( firstBlock = state.InitialHeight } for i := firstBlock; i <= finalBlock; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. 
diff --git a/consensus/replay_stubs.go b/consensus/replay_stubs.go index 5ffb20ad82..5409d6f852 100644 --- a/consensus/replay_stubs.go +++ b/consensus/replay_stubs.go @@ -77,6 +77,10 @@ type mockProxyApp struct { abciResponses *cmtstate.ABCIResponses } +func (mock *mockProxyApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { + return *mock.abciResponses.BeginBlock +} + func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { r := mock.abciResponses.DeliverTxs[mock.txCount] mock.txCount++ diff --git a/consensus/state.go b/consensus/state.go index f6715cd8c5..6fb202af23 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -171,7 +171,9 @@ func NewState( evsw: cmtevents.NewEventSwitch(), metrics: NopMetrics(), } - + for _, option := range options { + option(cs) + } // set function defaults (may be overwritten before calling Start) cs.decideProposal = cs.defaultDecideProposal cs.doPrevote = cs.defaultDoPrevote @@ -179,6 +181,11 @@ func NewState( // We have no votes, so reconstruct LastCommit from SeenCommit. if state.LastBlockHeight > 0 { + // In case of out of band performed statesync, the state store + // will have a state but no extended commit (as no block has been downloaded). + // If the height at which the vote extensions are enabled is lower + // than the height at which we statesync, consensus will panic because + // it will try to reconstruct the extended commit here. 
cs.reconstructLastCommit(state) } @@ -187,9 +194,6 @@ func NewState( // NOTE: we do not call scheduleRound0 yet, we do that upon Start() cs.BaseService = *service.NewBaseService(nil, "State", cs) - for _, option := range options { - option(cs) - } return cs } @@ -996,7 +1000,7 @@ func (cs *State) enterNewRound(height int64, round int32) { logger.Debug("need to set a buffer and log message here for sanity", "start_time", cs.StartTime, "now", now) } - logger.Debug("entering new round", "current", log.NewLazySprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) + prevHeight, prevRound, prevStep := cs.Height, cs.Round, cs.Step // increment validators if necessary validators := cs.Validators @@ -1010,17 +1014,23 @@ func (cs *State) enterNewRound(height int64, round int32) { // but we fire an event, so update the round step first cs.updateRoundStep(round, cstypes.RoundStepNewRound) cs.Validators = validators + propAddress := validators.GetProposer().PubKey.Address() if round == 0 { // We've already reset these upon new height, // and meanwhile we might have received a proposal // for round 0. 
} else { - logger.Debug("resetting proposal info") + logger.Info("resetting proposal info", "proposer", propAddress) cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil } + logger.Debug("entering new round", + "previous", log.NewLazySprintf("%v/%v/%v", prevHeight, prevRound, prevStep), + "proposer", propAddress, + ) + cs.Votes.SetRound(cmtmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false @@ -1870,7 +1880,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { p := proposal.ToProto() // Verify signature - if !cs.Validators.GetProposer().PubKey.VerifySignature( + pubKey := cs.Validators.GetProposer().PubKey + if !pubKey.VerifySignature( types.ProposalSignBytes(cs.state.ChainID, p), proposal.Signature, ) { return ErrInvalidProposalSignature @@ -1885,7 +1896,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - cs.Logger.Info("received proposal", "proposal", proposal) + cs.Logger.Info("received proposal", "proposal", proposal, "proposer", pubKey.Address()) return nil } diff --git a/crypto/sr25519/pubkey.go b/crypto/sr25519/pubkey.go index 52af0f4dad..1e9b791811 100644 --- a/crypto/sr25519/pubkey.go +++ b/crypto/sr25519/pubkey.go @@ -55,7 +55,8 @@ func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool { return false } - return publicKey.Verify(signature, signingContext) + ok, err := publicKey.Verify(signature, signingContext) + return ok && err == nil } func (pubKey PubKey) String() string { diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 322d9fc2da..e9ac6adfe6 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -259,7 +259,7 @@ You could put the commands in a file and run Note that the `abci-cli` is designed strictly for testing and debugging. 
In a real deployment, the role of sending messages is taken by CometBFT, which -connects to the app using three separate connections, each with its own +connects to the app using four separate connections, each with its own pattern of messages. For examples of running an ABCI app with CometBFT, see the diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index da191fb839..0c9922527e 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -14,11 +14,7 @@ the block itself is never stored. Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more -details on `Events`, see the - -[ABCI](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#events) - -documentation. +details on `Events`, see the [ABCI][abci-events] documentation. An `Event` has a composite key associated with it. A `compositeKey` is constructed by its type and key separated by a dot. @@ -36,7 +32,7 @@ would be equal to the composite key of `jack.account.number`. By default, CometBFT will index all transactions by their respective hashes and height and blocks by their height. -CometBFT allows for different events within the same height to have +CometBFT allows for different events within the same height to have equal attributes. ## Configuration @@ -74,8 +70,9 @@ entirely in the future. **Implementation and data layout** -The kv indexer stores each attribute of an event individually, by creating a composite key +The kv indexer stores each attribute of an event individually, by creating a composite key with + - event type, - attribute key, - attribute value, @@ -83,7 +80,7 @@ with - the height, and - event counter. 
For example the following events: - + ``` Type: "transfer", Attributes: []abci.EventAttribute{ @@ -94,7 +91,7 @@ Type: "transfer", }, ``` - + ``` Type: "transfer", Attributes: []abci.EventAttribute{ @@ -105,7 +102,7 @@ Type: "transfer", }, ``` -will be represented as follows in the store, assuming these events result from the EndBlock call for height 1: +will be represented as follows in the store, assuming these events result from the EndBlock call for height 1: ``` Key value @@ -121,10 +118,11 @@ transferBalance200EndBlock12 1 transferNodeNothingEndblock12 1 ``` -The event number is a local variable kept by the indexer and incremented when a new event is processed. -It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height. + +The event number is a local variable kept by the indexer and incremented when a new event is processed. +It is an `int64` variable and has no other semantics besides being used to associate attributes belonging to the same events within a height. This variable is not atomically incremented as event indexing is deterministic. **Should this ever change**, the event id generation -will be broken. +will be broken. #### PostgreSQL @@ -237,18 +235,22 @@ curl "localhost:26657/block_search?query=\"block.height > 10 AND val_set.num_cha ``` -Storing the event sequence was introduced in CometBFT 0.34.26. Before that, up until Tendermint Core 0.34.26, -the event sequence was not stored in the kvstore and events were stored only by height. That means that queries -returned blocks and transactions whose event attributes match within the height but can match across different -events on that height. +Storing the event sequence was introduced in CometBFT 0.34.26. Before that, up until Tendermint Core 0.34.26, +the event sequence was not stored in the kvstore and events were stored only by height. 
That means that queries +returned blocks and transactions whose event attributes match within the height but can match across different +events on that height. This behavior was fixed with CometBFT 0.34.26+. However, if the data was indexed with earlier versions of Tendermint Core and not re-indexed, that data will be queried as if all the attributes within a height occurred within the same event. # Event attribute value types + Users can use anything as an event value. However, if the even attrbute value is a number, the following restrictions apply: + - Negative numbers will not be properly retrieved when querying the indexer - When querying the events using `tx_search` and `block_search`, the value given as part of the condition cannot be a float. - Any event value retrieved from the database will be represented as a `BigInt` (from `math/big`) -- Floating point values are not read from the database even with the introduction of `BigInt`. This was intentionally done -to keep the same beheaviour as was historically present and not introduce breaking changes. This will be fixed in the 0.38 series. \ No newline at end of file +- Floating point values are not read from the database even with the introduction of `BigInt`. This was intentionally done +to keep the same behaviour as was historically present and not introduce breaking changes. This will be fixed in the 0.38 series. 
+ +[abci-events]: https://github.com/cometbft/cometbft/blob/v0.37.x/spec/abci/abci++_basic_concepts.md#events diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 7a2ad6fed4..5e10cc7b11 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -84,6 +84,7 @@ therefore they have links to it and refer to CometBFT as "Tendermint" or "Tender - [ADR-075: RPC Event Subscription Interface](./adr-075-rpc-subscription.md) - [ADR-079: Ed25519 Verification](./adr-079-ed25519-verification.md) - [ADR-081: Protocol Buffers Management](./adr-081-protobuf-mgmt.md) +- [ADR-111: `nop` Mempool](./adr-111-nop-mempool.md) ### Deprecated diff --git a/docs/architecture/adr-111-nop-mempool.md b/docs/architecture/adr-111-nop-mempool.md new file mode 100644 index 0000000000..234cd5b9c1 --- /dev/null +++ b/docs/architecture/adr-111-nop-mempool.md @@ -0,0 +1,324 @@ +# ADR 111: `nop` Mempool + +## Changelog + +- 2023-11-07: First version (@sergio-mena) +- 2023-11-15: Addressed PR comments (@sergio-mena) +- 2023-11-17: Renamed `nil` to `nop` (@melekes) +- 2023-11-20: Mentioned that the app could reuse p2p network in the future (@melekes) +- 2023-11-22: Adapt ADR to implementation (@melekes) + +## Status + +Accepted + +[Tracking issue](https://github.com/cometbft/cometbft/issues/1666) + +## Context + +### Summary + +The current mempool built into CometBFT implements a robust yet somewhat inefficient transaction gossip mechanism. +While the CometBFT team is currently working on more efficient general-purpose transaction gossiping mechanisms, +some users have expressed their desire to manage both the mempool and the transaction dissemination mechanism +outside CometBFT (typically at the application level). + +This ADR proposes a fairly simple way for CometBFT to fulfill this use case without moving away from our current architecture. + +### In the Beginning... 
+ +It is well understood that a dissemination mechanism +(sometimes using _Reliable Broadcast_ [\[HT94\]][HT94] but not necessarily), +is needed in a distributed system implementing State-Machine Replication (SMR). +This is also the case in blockchains. +Early designs such as Bitcoin or Ethereum include an _internal_ component, +responsible for dissemination, called mempool. +Tendermint Core chose to follow the same design given the success +of those early blockchains and, since inception, Tendermint Core and later CometBFT have featured a mempool as an internal piece of its architecture. + + +However, the design of ABCI clearly dividing the application logic (i.e., the appchain) +and the consensus logic that provides SMR semantics to the app is a unique innovation in Cosmos +that sets it apart from Bitcoin, Ethereum, and many others. +This clear separation of concerns entailed many consequences, mostly positive: +it allows CometBFT to be used underneath (currently) tens of different appchains in production +in the Cosmos ecosystem and elsewhere. +But there are other implications for having an internal mempool +in CometBFT: the interaction between the mempool, the application, and the network +becomes more indirect, and thus more complex and hard to understand and operate. + +### ABCI++ Improvements and Remaining Shortcomings + +Before the release of ABCI++, `CheckTx` was the main mechanism the app had at its disposal to influence +what transactions made it to the mempool, and very indirectly what transactions got ultimately proposed in a block. +Since ABCI 1.0 (the first part of ABCI++, shipped in `v0.37.x`), the application has +a more direct say in what is proposed through `PrepareProposal` and `ProcessProposal`. + +This has greatly improved the ability for appchains to influence the contents of the proposed block. +Further, ABCI++ has enabled many new use cases for appchains. 
However, some issues remain with +the current model: + +* We are using the same P2P network for disseminating transactions and consensus-related messages. +* Many mempool parameters are configured on a per-node basis by node operators, + allowing the possibility of inconsistent mempool configuration across the network + with potentially serious scalability effects + (even causing unacceptable performance degradation in some extreme cases). +* The current mempool implementation uses a basic (robust but sub-optimal) flood algorithm + * the CometBFT team is working on improving it as one of our current priorities, + but any improvement we come up with must address the needs of a vast spectrum of applications, + as well as be heavily scale-tested in various scenarios + (in an attempt to cover the applications' wide spectrum) + * a mempool designed specifically for one particular application + would reduce the search space as its designers can devise it with just their application's + needs in mind. +* The interaction with the application is still somewhat convoluted: + * the application has to decide what logic to implement in `CheckTx`, + what to do with the transaction list coming in `RequestPrepareProposal`, + whether it wants to maintain an app-side mempool (more on this below), and whether or not + to combine the transactions in the app-side mempool with those coming in `RequestPrepareProposal` + * all those combinations are hard to fully understand, as the semantics and guarantees are + often not clear + * when using exclusively an app-mempool (the approach taken in the Cosmos SDK `v0.47.x`) + for populating proposed blocks, with the aim of simplifying the app developers' life, + we still have a suboptimal model where we need to continue using CometBFT's mempool + in order to disseminate the transactions. So, we end up using twice as much memory, + as in-transit transactions need to be kept in both mempools. 
+ +The approach presented in this ADR builds on the app-mempool design released in `v0.47.x` +of the Cosmos SDK, +and briefly discussed in the last bullet point above (see [SDK app-mempool][sdk-app-mempool] for further details of this model). + +In the app-mempool design in Cosmos SDK `v0.47.x` +an unconfirmed transaction must be both in CometBFT's mempool for dissemination and +in the app's mempool so the application can decide how to manage the mempool. +There is no doubt that this approach has numerous advantages. However, it also has some implications that need to be considered: + +* Having every transaction both in CometBFT and in the application is suboptimal in terms of memory. + Additionally, the app developer has to be careful + that the contents of both mempools do not diverge over time + (hence the crucial role `re-CheckTx` plays post-ABCI++). +* The main reason for a transaction needing to be in CometBFT's mempool is + because the design in Cosmos SDK `v0.47.x` does not consider an application + that has its own means of disseminating transactions. + It reuses the peer to peer network underneath CometBFT reactors. +* There is no point in having transactions in CometBFT's mempool if an application implements an ad-hoc design for disseminating transactions. + +This proposal targets this kind of applications: +those that have an ad-hoc mechanism for transaction dissemination that better meets the application requirements. + +The ABCI application could reuse the P2P network once this is exposed via ABCI. +But this will take some time as it needs to be implemented, and has a dependency +on bi-directional ABCI, which is also quite substantial. See +[1](https://github.com/cometbft/cometbft/discussions/1112) and +[2](https://github.com/cometbft/cometbft/discussions/494) discussions. 
+ +We propose to introduce a `nop` (short for no operation) mempool which will effectively act as a stubbed object +internally: + +* it will reject any transaction being locally submitted or gossiped by a peer +* when a _reap_ (as it is currently called) is executed in the mempool, an empty answer will always be returned +* the application running on the proposer validator will add transactions it received + using the appchain's own mechanism via `PrepareProposal`. + +## Alternative Approaches + +These are the alternatives known to date: + +1. Keep the current model. Useful for basic apps, but clearly suboptimal for applications + with their own mechanism to disseminate transactions and particular performance requirements. +2. Provide more efficient general-purpose mempool implementations. + This is an ongoing effort (e.g., [CAT mempool][cat-mempool]), but will take some time, and R&D effort, to come up with + advanced mechanisms -- likely highly configurable and thus complex -- which then will have to be thoroughly tested. +3. A similar approach to this one ([ADR110][adr-110]) whereby the application-specific + mechanism directly interacts with CometBFT via a newly defined gRPC interface. +4. Partially adopting this ADR. There are several possibilities: + * Use the current mempool, disable transaction broadcast in `config.toml`, and accept transactions from users via `BroadcastTX*` RPC methods. + Positive: avoids transaction gossiping; app can reuse the mempool existing in CometBFT. + Negative: requires clients to know the validators' RPC endpoints (potential security issues). + * Transaction broadcast is disabled in `config.toml`, and have the application always reject transactions in `CheckTx`. + Positive: effectively disables the mempool; does not require modifications to Comet (may be used in `v0.37.x` and `v0.38.x`). + Negative: requires apps to disseminate txs themselves; the setup for this is less straightforward than this ADR's proposal. 
+ +## Decision + +TBD + +## Detailed Design + +What this ADR proposes can already be achieved with an unmodified CometBFT since +`v0.37.x`, albeit with a complex, poor UX (see the last alternative in section +[Alternative Approaches](#alternative-approaches)). The core of this proposal +is to make some internal changes so it is clear and simple for app developers, +thus improving the UX. + +#### `nop` Mempool + +We propose a new mempool implementation, called `nop` Mempool, that effectively disables all mempool functionality +within CometBFT. +The `nop` Mempool implements the `Mempool` interface in a very simple manner: + +* `CheckTx(tx types.Tx) (*abcicli.ReqRes, error)`: returns `nil, ErrNotAllowed` +* `RemoveTxByKey(txKey types.TxKey) error`: returns `ErrNotAllowed` +* `ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs`: returns `nil` +* `ReapMaxTxs(max int) types.Txs`: returns `nil` +* `Lock()`: does nothing +* `Unlock()`: does nothing +* `Update(...) error`: returns `nil` +* `FlushAppConn() error`: returns `nil` +* `Flush()`: does nothing +* `TxsAvailable() <-chan struct{}`: returns `nil` +* `EnableTxsAvailable()`: does nothing +* `SetTxRemovedCallback(cb func(types.TxKey))`: does nothing +* `Size() int` returns 0 +* `SizeBytes() int64` returns 0 + +Upon startup, the `nop` mempool reactor will advertise no channels to the peer-to-peer layer. + +### Configuration + +We propose the following changes to the `config.toml` file: + +```toml +[mempool] +# The type of mempool for this CometBFT node to use. +# +# Valid types of mempools supported by CometBFT: +# - "flood" : clist mempool with flooding gossip protocol (default) +# - "nop" : nop-mempool (app has implemented an alternative tx dissemination mechanism) +type = "nop" +``` + +The config validation logic will be modified to add a new rule that rejects a configuration file +if all of these conditions are met: + +* the mempool is set to `nop` +* `create_empty_blocks`, in `consensus` section, is set to `false`. 
+ +The reason for this extra validity rule is that the `nop`-mempool, as proposed here, +does not support the "do not create empty blocks" functionality. +Here are some considerations on this: + +* The "do not create empty blocks" functionality + * entangles the consensus and mempool reactors + * is hardly used in production by real appchains (to the best of CometBFT team's knowledge) + * its current implementation for the built-in mempool has undesired side-effects + * app hashes currently refer to the previous block, + * and thus it interferes with query provability. +* If needed in the future, this can be supported by extending ABCI, + but we will first need to see a real need for this before committing to changing ABCI + (which has other, higher-impact changes waiting to be prioritized). + +### RPC Calls + +There are no changes needed in the code dealing with RPC. Those RPC paths that call methods of the `Mempool` interface, +will simply be calling the new implementation. + +### Impacted Workflows + +* *Submitting a transaction*. Users are not to submit transactions via CometBFT's RPC. + `BroadcastTx*` RPC methods will fail with a reasonable error and the 501 status code. + The application running on a full node must offer an interface for users to submit new transactions. + It could also be a distinct node (or set of nodes) in the network. + These considerations are exclusively the application's concern in this approach. +* *Time to propose a block*. The consensus reactor will call `ReapMaxBytesMaxGas` which will return a `nil` slice. + `RequestPrepareProposal` will thus contain no transactions. +* *Consensus waiting for transactions to become available*. `TxsAvailable()` returns `nil`. + `cs.handleTxsAvailable()` won't ever be executed. + At any rate, a configuration with the `nop` mempool and `create_empty_blocks` set to `false` + will be rejected in the first place. +* *A new block is decided*. 
+ * When `Update` is called, nothing is done (no decided transaction is removed). + * Locking and unlocking the mempool has no effect. +* *ABCI mempool's connection* + CometBFT will still open a "mempool" connection, even though it won't be used. + This is to avoid doing lots of breaking changes. + +### Impact on Current Release Plans + +The changes needed for this approach, are fairly simple, and the logic is clear. +This might allow us to even deliver it as part of CometBFT `v1` (our next release) +even without a noticeable impact on `v1`'s delivery schedule. + +The CometBFT team (learning from past dramatic events) usually takes a conservative approach +for backporting changes to release branches that have already undergone a full QA cycle +(and thus are in code-freeze mode). +For this reason, although the limited impact of these changes would limit the risks +of backporting to `v0.38.x` and `v0.37.x`, a careful risk/benefit evaluation will +have to be carried out. + +Backporting to `v0.34.x` does not make sense as this version predates the release of `ABCI 1.0`, +so using the `nop` mempool renders CometBFT's operation useless. + +### Config parameter _vs._ application-enforced parameter + +In the current proposal, the parameter selecting the mempool is in `config.toml`. +However, it is not a clear-cut decision. These are the alternatives we see: + +* *Mempool selected in `config.toml` (our current design)*. + This is the way the mempool has always been selected in Tendermint Core and CometBFT, + in those versions where there were more than one mempool to choose from. + As the configuration is in `config.toml`, it is up to the node operators to configure their + nodes consistently, via social consensus. However this cannot be guaranteed. + A network with an inconsistent choice of mempool at different nodes might + result in undesirable side effects, such as peers disconnecting from nodes + that sent them messages via the mempool channel. 
+* *Mempool selected as a network-wide parameter*. + A way to prevent any inconsistency when selecting the mempool is to move the configuration out of `config.toml` + and have it as a network-wide application-enforced parameter, implemented in the same way as Consensus Params. + The Cosmos community may not be ready for such a rigid, radical change, + even if it eliminates the risk of operators shooting themselves in the foot. + Hence we currently favor the previous alternative. +* *Mempool selected as a network-wide parameter, but allowing override*. + A third option, half way between the previous two, is to have the mempool selection + as a network-wide parameter, but with a special value called _local-config_ that still + allows an appchain to decide to leave it up to operators to configure it in `config.toml`. + +Ultimately, the "config parameter _vs._ application-enforced parameter" discussion +is a more general one that is applicable to other parameters not related to mempool selection. +In that sense, it is out of the scope of this ADR. + +## Consequences + +### Positive + +- Applications can now find mempool mechanisms that better fit their particular needs: + - Ad-hoc ways to add, remove, merge, reorder, modify, prioritize transactions according + to application needs. + - A way to disseminate transactions (gossip-based or other) to get the submitted transactions + to proposers. The application developers can devise simpler, efficient mechanisms tailored + to their application. + - Back-pressure mechanisms to prevent malicious users from abusing the transaction + dissemination mechanism. +- In this approach, CometBFT's peer-to-peer layer is relieved from managing transaction gossip, freeing up its resources for other reactors such as consensus, evidence, block-sync, or state-sync. +- There is no risk for the operators of a network to provide inconsistent configurations + for some mempool-related parameters. 
Some of those misconfigurations are known to have caused + serious performance issues in CometBFT's peer to peer network. + Unless, of course, the application-defined transaction dissemination mechanism ends up + allowing similar configuration inconsistencies. +- The interaction between the application and CometBFT at `PrepareProposal` time + is simplified. No transactions are ever provided by CometBFT, + and no transactions can ever be left in the mempool when CometBFT calls `PrepareProposal`: + the application trivially has all the information. +- UX is improved compared to how this can be done prior to this ADR. + +### Negative + +- With the `nop` mempool, it is up to the application to provide users with a way + to submit transactions and deliver those transactions to validators. + This is a considerable endeavor, and more basic appchains may consider it is not worth the hassle. +- There is a risk of wasting resources by those nodes that have a misconfigured + mempool (bandwidth, CPU, memory, etc). If there are TXs submitted (incorrectly) + via CometBFT's RPC, but those TXs are never submitted (correctly via an + app-specific interface) to the App, those TXs risk being there until the node + is stopped. Moreover, those TXs will be reaped & proposed every single block. + App developers will need to keep this in mind and panic on `CheckTx` or + `PrepareProposal` with a non-empty list of transactions. +- Optimizing block proposals by only including transaction IDs (e.g. TX hashes) is more difficult. + The ABCI app could do it by submitting TX hashes (rather than TXs themselves) + in `PrepareProposal`, and then having a mechanism for pulling TXs from the + network upon `FinalizeBlock`. 
+ +[sdk-app-mempool]: https://docs.cosmos.network/v0.47/build/building-apps/app-mempool +[adr-110]: https://github.com/cometbft/cometbft/pull/1565 +[HT94]: https://dl.acm.org/doi/book/10.5555/866693 +[cat-mempool]: https://github.com/cometbft/cometbft/pull/1472 \ No newline at end of file diff --git a/docs/core/block-structure.md b/docs/core/block-structure.md index 9d9a983c02..ef904da782 100644 --- a/docs/core/block-structure.md +++ b/docs/core/block-structure.md @@ -4,13 +4,16 @@ order: 8 # Block Structure -The CometBFT consensus engine records all agreements by a -supermajority of nodes into a blockchain, which is replicated among all -nodes. This blockchain is accessible via various RPC endpoints, mainly -`/block?height=` to get the full block, as well as -`/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what -exactly is stored in these blocks? +The CometBFT consensus engine records all agreements by a 2/3+ of nodes +into a blockchain, which is replicated among all nodes. This blockchain is +accessible via various RPC endpoints, mainly `/block?height=` to get the full +block, as well as `/blockchain?minHeight=_&maxHeight=_` to get a list of +headers. But what exactly is stored in these blocks? -The [specification](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started. +The [specification][data_structures] contains a detailed description of each +component - that's the best place to get started. -To dig deeper, check out the [types package documentation](https://godoc.org/github.com/cometbft/cometbft/types). +To dig deeper, check out the [types package documentation][types]. 
+ +[data_structures]: https://github.com/cometbft/cometbft/blob/v0.37.x/spec/core/data_structures.md +[types]: https://pkg.go.dev/github.com/cometbft/cometbft/types diff --git a/docs/core/configuration.md b/docs/core/configuration.md index 0fc008e5c3..2f9f011683 100644 --- a/docs/core/configuration.md +++ b/docs/core/configuration.md @@ -223,11 +223,9 @@ pprof_laddr = "" # Address to listen for incoming connections laddr = "tcp://0.0.0.0:26656" -# Address to advertise to peers for them to dial -# If empty, will use the same port as the laddr, -# and will introspect on the listener or use UPnP -# to figure out the address. ip and port are required -# example: 159.89.10.97:26656 +# Address to advertise to peers for them to dial. If empty, will use the same +# port as the laddr, and will introspect on the listener to figure out the +# address. IP and port are required. Example: 159.89.10.97:26656 external_address = "" # Comma separated list of seed nodes to connect to @@ -236,9 +234,6 @@ seeds = "" # Comma separated list of nodes to keep persistent connections to persistent_peers = "" -# UPNP port forwarding -upnp = false - # Path to address book addr_book_file = "config/addrbook.json" @@ -299,6 +294,16 @@ dial_timeout = "3s" # 2) "v1" - prioritized mempool (deprecated; will be removed in the next release). version = "v0" +# The type of mempool for this node to use. +# +# Possible types: +# - "flood" : concurrent linked list mempool with flooding gossip protocol +# (default) +# - "nop" : nop-mempool (short for no operation; the ABCI app is responsible +# for storing, disseminating and proposing txs). "create_empty_blocks=false" is +# not supported. +type = "flood" + recheck = true broadcast = true wal_dir = "" @@ -492,6 +497,7 @@ namespace = "cometbft" ``` ## Empty blocks VS no empty blocks + ### create_empty_blocks = true If `create_empty_blocks` is set to `true` in your config, blocks will be created ~ every second (with default consensus parameters). 
You can regulate the delay between blocks by changing the `timeout_commit`. E.g. `timeout_commit = "10s"` should result in ~ 10 second blocks. @@ -505,6 +511,7 @@ Note after the block H, CometBFT creates something we call a "proof block" (only Plus, if you set `create_empty_blocks_interval` to something other than the default (`0`), CometBFT will be creating empty blocks even in the absence of transactions every `create_empty_blocks_interval.` For instance, with `create_empty_blocks = false` and `create_empty_blocks_interval = "30s"`, CometBFT will only create blocks if there are transactions, or after waiting 30 seconds without receiving any transactions. ## Consensus timeouts explained + There's a variety of information about timeouts in [Running in production](./running-in-production.md#configuration-parameters). You can also find more detailed explanation in the paper describing @@ -523,18 +530,20 @@ timeout_precommit = "1s" timeout_precommit_delta = "500ms" timeout_commit = "1s" ``` + Note that in a successful round, the only timeout that we absolutely wait no matter what is `timeout_commit`. Here's a brief summary of the timeouts: -- `timeout_propose` = how long we wait for a proposal block before prevoting nil -- `timeout_propose_delta` = how much `timeout_propose` increases with each round -- `timeout_prevote` = how long we wait after receiving +2/3 prevotes for + +- `timeout_propose` = how long a validator should wait for a proposal block before prevoting nil +- `timeout_propose_delta` = how much `timeout_propose` increases with each round +- `timeout_prevote` = how long a validator should wait after receiving +2/3 prevotes for anything (ie. not a single block or nil) - `timeout_prevote_delta` = how much the `timeout_prevote` increases with each round -- `timeout_precommit` = how long we wait after receiving +2/3 precommits for +- `timeout_precommit` = how long a validator should wait after receiving +2/3 precommits for anything (ie. 
not a single block or nil) - `timeout_precommit_delta` = how much the `timeout_precommit` increases with each round -- `timeout_commit` = how long we wait after committing a block, before starting +- `timeout_commit` = how long a validator should wait after committing a block, before starting on the new height (this gives us a chance to receive some more precommits, even though we already have +2/3) diff --git a/docs/core/how-to-read-logs.md b/docs/core/how-to-read-logs.md index 509216fcba..07024d4893 100644 --- a/docs/core/how-to-read-logs.md +++ b/docs/core/how-to-read-logs.md @@ -24,16 +24,13 @@ I[10-04|13:54:27.368] ABCI Replay Blocks module=consen I[10-04|13:54:27.368] Completed ABCI Handshake - CometBFT and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD ``` -After that, we start a few more things like the event switch, reactors, -and perform UPNP discover in order to detect the IP address. +After that, we start a few more things like the event switch and reactors. 
```sh I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch I[10-04|13:54:27.375] This node is a validator module=consensus I[10-04|13:54:27.379] Starting Node module=main impl=Node I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 -I[10-04|13:54:27.382] Getting UPNP external address module=p2p -I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor diff --git a/docs/core/mempool.md b/docs/core/mempool.md index 8dd9687819..f86083ee04 100644 --- a/docs/core/mempool.md +++ b/docs/core/mempool.md @@ -4,7 +4,41 @@ order: 12 # Mempool -## Transaction ordering +A mempool (a contraction of memory and pool) is a node’s data structure for +storing information on uncommitted transactions. It acts as a sort of waiting +room for transactions that have not yet been committed. + +CometBFT currently supports two types of mempools: `flood` and `nop`. + +## 1. Flood + +The `flood` mempool stores transactions in a concurrent linked list. When a new +transaction is received, it first checks if there's a space for it (`size` and +`max_txs_bytes` config options) and that it's not too big (`max_tx_bytes` config +option). Then, it checks if this transaction has already been seen before by using +an LRU cache (`cache_size` regulates the cache's size). If all checks pass and +the transaction is not in the cache (meaning it's new), the ABCI +[`CheckTxAsync`][1] method is called. The ABCI application validates the +transaction using its own rules. + +If the transaction is deemed valid by the ABCI application, it's added to the linked list. + +The mempool's name (`flood`) comes from the dissemination mechanism. 
When a new +transaction is added to the linked list, the mempool sends it to all connected +peers. Peers themselves gossip this transaction to their peers and so on. One +can say that each transaction "floods" the network, hence the name `flood`. + +Note there are experimental config options +`experimental_max_gossip_connections_to_persistent_peers` and +`experimental_max_gossip_connections_to_non_persistent_peers` to limit the +number of peers a transaction is broadcasted to. Also, you can turn off +broadcasting with `broadcast` config option. + +After each committed block, CometBFT rechecks all uncommitted transactions (can +be disabled with the `recheck` config option) by repeatedly calling the ABCI +`CheckTxAsync`. + +### Transaction ordering Currently, there's no ordering of transactions other than the order they've arrived (via RPC or from other nodes). @@ -46,3 +80,24 @@ order/nonce/sequence number, the application can reject transactions that are out of order. So if a node receives `tx3`, then `tx1`, it can reject `tx3` and then accept `tx1`. The sender can then retry sending `tx3`, which should probably be rejected until the node has seen `tx2`. + +## 2. Nop + +`nop` (short for no operation) mempool is used when the ABCI application developer wants to +build their own mempool. When `type = "nop"`, transactions are not stored anywhere +and are not gossiped to other peers using the P2P network. + +Submitting a transaction via the existing RPC methods (`BroadcastTxSync`, +`BroadcastTxAsync`, and `BroadcastTxCommit`) will always result in an error. + +Because there's no way for the consensus to know if transactions are available +to be committed, the node will always create blocks, which can be empty +sometimes. Using `consensus.create_empty_blocks=false` is prohibited in such +cases. + +The ABCI application becomes responsible for storing, disseminating, and +proposing transactions using [`PrepareProposal`][2]. 
The concrete design is up +to the ABCI application developers. + +[1]: ../../spec/abci/abci++_methods.md#checktx +[2]: ../../spec/abci/abci++_methods.md#prepareproposal \ No newline at end of file diff --git a/docs/core/state-sync.md b/docs/core/state-sync.md index 8400a6ef9d..8237e10957 100644 --- a/docs/core/state-sync.md +++ b/docs/core/state-sync.md @@ -30,7 +30,7 @@ The next information you will need to acquire it through publicly exposed RPC's - `trust_period`: Trust period is the period in which headers can be verified. > :warning: This value should be significantly smaller than the unbonding period. -If you are relying on publicly exposed RPC's to get the need information, you can use `curl`. +If you are relying on publicly exposed RPC's to get the need information, you can use `curl` and [`jq`][jq]. Example: @@ -46,3 +46,5 @@ The response will be: "hash": "188F4F36CBCD2C91B57509BBF231C777E79B52EE3E0D90D06B1A25EB16E6E23D" } ``` + +[jq]: https://jqlang.github.io/jq/ diff --git a/docs/core/using-cometbft.md b/docs/core/using-cometbft.md index 0c8906096c..c81124a0e0 100644 --- a/docs/core/using-cometbft.md +++ b/docs/core/using-cometbft.md @@ -130,7 +130,7 @@ cometbft node ``` By default, CometBFT will try to connect to an ABCI application on -`127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in +`tcp://127.0.0.1:26658`. If you have the `kvstore` ABCI app installed, run it in another window. If you don't, kill CometBFT and run an in-process version of the `kvstore` app: @@ -139,8 +139,8 @@ cometbft node --proxy_app=kvstore ``` After a few seconds, you should see blocks start streaming in. Note that blocks -are produced regularly, even if there are no transactions. See _No Empty -Blocks_, below, to modify this setting. +are produced regularly, even if there are no transactions. See [No Empty +Blocks](#no-empty-blocks), below, to modify this setting. 
CometBFT supports in-process versions of the `counter`, `kvstore`, and `noop` apps that ship as examples with `abci-cli`. It's easy to compile your app diff --git a/evidence/pool.go b/evidence/pool.go index e36b66db38..b502341a86 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -132,11 +132,11 @@ func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { // AddEvidence checks the evidence is valid and adds it to the pool. func (evpool *Pool) AddEvidence(ev types.Evidence) error { - evpool.logger.Debug("Attempting to add evidence", "ev", ev) + evpool.logger.Info("Attempting to add evidence", "ev", ev) // We have already verified this piece of evidence - no need to do it again if evpool.isPending(ev) { - evpool.logger.Debug("Evidence already pending, ignoring this one", "ev", ev) + evpool.logger.Info("Evidence already pending, ignoring this one", "ev", ev) return nil } @@ -144,7 +144,7 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { if evpool.isCommitted(ev) { // this can happen if the peer that sent us the evidence is behind so we shouldn't // punish the peer. 
- evpool.logger.Debug("Evidence was already committed, ignoring this one", "ev", ev) + evpool.logger.Info("Evidence was already committed, ignoring this one", "ev", ev) return nil } @@ -513,13 +513,13 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { // check if we already have this evidence if evpool.isPending(dve) { - evpool.logger.Debug("evidence already pending; ignoring", "evidence", dve) + evpool.logger.Info("evidence already pending; ignoring", "evidence", dve) continue } // check that the evidence is not already committed on chain if evpool.isCommitted(dve) { - evpool.logger.Debug("evidence already committed; ignoring", "evidence", dve) + evpool.logger.Info("evidence already committed; ignoring", "evidence", dve) continue } diff --git a/evidence/verify.go b/evidence/verify.go index ca0766efda..a8baab8f40 100644 --- a/evidence/verify.go +++ b/evidence/verify.go @@ -106,6 +106,7 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // the conflicting header's commit // - 2/3+ of the conflicting validator set correctly signed the conflicting block // - the nodes trusted header at the same height as the conflicting header has a different hash +// - all signatures must be checked as this will be used as evidence // // CONTRACT: must run ValidateBasic() on the evidence before verifying // @@ -115,7 +116,7 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t // In the case of lunatic attack there will be a different commonHeader height. 
Therefore the node perform a single // verification jump between the common header and the conflicting one if commonHeader.Height != e.ConflictingBlock.Height { - err := commonVals.VerifyCommitLightTrusting(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel) + err := commonVals.VerifyCommitLightTrustingAllSignatures(trustedHeader.ChainID, e.ConflictingBlock.Commit, light.DefaultTrustLevel) if err != nil { return fmt.Errorf("skipping verification of conflicting block failed: %w", err) } @@ -127,7 +128,7 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t } // Verify that the 2/3+ commits from the conflicting validator set were for the conflicting header - if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLight(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, + if err := e.ConflictingBlock.ValidatorSet.VerifyCommitLightAllSignatures(trustedHeader.ChainID, e.ConflictingBlock.Commit.BlockID, e.ConflictingBlock.Height, e.ConflictingBlock.Commit); err != nil { return fmt.Errorf("invalid commit from conflicting block: %w", err) } diff --git a/go.mod b/go.mod index 5742c391cb..5aabe63be4 100644 --- a/go.mod +++ b/go.mod @@ -4,17 +4,16 @@ go 1.20 require ( github.com/BurntSushi/toml v1.2.1 - github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d + github.com/ChainSafe/go-schnorrkel v1.0.0 github.com/adlio/schema v1.3.3 github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.12.0 github.com/go-kit/log v0.2.1 github.com/go-logfmt/logfmt v0.5.1 github.com/gofrs/uuid v4.3.0+incompatible - github.com/golang/protobuf v1.5.3 github.com/golangci/golangci-lint v1.50.1 github.com/google/orderedcode v0.0.1 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.4.0 github.com/gorilla/websocket v1.5.0 github.com/gtank/merlin v0.1.1 github.com/informalsystems/tm-load-test v1.3.0 @@ -31,12 +30,12 @@ require ( github.com/rs/cors v1.8.2 github.com/sasha-s/go-deadlock v0.3.1 github.com/snikch/goodman 
v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.6.1 - github.com/spf13/viper v1.13.0 - github.com/stretchr/testify v1.8.1 - golang.org/x/crypto v0.12.0 - golang.org/x/net v0.14.0 - google.golang.org/grpc v1.57.0 + github.com/spf13/cobra v1.8.0 + github.com/spf13/viper v1.18.1 + github.com/stretchr/testify v1.8.4 + golang.org/x/crypto v0.17.0 + golang.org/x/net v0.19.0 + google.golang.org/grpc v1.60.0 ) require ( @@ -52,7 +51,9 @@ require ( github.com/cometbft/cometbft-db v0.7.0 github.com/cosmos/gogoproto v1.4.1 github.com/go-git/go-git/v5 v5.5.2 + github.com/golang/protobuf v1.5.3 github.com/vektra/mockery/v2 v2.14.0 + golang.org/x/sync v0.5.0 gonum.org/v1/gonum v0.8.2 google.golang.org/protobuf v1.31.0 ) @@ -93,10 +94,10 @@ require ( github.com/containerd/continuity v0.3.0 // indirect github.com/containerd/typeurl v1.0.2 // indirect github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/daixiang0/gci v0.8.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect @@ -113,10 +114,10 @@ require ( github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.14.1 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect 
github.com/fzipp/gocyclo v0.6.0 // indirect github.com/go-chi/chi/v5 v5.0.7 // indirect github.com/go-critic/go-critic v0.6.5 // indirect @@ -162,7 +163,7 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect github.com/jgautheron/goconst v1.5.1 // indirect @@ -176,7 +177,7 @@ require ( github.com/kisielk/errcheck v1.6.2 // indirect github.com/kisielk/gotool v1.0.0 // indirect github.com/kkHAIKE/contextcheck v1.1.3 // indirect - github.com/klauspost/compress v1.15.11 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.6 // indirect @@ -185,12 +186,12 @@ require ( github.com/ldez/tagliatelle v0.3.1 // indirect github.com/leonklingele/grouper v1.1.0 // indirect github.com/lufeee/execinquery v1.2.1 // indirect - github.com/magiconair/properties v1.8.6 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/maratori/testableexamples v1.0.0 // indirect github.com/maratori/testpackage v1.1.0 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.16 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect @@ -211,14 +212,13 @@ require ( github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0-rc2 // 
indirect github.com/opencontainers/runc v1.1.3 // indirect - github.com/pelletier/go-toml v1.9.5 // indirect - github.com/pelletier/go-toml/v2 v2.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pjbgf/sha1cd v0.2.3 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/profile v1.6.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 // indirect github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 // indirect github.com/polyfloyd/go-errorlint v1.0.5 // indirect @@ -231,6 +231,8 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/ryancurrah/gomodguard v1.2.4 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect github.com/sashamelentyev/interfacebloat v1.1.0 // indirect github.com/sashamelentyev/usestdlibvars v1.20.0 // indirect @@ -244,15 +246,15 @@ require ( github.com/sivchari/tenv v1.7.0 // indirect github.com/skeema/knownhosts v1.1.0 // indirect github.com/sonatard/noctx v0.0.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect - github.com/spf13/afero v1.8.2 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect 
github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.1 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/tdakkota/asciicheck v0.1.1 // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect @@ -268,23 +270,22 @@ require ( github.com/yeya24/promlinter v0.2.0 // indirect gitlab.com/bosi/decorder v0.2.3 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.opencensus.io v0.23.0 // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 // indirect go.opentelemetry.io/otel v1.11.0 // indirect go.opentelemetry.io/otel/trace v1.11.0 // indirect go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect + go.uber.org/multierr v1.9.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.6.0 // indirect - google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.13.0 // indirect + google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -297,5 
+298,3 @@ require ( mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect ) - -replace github.com/cosmos/gogoproto v1.4.1 => github.com/composableFi/gogoproto v1.5.3 diff --git a/go.sum b/go.sum index ce721bfc29..191533411b 100644 --- a/go.sum +++ b/go.sum @@ -5,7 +5,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -16,17 +15,14 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.110.6 h1:8uYAkj3YHTP/1iwReuHPxLSbdcyc+dSBbzFMrVwDR6Q= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= @@ -39,7 +35,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Abirdcfly/dupword v0.0.7 h1:z14n0yytA3wNO2gpCD/jVtp/acEXPGmYu0esewpBt6Q= github.com/Abirdcfly/dupword v0.0.7/go.mod h1:K/4M1kj+Zh39d2aotRwypvasonOyAMH1c/IZJzE0dmk= @@ -53,8 +48,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d 
h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= @@ -169,13 +164,10 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= -github.com/composableFi/gogoproto v1.5.3 h1:iFQ8Lgy/gSK6sm2pH6CTd6rf0BZElw3I+/J2f4B/yek= -github.com/composableFi/gogoproto v1.5.3/go.mod h1:/g39Mh8m17X8Q/GDEs5zYTSNaNnInBSohtaxzQnYq1Y= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs= github.com/containerd/containerd v1.6.8/go.mod 
h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0= @@ -190,10 +182,12 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d h1:49RLWk1j44Xu4fjHb6JFYmeUnDORVwHNkDxaQ0ctCVU= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/gogoproto v1.4.1 h1:WoyH+0/jbCTzpKNvyav5FL1ZTWsp1im1MxEpJEzKUB8= +github.com/cosmos/gogoproto v1.4.1/go.mod h1:Ac9lzL4vFpBMcptJROQ6dQ4M3pOEK5Z/l0Q9p+LoCr4= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -207,8 +201,9 @@ github.com/daixiang0/gci v0.8.1 h1:T4xpSC+hmsi4CSyuYfIJdMZAr9o7xZmHpQVygMghGZ4= github.com/daixiang0/gci v0.8.1/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= @@ -240,11 +235,9 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.10.1 h1:c0g45+xCJhdgFGw7a5QAfdS4byAbud7miNWJ1WwEVf8= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 
h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= @@ -255,8 +248,8 @@ github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojt github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= @@ -265,11 +258,11 @@ github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= -github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= 
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= @@ -421,7 +414,6 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -431,17 +423,13 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -486,12 +474,11 @@ github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUq github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap 
v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/informalsystems/tm-load-test v1.3.0 h1:FGjKy7vBw6mXNakt+wmNWKggQZRsKkEYpaFk/zR64VA= github.com/informalsystems/tm-load-test v1.3.0/go.mod h1:OQ5AQ9TbT5hKWBNIwsMjn6Bf4O0U4b1kRc+0qZlQJKw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -542,17 +529,16 @@ github.com/kkHAIKE/contextcheck v1.1.3 h1:l4pNvrb8JSwRd51ojtcOxOeHJzHek+MtOyXbaR github.com/kkHAIKE/contextcheck v1.1.3/go.mod h1:PG/cwd6c0705/LM0KTr1acO2gORUxkSVWyLJOFW5qoo= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= -github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -577,8 +563,8 @@ github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QT github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= -github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= @@ -588,14 +574,13 @@ github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859 github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= 
github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -679,10 +664,8 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= -github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= -github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= 
+github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= @@ -697,9 +680,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM= github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= -github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3 h1:hUmXhbljNFtrH5hzV9kiRoddZ5nfPTq3K0Sb2hYYiqE= github.com/pointlander/compress v1.1.1-0.20190518213731-ff44bd196cc3/go.mod h1:q5NXNGzqj5uPnVuhGkZfmgHqNUhf15VLi6L9kW0VEc0= github.com/pointlander/jetset v1.0.1-0.20190518214125-eee7eff80bd4 h1:RHHRCZeaNyBXdYPMjZNH8/XHDBH38TZzw8izrW7dmBE= @@ -764,6 +747,10 @@ github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1r github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= github.com/ryanrolds/sqlclosecheck v0.3.0 
h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= @@ -803,29 +790,29 @@ github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeX github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= 
-github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.13.0 h1:BWSJ/M+f+3nmdz9bxB+bWX28kkALN2ok11D0rSo8EJU= -github.com/spf13/viper v1.13.0/go.mod h1:Icm2xNL3/8uyh/wFuB1jI7TiTNKp8632Nwegu+zgdYw= +github.com/spf13/viper v1.18.1 h1:rmuU42rScKWlhhJDyXZRKJQHXFX02chSVW1IvkPGiVM= +github.com/spf13/viper v1.18.1/go.mod 
h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= @@ -840,14 +827,14 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs= -github.com/subosito/gotenv v1.4.1/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -907,9 +894,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3 h1:syAz40OyelLZo42+3U68Phisvrx4qh+4wpdZw7eUUdY= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.36.3/go.mod h1:Dts42MGkzZne2yCru741+bFiTMWkIj/LLRizad7b9tw= go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= @@ -917,13 +903,12 @@ go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+ go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.8.0 
h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= @@ -936,15 +921,13 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod 
h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -958,8 +941,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb h1:mIKbk8weKhSeLH2GmUTrvx8CjkyJmnU1wFmg59CUjFA= -golang.org/x/exp v0.0.0-20230811145659-89c5cff77bcb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91 h1:Ic/qN6TEifvObMGQy72k0n1LlJr7DjWWEi+MOsDOiSk= golang.org/x/exp/typeparams v0.0.0-20220827204233-334a2380cb91/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= @@ -976,7 +959,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint 
v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -985,14 +967,13 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1026,9 +1007,7 @@ golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= @@ -1040,20 +1019,16 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1066,8 +1041,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1095,7 +1070,6 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1112,20 +1086,15 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1141,7 +1110,6 @@ golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1151,29 +1119,28 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1236,18 +1203,12 @@ golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools 
v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= @@ -1258,8 +1219,8 @@ golang.org/x/tools v0.1.9/go.mod 
h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1286,17 +1247,13 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1327,17 +1284,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 h1:L6iMMGrtzgHsWofoFcihmDEMYeDR9KN/ThbPWGrh++g= -google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878 h1:lv6/DhyiFFGsmzxbsUUTOkN29II+zeWHxvT8Lpdxsv0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230815205213-6bfd019c3878/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= +google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1350,13 +1300,10 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= 
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1398,7 +1345,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/mempool/metrics.gen.go b/mempool/metrics.gen.go index 100c5e71cb..cd41c2ebc4 100644 --- a/mempool/metrics.gen.go +++ b/mempool/metrics.gen.go @@ -20,6 +20,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "size", Help: "Number of uncommitted transactions in the mempool.", }, labels).With(labelsAndValues...), + SizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "size_bytes", + Help: "Total size of the 
mempool in bytes.", + }, labels).With(labelsAndValues...), TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -52,16 +58,24 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "recheck_times", Help: "Number of times transactions are rechecked in the mempool.", }, labels).With(labelsAndValues...), + ActiveOutboundConnections: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "active_outbound_connections", + Help: "Number of connections being actively used for gossiping transactions (experimental feature).", + }, labels).With(labelsAndValues...), } } func NopMetrics() *Metrics { return &Metrics{ - Size: discard.NewGauge(), - TxSizeBytes: discard.NewHistogram(), - FailedTxs: discard.NewCounter(), - RejectedTxs: discard.NewCounter(), - EvictedTxs: discard.NewCounter(), - RecheckTimes: discard.NewCounter(), + Size: discard.NewGauge(), + SizeBytes: discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RejectedTxs: discard.NewCounter(), + EvictedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), + ActiveOutboundConnections: discard.NewGauge(), } } diff --git a/mempool/metrics.go b/mempool/metrics.go index 85ca8c0cfb..689d6f496d 100644 --- a/mempool/metrics.go +++ b/mempool/metrics.go @@ -18,6 +18,9 @@ type Metrics struct { // Number of uncommitted transactions in the mempool. Size metrics.Gauge + // Total size of the mempool in bytes. + SizeBytes metrics.Gauge + // Histogram of transaction sizes in bytes. TxSizeBytes metrics.Histogram `metrics_buckettype:"exp" metrics_bucketsizes:"1,3,7"` @@ -40,4 +43,8 @@ type Metrics struct { // Number of times transactions are rechecked in the mempool. RecheckTimes metrics.Counter + + // Number of connections being actively used for gossiping transactions + // (experimental feature). 
+ ActiveOutboundConnections metrics.Gauge } diff --git a/mempool/nop_mempool.go b/mempool/nop_mempool.go new file mode 100644 index 0000000000..4a55a620d8 --- /dev/null +++ b/mempool/nop_mempool.go @@ -0,0 +1,110 @@ +package mempool + +import ( + "errors" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cometbft/cometbft/libs/service" + "github.com/cometbft/cometbft/p2p" + "github.com/cometbft/cometbft/types" +) + +// NopMempool is a mempool that does nothing. +// +// The ABCI app is responsible for storing, disseminating, and proposing transactions. +// See [ADR-111](../docs/architecture/adr-111-nop-mempool.md). +type NopMempool struct{} + +// errNotAllowed indicates that the operation is not allowed with `nop` mempool. +var errNotAllowed = errors.New("not allowed with `nop` mempool") + +var _ Mempool = &NopMempool{} + +// CheckTx always returns an error. +func (*NopMempool) CheckTx(types.Tx, func(*abci.Response), TxInfo) error { + return errNotAllowed +} + +// RemoveTxByKey always returns an error. +func (*NopMempool) RemoveTxByKey(types.TxKey) error { return errNotAllowed } + +// ReapMaxBytesMaxGas always returns nil. +func (*NopMempool) ReapMaxBytesMaxGas(int64, int64) types.Txs { return nil } + +// ReapMaxTxs always returns nil. +func (*NopMempool) ReapMaxTxs(int) types.Txs { return nil } + +// Lock does nothing. +func (*NopMempool) Lock() {} + +// Unlock does nothing. +func (*NopMempool) Unlock() {} + +// Update does nothing. +func (*NopMempool) Update( + int64, + types.Txs, + []*abci.ResponseDeliverTx, + PreCheckFunc, + PostCheckFunc, +) error { + return nil +} + +// FlushAppConn does nothing. +func (*NopMempool) FlushAppConn() error { return nil } + +// Flush does nothing. +func (*NopMempool) Flush() {} + +// TxsAvailable always returns nil. +func (*NopMempool) TxsAvailable() <-chan struct{} { + return nil +} + +// EnableTxsAvailable does nothing. +func (*NopMempool) EnableTxsAvailable() {} + +// SetTxRemovedCallback does nothing. 
+func (*NopMempool) SetTxRemovedCallback(func(txKey types.TxKey)) {} + +// Size always returns 0. +func (*NopMempool) Size() int { return 0 } + +// SizeBytes always returns 0. +func (*NopMempool) SizeBytes() int64 { return 0 } + +// NopMempoolReactor is a mempool reactor that does nothing. +type NopMempoolReactor struct { + service.BaseService +} + +// NewNopMempoolReactor returns a new `nop` reactor. +// +// To be used only in RPC. +func NewNopMempoolReactor() *NopMempoolReactor { + return &NopMempoolReactor{*service.NewBaseService(nil, "NopMempoolReactor", nil)} +} + +var _ p2p.Reactor = &NopMempoolReactor{} + +// WaitSync always returns false. +func (*NopMempoolReactor) WaitSync() bool { return false } + +// GetChannels always returns nil. +func (*NopMempoolReactor) GetChannels() []*p2p.ChannelDescriptor { return nil } + +// AddPeer does nothing. +func (*NopMempoolReactor) AddPeer(p2p.Peer) {} + +// InitPeer always returns nil. +func (*NopMempoolReactor) InitPeer(p2p.Peer) p2p.Peer { return nil } + +// RemovePeer does nothing. +func (*NopMempoolReactor) RemovePeer(p2p.Peer, interface{}) {} + +// Receive does nothing. +func (*NopMempoolReactor) ReceiveEnvelope(p2p.Envelope) {} + +// SetSwitch does nothing. 
+func (*NopMempoolReactor) SetSwitch(*p2p.Switch) {} diff --git a/mempool/nop_mempool_test.go b/mempool/nop_mempool_test.go new file mode 100644 index 0000000000..01b169e069 --- /dev/null +++ b/mempool/nop_mempool_test.go @@ -0,0 +1,38 @@ +package mempool + +import ( + "testing" + + "github.com/cometbft/cometbft/types" + "github.com/stretchr/testify/assert" +) + +var tx = types.Tx([]byte{0x01}) + +func TestNopMempool_Basic(t *testing.T) { + mem := &NopMempool{} + + assert.Equal(t, 0, mem.Size()) + assert.Equal(t, int64(0), mem.SizeBytes()) + + err := mem.CheckTx(tx, nil, TxInfo{}) + assert.Equal(t, errNotAllowed, err) + + err = mem.RemoveTxByKey(tx.Key()) + assert.Equal(t, errNotAllowed, err) + + txs := mem.ReapMaxBytesMaxGas(0, 0) + assert.Nil(t, txs) + + txs = mem.ReapMaxTxs(0) + assert.Nil(t, txs) + + err = mem.FlushAppConn() + assert.NoError(t, err) + + err = mem.Update(0, nil, nil, nil, nil) + assert.NoError(t, err) + + txsAvailable := mem.TxsAvailable() + assert.Nil(t, txsAvailable) +} diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go index 2f4a0264be..e5d6563710 100644 --- a/mempool/v0/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -303,6 +303,7 @@ func (mem *CListMempool) reqResCb( // update metrics mem.metrics.Size.Set(float64(mem.Size())) + mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) // passed in by the caller of CheckTx, eg. 
the RPC if externalCb != nil { @@ -648,6 +649,7 @@ func (mem *CListMempool) Update( // Update metrics mem.metrics.Size.Set(float64(mem.Size())) + mem.metrics.SizeBytes.Set(float64(mem.SizeBytes())) return nil } diff --git a/mempool/v0/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go index 35fc066317..37c8b42886 100644 --- a/mempool/v0/clist_mempool_test.go +++ b/mempool/v0/clist_mempool_test.go @@ -1,7 +1,6 @@ package v0 import ( - "crypto/rand" "encoding/binary" "fmt" mrand "math/rand" @@ -96,16 +95,27 @@ func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { } } +func callCheckTx(t *testing.T, mp mempool.Mempool, txs types.Txs) { + txInfo := mempool.TxInfo{SenderID: 0} + for i, tx := range txs { + if err := mp.CheckTx(tx, nil, txInfo); err != nil { + // Skip invalid txs. + // TestMempoolFilters will fail otherwise. It asserts a number of txs + // returned. + if mempool.IsPreCheckError(err) { + continue + } + t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) + } + } +} + func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs { txs := make(types.Txs, count) txInfo := mempool.TxInfo{SenderID: peerID} for i := 0; i < count; i++ { - txBytes := make([]byte, 20) + txBytes := kvstore.NewRandomTx(20) txs[i] = txBytes - _, err := rand.Read(txBytes) - if err != nil { - t.Error(err) - } if err := mp.CheckTx(txBytes, nil, txInfo); err != nil { // Skip invalid txs. // TestMempoolFilters will fail otherwise. It asserts a number of txs diff --git a/mempool/v0/reactor.go b/mempool/v0/reactor.go index 508d76666f..55cf185edc 100644 --- a/mempool/v0/reactor.go +++ b/mempool/v0/reactor.go @@ -1,6 +1,7 @@ package v0 import ( + "context" "errors" "fmt" "time" @@ -13,6 +14,7 @@ import ( "github.com/cometbft/cometbft/p2p" protomem "github.com/cometbft/cometbft/proto/tendermint/mempool" "github.com/cometbft/cometbft/types" + "golang.org/x/sync/semaphore" ) // Reactor handles mempool tx broadcasting amongst peers. 
@@ -23,6 +25,12 @@ type Reactor struct { config *cfg.MempoolConfig mempool *CListMempool ids *mempoolIDs + + // Semaphores to keep track of how many connections to peers are active for broadcasting + // transactions. Each semaphore has a capacity that puts an upper bound on the number of + // connections for different groups of peers. + activePersistentPeersSemaphore *semaphore.Weighted + activeNonPersistentPeersSemaphore *semaphore.Weighted } type mempoolIDs struct { @@ -96,6 +104,9 @@ func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor { ids: newMempoolIDs(), } memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR) + memR.activePersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers)) + memR.activeNonPersistentPeersSemaphore = semaphore.NewWeighted(int64(memR.config.ExperimentalMaxGossipConnectionsToNonPersistentPeers)) + return memR } @@ -143,7 +154,42 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor { // It starts a broadcast routine ensuring all txs are forwarded to the given peer. func (memR *Reactor) AddPeer(peer p2p.Peer) { if memR.config.Broadcast { - go memR.broadcastTxRoutine(peer) + go func() { + // Always forward transactions to unconditional peers. + if !memR.Switch.IsPeerUnconditional(peer.ID()) { + // Depending on the type of peer, we choose a semaphore to limit the gossiping peers. + var peerSemaphore *semaphore.Weighted + if peer.IsPersistent() && memR.config.ExperimentalMaxGossipConnectionsToPersistentPeers > 0 { + peerSemaphore = memR.activePersistentPeersSemaphore + } else if !peer.IsPersistent() && memR.config.ExperimentalMaxGossipConnectionsToNonPersistentPeers > 0 { + peerSemaphore = memR.activeNonPersistentPeersSemaphore + } + + if peerSemaphore != nil { + for peer.IsRunning() { + // Block on the semaphore until a slot is available to start gossiping with this peer. 
+ // Do not block indefinitely, in case the peer is disconnected before gossiping starts. + ctxTimeout, cancel := context.WithTimeout(context.TODO(), 30*time.Second) + // Block sending transactions to peer until one of the connections become + // available in the semaphore. + err := peerSemaphore.Acquire(ctxTimeout, 1) + cancel() + + if err != nil { + continue + } + + // Release semaphore to allow other peer to start sending transactions. + defer peerSemaphore.Release(1) + break + } + } + } + + memR.mempool.metrics.ActiveOutboundConnections.Add(1) + defer memR.mempool.metrics.ActiveOutboundConnections.Add(-1) + memR.broadcastTxRoutine(peer) + }() } } @@ -203,6 +249,7 @@ func (memR *Reactor) broadcastTxRoutine(peer p2p.Peer) { if !memR.IsRunning() || !peer.IsRunning() { return } + // This happens because the CElement we were looking at got garbage // collected (removed). That is, .NextWait() returned nil. Go ahead and // start from the beginning. diff --git a/mempool/v0/reactor_test.go b/mempool/v0/reactor_test.go index aa0de10461..f4e5f46bd9 100644 --- a/mempool/v0/reactor_test.go +++ b/mempool/v0/reactor_test.go @@ -293,6 +293,51 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { } } +// Test the experimental feature that limits the number of outgoing connections for gossiping +// transactions (only non-persistent peers). +// Note: in this test we know which gossip connections are active or not because of how the p2p +// functions are currently implemented, which affects the order in which peers are added to the +// mempool reactor. 
+func TestMempoolReactorMaxActiveOutboundConnections(t *testing.T) { + config := cfg.TestConfig() + config.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = 1 + reactors := makeAndConnectReactors(config, 4) + defer func() { + for _, r := range reactors { + if err := r.Stop(); err != nil { + assert.NoError(t, err) + } + } + }() + for _, r := range reactors { + for _, peer := range r.Switch.Peers().List() { + peer.Set(types.PeerStateKey, peerState{1}) + } + } + + // Add a bunch transactions to the first reactor. + txs := newUniqueTxs(100) + callCheckTx(t, reactors[0].mempool, txs) + + // Wait for all txs to be in the mempool of the second reactor; the other reactors should not + // receive any tx. (The second reactor only sends transactions to the first reactor.) + checkTxsInMempool(t, txs, reactors[1], 0) + for _, r := range reactors[2:] { + require.Zero(t, r.mempool.Size()) + } + + // Disconnect the second reactor from the first reactor. + firstPeer := reactors[0].Switch.Peers().List()[0] + reactors[0].Switch.StopPeerGracefully(firstPeer) + + // Now the third reactor should start receiving transactions from the first reactor; the fourth + // reactor's mempool should still be empty. + checkTxsInMempool(t, txs, reactors[2], 0) + for _, r := range reactors[3:] { + require.Zero(t, r.mempool.Size()) + } +} + // mempoolLogger is a TestingLogger which uses a different // color for each validator ("validator" key must exist). 
func mempoolLogger() log.Logger { @@ -328,6 +373,14 @@ func makeAndConnectReactors(config *cfg.Config, n int) []*Reactor { return reactors } +func newUniqueTxs(n int) types.Txs { + txs := make(types.Txs, n) + for i := 0; i < n; i++ { + txs[i] = kvstore.NewTxFromID(i) + } + return txs +} + func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { // wait for the txs in all mempools wg := new(sync.WaitGroup) @@ -335,7 +388,7 @@ func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { wg.Add(1) go func(r *Reactor, reactorIndex int) { defer wg.Done() - waitForTxsOnReactor(t, txs, r, reactorIndex) + checkTxsInOrder(t, txs, r, reactorIndex) }(reactor, i) } @@ -353,13 +406,30 @@ func waitForTxsOnReactors(t *testing.T, txs types.Txs, reactors []*Reactor) { } } -func waitForTxsOnReactor(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { - mempool := reactor.mempool - for mempool.Size() < len(txs) { +// Wait until the mempool has a certain number of transactions. +func waitForNumTxsInMempool(numTxs int, reactor *Reactor) { + for reactor.mempool.Size() < numTxs { time.Sleep(time.Millisecond * 100) } +} + +// Wait until all txs are in the mempool and check that the number of txs in the +// mempool is as expected. +func checkTxsInMempool(t *testing.T, txs types.Txs, reactor *Reactor, _ int) { + waitForNumTxsInMempool(len(txs), reactor) + + reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) + require.Equal(t, len(txs), len(reapedTxs)) + require.Equal(t, len(txs), reactor.mempool.Size()) +} + +// Wait until all txs are in the mempool and check that they are in the same +// order as given. +func checkTxsInOrder(t *testing.T, txs types.Txs, reactor *Reactor, reactorIndex int) { + waitForNumTxsInMempool(len(txs), reactor) - reapedTxs := mempool.ReapMaxTxs(len(txs)) + // Check that all transactions in the mempool are in the same order as txs. 
+ reapedTxs := reactor.mempool.ReapMaxTxs(len(txs)) for i, tx := range txs { assert.Equalf(t, tx, reapedTxs[i], "txs at index %d on reactor %d don't match: %v vs %v", i, reactorIndex, tx, reapedTxs[i]) diff --git a/node/node.go b/node/node.go index 4e5238d1da..636a9d968f 100644 --- a/node/node.go +++ b/node/node.go @@ -7,6 +7,7 @@ import ( "fmt" "net" "net/http" + "os" "strings" "time" @@ -21,12 +22,13 @@ import ( cs "github.com/cometbft/cometbft/consensus" "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/evidence" + "github.com/cometbft/cometbft/light" cmtjson "github.com/cometbft/cometbft/libs/json" "github.com/cometbft/cometbft/libs/log" cmtpubsub "github.com/cometbft/cometbft/libs/pubsub" "github.com/cometbft/cometbft/libs/service" - "github.com/cometbft/cometbft/light" + mempl "github.com/cometbft/cometbft/mempool" mempoolv0 "github.com/cometbft/cometbft/mempool/v0" mempoolv1 "github.com/cometbft/cometbft/mempool/v1" //nolint:staticcheck // SA1019 Priority mempool deprecated but still supported in this release. @@ -188,6 +190,124 @@ func StateProvider(stateProvider statesync.StateProvider) Option { } } +// BootstrapState synchronizes the stores with the application after state sync +// has been performed offline. It is expected that the block store and state +// store are empty at the time the function is called. +// +// If the block store is not empty, the function returns an error. 
+func BootstrapState(ctx context.Context, config *cfg.Config, dbProvider DBProvider, height uint64, appHash []byte) (err error) { + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + if ctx == nil { + ctx = context.Background() + } + + if config == nil { + logger.Info("no config provided, using default configuration") + config = cfg.DefaultConfig() + } + + if dbProvider == nil { + dbProvider = DefaultDBProvider + } + blockStore, stateDB, err := initDBs(config, dbProvider) + + defer func() { + if derr := blockStore.Close(); derr != nil { + logger.Error("Failed to close blockstore", "err", derr) + // Set the return value + err = derr + } + }() + + if err != nil { + return err + } + + if !blockStore.IsEmpty() { + return fmt.Errorf("blockstore not empty, trying to initialize non empty state") + } + + stateStore := sm.NewBootstrapStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: config.Storage.DiscardABCIResponses, + }) + + defer func() { + if derr := stateStore.Close(); derr != nil { + logger.Error("Failed to close statestore", "err", derr) + // Set the return value + err = derr + } + }() + state, err := stateStore.Load() + if err != nil { + return err + } + + if !state.IsEmpty() { + return fmt.Errorf("state not empty, trying to initialize non empty state") + } + + genState, _, err := LoadStateFromDBOrGenesisDocProvider(stateDB, DefaultGenesisDocProviderFunc(config)) + if err != nil { + return err + } + + stateProvider, err := statesync.NewLightClientStateProvider( + ctx, + genState.ChainID, genState.Version, genState.InitialHeight, + config.StateSync.RPCServers, light.TrustOptions{ + Period: config.StateSync.TrustPeriod, + Height: config.StateSync.TrustHeight, + Hash: config.StateSync.TrustHashBytes(), + }, logger.With("module", "light")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + + state, err = stateProvider.State(ctx, height) + if err != nil { + return err + } + if appHash == nil { + 
logger.Info("warning: cannot verify appHash. Verification will happen when node boots up!") + } else { + if !bytes.Equal(appHash, state.AppHash) { + if err := blockStore.Close(); err != nil { + logger.Error("failed to close blockstore: %w", err) + } + if err := stateStore.Close(); err != nil { + logger.Error("failed to close statestore: %w", err) + } + return fmt.Errorf("the app hash returned by the light client does not match the provided appHash, expected %X, got %X", state.AppHash, appHash) + } + } + + commit, err := stateProvider.Commit(ctx, height) + if err != nil { + return err + } + + if err = stateStore.Bootstrap(state); err != nil { + return err + } + + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + return err + } + + // Once the stores are bootstrapped, we need to set the height at which the node has finished + // statesyncing. This will allow the blocksync reactor to fetch blocks at a proper height. + // In case this operation fails, it is equivalent to a failure in online state sync where the operator + // needs to manually delete the state and blockstores and rerun the bootstrapping process. + err = stateStore.SetOfflineStateSyncHeight(state.LastBlockHeight) + if err != nil { + return fmt.Errorf("failed to set synced height: %w", err) + } + + return err +} + //------------------------------------------------------------------------------ // Node is the highest level interface to a full CometBFT node. 
@@ -314,6 +434,7 @@ func createAndStartIndexerService( } func doHandshake( + ctx context.Context, stateStore sm.Store, state sm.State, blockStore sm.BlockStore, @@ -325,7 +446,7 @@ func doHandshake( handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) - if err := handshaker.Handshake(proxyApp); err != nil { + if err := handshaker.HandshakeWithContext(ctx, proxyApp); err != nil { return fmt.Errorf("error during handshake: %v", err) } return nil @@ -373,52 +494,62 @@ func createMempoolAndMempoolReactor( memplMetrics *mempl.Metrics, logger log.Logger, ) (mempl.Mempool, p2p.Reactor) { - switch config.Mempool.Version { - case cfg.MempoolV1: - mp := mempoolv1.NewTxMempool( - logger, - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv1.WithMetrics(memplMetrics), - mempoolv1.WithPreCheck(sm.TxPreCheck(state)), - mempoolv1.WithPostCheck(sm.TxPostCheck(state)), - ) - - reactor := mempoolv1.NewReactor( - config.Mempool, - mp, - ) - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return mp, reactor + switch config.Mempool.Type { + // allow empty string for backward compatibility + case cfg.MempoolTypeFlood, "": + switch config.Mempool.Version { + case cfg.MempoolV1: + mp := mempoolv1.NewTxMempool( + logger, + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempoolv1.WithMetrics(memplMetrics), + mempoolv1.WithPreCheck(sm.TxPreCheck(state)), + mempoolv1.WithPostCheck(sm.TxPostCheck(state)), + ) + + reactor := mempoolv1.NewReactor( + config.Mempool, + mp, + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } - case cfg.MempoolV0: - mp := mempoolv0.NewCListMempool( - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(memplMetrics), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), - ) + return mp, reactor + + case cfg.MempoolV0: + mp 
:= mempoolv0.NewCListMempool( + config.Mempool, + proxyApp.Mempool(), + state.LastBlockHeight, + mempoolv0.WithMetrics(memplMetrics), + mempoolv0.WithPreCheck(sm.TxPreCheck(state)), + mempoolv0.WithPostCheck(sm.TxPostCheck(state)), + ) + + mp.SetLogger(logger) + + reactor := mempoolv0.NewReactor( + config.Mempool, + mp, + ) + if config.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() + } - mp.SetLogger(logger) + return mp, reactor - reactor := mempoolv0.NewReactor( - config.Mempool, - mp, - ) - if config.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() + default: + return nil, nil } - - return mp, reactor - + case cfg.MempoolTypeNop: + // Strictly speaking, there's no need to have a `mempl.NopMempoolReactor`, but + // adding it leads to a cleaner code. + return &mempl.NopMempool{}, mempl.NewNopMempoolReactor() default: - return nil, nil + panic(fmt.Sprintf("unknown mempool type: %q", config.Mempool.Type)) } } @@ -447,10 +578,11 @@ func createBlockchainReactor(config *cfg.Config, blockStore *store.BlockStore, blockSync bool, logger log.Logger, + offlineStateSyncHeight int64, ) (bcReactor p2p.Reactor, err error) { switch config.BlockSync.Version { case "v0": - bcReactor = bc.NewReactor(state.Copy(), blockExec, blockStore, blockSync) + bcReactor = bc.NewReactorWithOfflineStateSync(state.Copy(), blockExec, blockStore, blockSync, offlineStateSyncHeight) case "v1", "v2": return nil, fmt.Errorf("block sync version %s has been deprecated. 
Please use v0", config.BlockSync.Version) default: @@ -583,7 +715,9 @@ func createSwitch(config *cfg.Config, p2p.SwitchPeerFilters(peerFilters...), ) sw.SetLogger(p2pLogger) - sw.AddReactor("MEMPOOL", mempoolReactor) + if config.Mempool.Type != cfg.MempoolTypeNop { + sw.AddReactor("MEMPOOL", mempoolReactor) + } sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) @@ -712,12 +846,29 @@ func NewNode(config *cfg.Config, logger log.Logger, options ...Option, ) (*Node, error) { + return NewNodeWithContext(context.TODO(), config, privValidator, + nodeKey, clientCreator, genesisDocProvider, dbProvider, + metricsProvider, logger, options...) +} + +func NewNodeWithContext(ctx context.Context, + config *cfg.Config, + privValidator types.PrivValidator, + nodeKey *p2p.NodeKey, + clientCreator proxy.ClientCreator, + genesisDocProvider GenesisDocProvider, + dbProvider DBProvider, + metricsProvider MetricsProvider, + logger log.Logger, + options ...Option, +) (*Node, error) { + blockStore, stateDB, err := initDBs(config, dbProvider) if err != nil { return nil, err } - stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + stateStore := sm.NewBootstrapStore(stateDB, sm.StoreOptions{ DiscardABCIResponses: config.Storage.DiscardABCIResponses, }) @@ -775,7 +926,7 @@ func NewNode(config *cfg.Config, // and replays any blocks as necessary to sync CometBFT with the app. 
consensusLogger := logger.With("module", "consensus") if !stateSync { - if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := doHandshake(ctx, stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { return nil, err } @@ -794,11 +945,10 @@ func NewNode(config *cfg.Config, logNodeStartupInfo(state, pubKey, logger, consensusLogger) - // Make MempoolReactor mempool, mempoolReactor := createMempoolAndMempoolReactor(config, proxyApp, state, memplMetrics, logger) - // Make Evidence Reactor evidenceReactor, evidencePool, err := createEvidenceReactor(config, dbProvider, stateDB, blockStore, logger) + if err != nil { return nil, err } @@ -812,9 +962,15 @@ func NewNode(config *cfg.Config, evidencePool, sm.BlockExecutorWithMetrics(smMetrics), ) - + offlineStateSyncHeight := int64(0) + if blockStore.Height() == 0 { + offlineStateSyncHeight, err = stateStore.GetOfflineStateSyncHeight() + if err != nil && err.Error() != "value empty" { + panic(fmt.Sprintf("failed to retrieve statesynced height from store %s; expected state store height to be %v", err, state.LastBlockHeight)) + } + } // Make BlockchainReactor. Don't start block sync if we're doing a state sync first. 
- bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger) + bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, blockSync && !stateSync, logger, offlineStateSyncHeight) if err != nil { return nil, fmt.Errorf("could not create blockchain reactor: %w", err) } @@ -826,10 +982,15 @@ func NewNode(config *cfg.Config, } else if blockSync { csMetrics.BlockSyncing.Set(1) } + consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, privValidator, csMetrics, stateSync || blockSync, eventBus, consensusLogger, ) + err = stateStore.SetOfflineStateSyncHeight(0) + if err != nil { + panic(fmt.Sprintf("failed to reset the offline state sync height %s", err)) + } // Set up state sync reactor, and schedule a sync if requested. // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, @@ -848,10 +1009,8 @@ func NewNode(config *cfg.Config, return nil, err } - // Setup Transport. transport, peerFilters := createTransport(config, nodeInfo, nodeKey, proxyApp) - // Setup Switch. p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, @@ -1048,15 +1207,23 @@ func (n *Node) OnStop() { } } if n.blockStore != nil { + n.Logger.Info("Closing blockstore") if err := n.blockStore.Close(); err != nil { n.Logger.Error("problem closing blockstore", "err", err) } } if n.stateStore != nil { + n.Logger.Info("Closing statestore") if err := n.stateStore.Close(); err != nil { n.Logger.Error("problem closing statestore", "err", err) } } + if n.evidencePool != nil { + n.Logger.Info("Closing evidencestore") + if err := n.EvidencePool().Close(); err != nil { + n.Logger.Error("problem closing evidencestore", "err", err) + } + } } // ConfigureRPC makes sure RPC has all the objects it needs to operate. 
diff --git a/p2p/README.md b/p2p/README.md index 99903f405d..bdde71d201 100644 --- a/p2p/README.md +++ b/p2p/README.md @@ -4,7 +4,7 @@ The p2p package provides an abstraction around peer-to-peer communication. Docs: -- [Connection](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/p2p/connection.md) for details on how connections and multiplexing work -- [Peer](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/p2p/node.md) for details on peer ID, handshakes, and peer exchange -- [Node](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/p2p/node.md) for details about different types of nodes and how they should work -- [Config](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/p2p/config.md) for details on some config option +- [Connection](../spec/p2p/legacy-docs/connection.md) for details on how connections and multiplexing work +- [Peer](../spec/p2p/legacy-docs/node.md) for details on peer ID, handshakes, and peer exchange +- [Node](../spec/p2p/legacy-docs/node.md) for details about different types of nodes and how they should work +- [Config](../spec/p2p/legacy-docs/config.md) for details on some config options diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go deleted file mode 100644 index b40d92e65a..0000000000 --- a/p2p/upnp/probe.go +++ /dev/null @@ -1,117 +0,0 @@ -package upnp - -import ( - "fmt" - "net" - "time" - - "github.com/cometbft/cometbft/libs/log" -) - -type Capabilities struct { - PortMapping bool - Hairpin bool -} - -func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) { - nat, err := Discover() - if err != nil { - return nil, nil, nil, fmt.Errorf("nat upnp could not be discovered: %v", err) - } - logger.Info("make upnp listener", "msg", log.NewLazySprintf("ourIP: %v", nat.(*upnpNAT).ourIP)) - - ext, err := nat.GetExternalAddress() - if err != nil { - return nat, nil, nil, fmt.Errorf("external address error: %v", err) - } - logger.Info("make upnp listener", "msg", 
log.NewLazySprintf("External address: %v", ext)) - - port, err := nat.AddPortMapping("tcp", extPort, intPort, "CometBFT UPnP Probe", 0) - if err != nil { - return nat, nil, ext, fmt.Errorf("port mapping error: %v", err) - } - logger.Info("make upnp listener", "msg", log.NewLazySprintf("Port mapping mapped: %v", port)) - - // also run the listener, open for all remote addresses. - listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) - if err != nil { - return nat, nil, ext, fmt.Errorf("error establishing listener: %v", err) - } - return nat, listener, ext, nil -} - -func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) { - // Listener - go func() { - inConn, err := listener.Accept() - if err != nil { - logger.Info("test hair pin", "msg", log.NewLazySprintf("Listener.Accept() error: %v", err)) - return - } - logger.Info("test hair pin", - "msg", - log.NewLazySprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) - buf := make([]byte, 1024) - n, err := inConn.Read(buf) - if err != nil { - logger.Info("test hair pin", - "msg", - log.NewLazySprintf("Incoming connection read error: %v", err)) - return - } - logger.Info("test hair pin", - "msg", - log.NewLazySprintf("Incoming connection read %v bytes: %X", n, buf)) - if string(buf) == "test data" { - supportsHairpin = true - return - } - }() - - // Establish outgoing - outConn, err := net.Dial("tcp", extAddr) - if err != nil { - logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection dial error: %v", err)) - return - } - - n, err := outConn.Write([]byte("test data")) - if err != nil { - logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection write error: %v", err)) - return - } - logger.Info("test hair pin", "msg", log.NewLazySprintf("Outgoing connection wrote %v bytes", n)) - - // Wait for data receipt - time.Sleep(1 * time.Second) - return supportsHairpin -} - -func Probe(logger log.Logger) 
(caps Capabilities, err error) { - logger.Info("Probing for UPnP!") - - intPort, extPort := 8001, 8001 - - nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger) - if err != nil { - return - } - caps.PortMapping = true - - // Deferred cleanup - defer func() { - if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { - logger.Error(fmt.Sprintf("Port mapping delete error: %v", err)) - } - if err := listener.Close(); err != nil { - logger.Error(fmt.Sprintf("Listener closing error: %v", err)) - } - }() - - supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger) - if supportsHairpin { - caps.Hairpin = true - } - - return -} diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go deleted file mode 100644 index 45da9d33cb..0000000000 --- a/p2p/upnp/upnp.go +++ /dev/null @@ -1,404 +0,0 @@ -// Taken from taipei-torrent. -// Just enough UPnP to be able to forward ports -// For more information, see: http://www.upnp-hacks.org/upnp.html -package upnp - -// TODO: use syscalls to get actual ourIP, see issue #712 - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -type upnpNAT struct { - serviceURL string - ourIP string - urnDomain string -} - -// protocol is either "udp" or "tcp" -type NAT interface { - GetExternalAddress() (addr net.IP, err error) - AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() - - if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { - return nil, err - } - - st := 
"InternetGatewayDevice:1" - - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - "ST: ssdp:all\r\n" + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - _, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - return - } - for { - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - break - } - answer := string(answerBytes[0:n]) - if !strings.Contains(answer, st) { - continue - } - // HTTP header field names are case-insensitive. - // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 - locString := "\r\nlocation:" - answer = strings.ToLower(answer) - locIndex := strings.Index(answer, locString) - if locIndex < 0 { - continue - } - loc := answer[locIndex+len(locString):] - endIndex := strings.Index(loc, "\r\n") - if endIndex < 0 { - continue - } - locURL := strings.TrimSpace(loc[0:endIndex]) - var serviceURL, urnDomain string - serviceURL, urnDomain, err = getServiceURL(locURL) - if err != nil { - return - } - var ourIP net.IP - ourIP, err = localIPv4() - if err != nil { - return - } - nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain} - return - } - } - err = errors.New("upnp port discovery failed") - return nat, err -} - -type Envelope struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` - Soap *SoapBody -} -type SoapBody struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"` - ExternalIP *ExternalIPAddressResponse -} - -type ExternalIPAddressResponse struct { - XMLName xml.Name `xml:"GetExternalIPAddressResponse"` - IPAddress string `xml:"NewExternalIPAddress"` -} - -type ExternalIPAddress struct { - XMLName xml.Name `xml:"NewExternalIPAddress"` - IP string -} - -type Service struct { - ServiceType string 
`xml:"serviceType"` - ControlURL string `xml:"controlURL"` -} - -type DeviceList struct { - Device []Device `xml:"device"` -} - -type ServiceList struct { - Service []Service `xml:"service"` -} - -type Device struct { - XMLName xml.Name `xml:"device"` - DeviceType string `xml:"deviceType"` - DeviceList DeviceList `xml:"deviceList"` - ServiceList ServiceList `xml:"serviceList"` -} - -type Root struct { - Device Device -} - -func getChildDevice(d *Device, deviceType string) *Device { - dl := d.DeviceList.Device - for i := 0; i < len(dl); i++ { - if strings.Contains(dl[i].DeviceType, deviceType) { - return &dl[i] - } - } - return nil -} - -func getChildService(d *Device, serviceType string) *Service { - sl := d.ServiceList.Service - for i := 0; i < len(sl); i++ { - if strings.Contains(sl[i].ServiceType, serviceType) { - return &sl[i] - } - } - return nil -} - -func localIPv4() (net.IP, error) { - tt, err := net.Interfaces() - if err != nil { - return nil, err - } - for _, t := range tt { - aa, err := t.Addrs() - if err != nil { - return nil, err - } - for _, a := range aa { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || v4[0] == 127 { // loopback address - continue - } - return v4, nil - } - } - return nil, errors.New("cannot find local IP address") -} - -func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) //nolint: gosec - if err != nil { - return - } - defer r.Body.Close() - - if r.StatusCode >= 400 { - err = errors.New(string(rune(r.StatusCode))) - return - } - var root Root - err = xml.NewDecoder(r.Body).Decode(&root) - if err != nil { - return - } - a := &root.Device - if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") { - err = errors.New("no InternetGatewayDevice") - return - } - b := getChildDevice(a, "WANDevice:1") - if b == nil { - err = errors.New("no WANDevice") - return - } - c := getChildDevice(b, "WANConnectionDevice:1") - if c == nil { - err = 
errors.New("no WANConnectionDevice") - return - } - d := getChildService(c, "WANIPConnection:1") - if d == nil { - // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice, - // instead of under WanConnectionDevice - d = getChildService(b, "WANIPConnection:1") - - if d == nil { - err = errors.New("no WANIPConnection") - return - } - } - // Extract the domain name, which isn't always 'schemas-upnp-org' - urnDomain = strings.Split(d.ServiceType, ":")[1] - url = combineURL(rootURL, d.ControlURL) - return url, urnDomain, err -} - -func combineURL(rootURL, subURL string) string { - protocolEnd := "://" - protoEndIndex := strings.Index(rootURL, protocolEnd) - a := rootURL[protoEndIndex+len(protocolEnd):] - rootIndex := strings.Index(a, "/") - return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL -} - -func soapRequest(url, function, message, domain string) (r *http.Response, err error) { - fullMessage := "" + - "\r\n" + - "" + message + "" - - req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") - req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") - // req.Header.Set("Transfer-Encoding", "chunked") - req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"") - req.Header.Set("Connection", "Close") - req.Header.Set("Cache-Control", "no-cache") - req.Header.Set("Pragma", "no-cache") - - // log.Stderr("soapRequest ", req) - - r, err = http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - /*if r.Body != nil { - defer r.Body.Close() - }*/ - - if r.StatusCode >= 400 { - // log.Stderr(function, r.StatusCode) - err = errors.New("error " + strconv.Itoa(r.StatusCode) + " for " + function) - r = nil - return - } - return r, err -} - -type statusInfo struct { - externalIPAddress string -} - -func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err 
error) { - - message := "\r\n" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - var envelope Envelope - data, err := io.ReadAll(response.Body) - if err != nil { - return - } - reader := bytes.NewReader(data) - err = xml.NewDecoder(reader).Decode(&envelope) - if err != nil { - return - } - - info = statusInfo{envelope.Soap.ExternalIP.IPAddress} - - if err != nil { - return - } - - return info, err -} - -// GetExternalAddress returns an external IP. If GetExternalIPAddress action -// fails or IP returned is invalid, GetExternalAddress returns an error. -func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) { - info, err := n.getExternalIPAddress() - if err != nil { - return - } - addr = net.ParseIP(info.externalIPAddress) - if addr == nil { - err = fmt.Errorf("failed to parse IP: %v", info.externalIPAddress) - } - return -} - -func (n *upnpNAT) AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) { - // A single concatenation would break ARM compilation. 
- message := "\r\n" + - "" + strconv.Itoa(externalPort) - message += "" + protocol + "" - message += "" + strconv.Itoa(internalPort) + "" + - "" + n.ourIP + "" + - "1" - message += description + - "" + strconv.Itoa(timeout) + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was forwarded - // log.Println(message, response) - // JAE: - // body, err := io.ReadAll(response.Body) - // fmt.Println(string(body), err) - mappedExternalPort = externalPort - _ = response - return mappedExternalPort, err -} - -func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - - message := "\r\n" + - "" + strconv.Itoa(externalPort) + - "" + protocol + "" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was deleted - // log.Println(message, response) - _ = response - return -} diff --git a/proto/README.md b/proto/README.md index fcce452a24..24c9131c21 100644 --- a/proto/README.md +++ b/proto/README.md @@ -1,41 +1,56 @@ -# Protocol Buffers - -This sections defines the types and messages shared across implementations. The -definition of the data structures are located in the -[core/data\_structures](../spec/core/data_structures.md) for the core data types -and ABCI definitions are located in the [ABCI](../spec/abci/README.md) section. - -## Process of Updates - -The `.proto` files within this section are core to the protocol and updates must -be treated as such. - -### Steps - -1. Make an issue with the proposed change. Within in the issue members from - both the CometBFT and tendermint-rs team will leave comments. 
If there is not - consensus on the change an [RFC](../docs/rfc/README.md) may be requested. - 1. Submission of an RFC as a pull request should be made to facilitate - further discussion. - 2. Merge the RFC. -2. Make the necessary changes to the `.proto` file(s), [core data - structures](../spec/core/data_structures.md) and/or [ABCI - protocol](../spec/abci). -3. Open issues within CometBFT and Tendermint-rs repos. This is used to notify - the teams that a change occurred in the spec. - 1. Tag the issue with a spec version label. This will notify the team the - changed has been made on master but has not entered a release. - -### Versioning - -The spec repo aims to be versioned. Once it has been versioned, updates to the -protobuf files will live on master. After a certain amount of time, decided on -by CometBFT and tendermint-rs team leads, a release will be made on the spec -repo. The spec may contain minor releases as well, depending on the -implementation these changes may lead to a breaking change. If so, the -implementation team should open an issue within the spec repo requiring a major -release of the spec. - -If the steps above were followed each implementation should have issues tagged -with a spec change label. Once all issues have been completed the team should -signify their readiness for release. + +# CometBFT v0.37.x Protocol Buffers Definitions + +This is the set of [Protobuf][protobuf] definitions of types used by various +parts of [CometBFT]: + +- The [Application Blockchain Interface][abci] (ABCI), especially in the context + of _remote_ applications. +- The P2P layer, in how CometBFT nodes interact with each other over the + network. +- In interaction with remote signers ("privval"). +- The RPC, in that the native JSON serialization of certain Protobuf types is + used when accepting and responding to RPC requests. +- The storage layer, in how data is serialized to and deserialized from on-disk + storage. 
+ +The canonical Protobuf definitions live in the `proto` folder of the relevant +release branch of CometBFT. These definitions are published to the [Buf +registry][buf] for integrators' convenience. + +## Why does CometBFT use `tendermint` Protobuf definitions? + +This is as a result of CometBFT being a fork of [Tendermint Core][tmcore] and +wanting to provide integrators with as painless a way as possible of +transitioning from Tendermint Core to CometBFT. + +As of CometBFT v1, however, the project will transition to using and providing a +`cometbft` package of Protobuf definitions (see [\#1330]). + +## How are `tendermint` Protobuf definitions versioned? + +At present, the canonical source of Protobuf definitions for all CometBFT v0.x +releases is on each respective release branch. Each respective release's +Protobuf definitions are also, for convenience, published to a corresponding +branch in the `tendermint/tendermint` Buf repository. + +| CometBFT version | Canonical Protobufs | Buf registry | +|------------------|---------------------------------------------|-------------------------------------------| +| v0.38.x | [v0.38.x Protobuf definitions][v038-protos] | [Buf repository v0.38.x branch][v038-buf] | +| v0.37.x | [v0.37.x Protobuf definitions][v037-protos] | [Buf repository v0.37.x branch][v037-buf] | +| v0.34.x | [v0.34.x Protobuf definitions][v034-protos] | [Buf repository v0.34.x branch][v034-buf] | + +[protobuf]: https://protobuf.dev/ +[CometBFT]: https://github.com/cometbft/cometbft +[abci]: https://github.com/cometbft/cometbft/tree/v0.37.x/spec/abci +[buf]: https://buf.build/tendermint/tendermint +[tmcore]: https://github.com/tendermint/tendermint +[\#1330]: https://github.com/cometbft/cometbft/issues/1330 +[v034-protos]: https://github.com/cometbft/cometbft/tree/v0.34.x/proto +[v034-buf]: https://buf.build/tendermint/tendermint/docs/v0.34.x +[v037-protos]: https://github.com/cometbft/cometbft/tree/v0.37.x/proto +[v037-buf]: 
https://buf.build/tendermint/tendermint/docs/v0.37.x +[v038-protos]: https://github.com/cometbft/cometbft/tree/v0.38.x/proto +[v038-buf]: https://buf.build/tendermint/tendermint/docs/v0.38.x diff --git a/proto/buf.lock b/proto/buf.lock index f2b6936985..51b78ffe35 100644 --- a/proto/buf.lock +++ b/proto/buf.lock @@ -4,4 +4,5 @@ deps: - remote: buf.build owner: cosmos repository: gogo-proto - commit: 6652e3443c3b4504bb3bf82e73a7e409 + commit: 5e5b9fdd01804356895f8f79a6f1ddc1 + digest: shake256:0b85da49e2e5f9ebc4806eae058e2f56096ff3b1c59d1fb7c190413dd15f45dd456f0b69ced9059341c80795d2b6c943de15b120a9e0308b499e43e4b5fc2952 diff --git a/proto/buf.yaml b/proto/buf.yaml index c6e0660f14..a646c2030a 100644 --- a/proto/buf.yaml +++ b/proto/buf.yaml @@ -1,4 +1,5 @@ version: v1 +name: buf.build/tendermint/tendermint deps: - buf.build/cosmos/gogo-proto breaking: diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index c267a09591..e4e55a91c7 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -886,7 +886,7 @@ paths: required: true schema: type: string - example: "tx.height=1000" + example: '"tx.height=1000"' - in: query name: prove description: Include proofs of the transactions inclusion in the block @@ -949,7 +949,7 @@ paths: required: true schema: type: string - example: "block.height > 1000 AND valset.changed > 0" + example: '"block.height > 1000 AND valset.changed > 0"' - in: query name: page description: "Page number (1-based)" @@ -1065,7 +1065,7 @@ paths: required: true schema: type: string - example: "/a/b/c" + example: '"/a/b/c"' - in: query name: data description: Data @@ -2812,10 +2812,10 @@ components: properties: key: type: string - example: "YWN0aW9u" + example: "action" value: type: string - example: "c2VuZA==" + example: "send" index: type: boolean example: false diff --git a/scripts/qa/reporting/requirements.txt b/scripts/qa/reporting/requirements.txt index be1613b382..335c842fdf 100644 --- 
a/scripts/qa/reporting/requirements.txt +++ b/scripts/qa/reporting/requirements.txt @@ -5,7 +5,7 @@ kiwisolver==1.4.4 matplotlib==3.6.3 numpy==1.24.2 packaging==21.3 -Pillow==9.2.0 +Pillow==10.0.1 pyparsing==3.0.9 python-dateutil==2.8.2 six==1.16.0 diff --git a/spec/README.md b/spec/README.md index 921c68b7cb..61c4d3fc92 100644 --- a/spec/README.md +++ b/spec/README.md @@ -35,12 +35,12 @@ please submit them to our [bug bounty](https://cometbft.com/security)! ### P2P and Network Protocols -- [The Base P2P Layer](./p2p/node.md): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections -- [Peer Exchange (PEX)](./p2p/messages/pex.md): gossip known peer addresses so peers can find each other -- [Block Sync](./p2p/messages/block-sync.md): gossip blocks so peers can catch up quickly -- [Consensus](./p2p/messages/consensus.md): gossip votes and block parts so new blocks can be committed -- [Mempool](./p2p/messages/mempool.md): gossip transactions so they get included in blocks -- [Evidence](./p2p/messages/evidence.md): sending invalid evidence will stop the peer +- [The Base P2P Layer](./p2p/legacy-docs/node.md): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections +- [Peer Exchange (PEX)](./p2p/legacy-docs/messages/pex.md): gossip known peer addresses so peers can find each other +- [Block Sync](./p2p/legacy-docs/messages/block-sync.md): gossip blocks so peers can catch up quickly +- [Consensus](./p2p/legacy-docs/messages/consensus.md): gossip votes and block parts so new blocks can be committed +- [Mempool](./p2p/legacy-docs/messages/mempool.md): gossip transactions so they get included in blocks +- [Evidence](./p2p/legacy-docs/messages/evidence.md): sending invalid evidence will stop the peer ### RPC diff --git a/spec/abci/abci++_app_requirements.md b/spec/abci/abci++_app_requirements.md index 52c325d6c3..8d017553ef 100644 --- a/spec/abci/abci++_app_requirements.md +++ b/spec/abci/abci++_app_requirements.md @@ 
-582,10 +582,24 @@ These are the current consensus parameters (as of v0.37.x): The maximum size of a complete Protobuf encoded block. This is enforced by the consensus algorithm. -This implies a maximum transaction size that is this `MaxBytes`, less the expected size of +This implies a maximum transaction size that is `MaxBytes`, less the expected size of the header, the validator set, and any included evidence in the block. -Must have `0 < MaxBytes < 100 MB`. +The Application should be aware that honest validators _may_ produce and +broadcast blocks with up to the configured `MaxBytes` size. +As a result, the consensus +[timeout parameters](../../docs/core/configuration.md#consensus-timeouts-explained) +adopted by nodes should be configured so as to account for the worst-case +latency for the delivery of a full block with `MaxBytes` size to all validators. + +Must have `0 < MaxBytes <= 100 MB`. + +> Bear in mind that the default value for the `BlockParams.MaxBytes` consensus +> parameter accepts as valid blocks with size up to 21 MB. +> If the Application's use case does not need blocks of that size, +> or if the impact (specially on bandwidth consumption and block latency) +> of propagating blocks of that size was not evaluated, +> it is strongly recommended to wind down this default value. ##### BlockParams.MaxGas diff --git a/spec/abci/abci++_basic_concepts.md b/spec/abci/abci++_basic_concepts.md index 9643761e71..c0e6007783 100644 --- a/spec/abci/abci++_basic_concepts.md +++ b/spec/abci/abci++_basic_concepts.md @@ -24,7 +24,7 @@ title: Overview and basic concepts # Overview and basic concepts -## ABCI++ vs. ABCI +## ABCI 2.0 vs. ABCI [↑ Back to Outline](#outline) @@ -40,7 +40,7 @@ as the Application cannot require validators to do more than executing the trans finalized blocks. This includes features such as threshold cryptography, and guaranteed IBC connection attempts. 
-ABCI++ addresses these limitations by allowing the application to intervene at two key places of +ABCI 2.0 addresses these limitations by allowing the application to intervene at two key places of consensus execution: (a) at the moment a new proposal is to be created and (b) at the moment a proposal is to be validated. The new interface allows block proposers to perform application-dependent work in a block through the `PrepareProposal` method (a); and validators to perform application-dependent work @@ -53,7 +53,7 @@ We plan to extend this to allow applications to intervene at the moment a (preco The applications could then require their validators to do more than just validating blocks through the `ExtendVote` and `VerifyVoteExtension` methods. -## Method overview +## Methods overview [↑ Back to Outline](#outline) @@ -85,19 +85,21 @@ call sequences of these methods. can make changes to the raw proposal, such as modifying the set of transactions or the order in which they appear, and returns the (potentially) modified proposal, called *prepared proposal* in the `ResponsePrepareProposal` - call. The logic modifying the raw proposal can be non-deterministic. + call. + The logic modifying the raw proposal MAY be non-deterministic. - [**ProcessProposal:**](./abci++_methods.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. This enables features such as immediate block execution, and allows the Application to reject invalid blocks. CometBFT calls it when it receives a proposal and _validValue_ is `nil`. - The Application cannot modify the proposal at this point but can reject it if it is + The Application cannot modify the proposal at this point but can reject it if invalid. If that is the case, the consensus algorithm will prevote `nil` on the proposal, which has strong liveness implications for CometBFT. 
As a general rule, the Application SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of the proposal is invalid (e.g., an invalid transaction); the Application can ignore the invalid part of the prepared proposal at block execution time. + The logic in `ProcessProposal` MUST be deterministic. - [**BeginBlock:**](./abci++_methods.md#beginblock) Is called exactly once after a block has been decided and executes once before all `DeliverTx` method calls. @@ -125,6 +127,7 @@ call sequences of these methods. CometBFT calls `ExtendVote` when the consensus algorithm is about to send a non-`nil` precommit message. If the Application does not have vote extension information to provide at that time, it returns a 0-length byte array as its vote extension. + The logic in `ExtendVote` MAY be non-deterministic. - [**VerifyVoteExtension:**](./abci++_methods.md#verifyvoteextension) It allows validators to validate the vote extension data attached to a precommit message. If the validation @@ -174,7 +177,7 @@ call sequences of these methods. State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying state machine (application) snapshots instead of replaying historical blocks. For more details, see the -[state sync documentation](../p2p/messages/state-sync.md). +[state sync documentation](../p2p/legacy-docs/messages/state-sync.md). New nodes discover and request snapshots from other nodes in the P2P network. A CometBFT node that receives a request for snapshots from a peer will call @@ -224,23 +227,26 @@ More details on managing state across connections can be found in the section on ## Proposal timeout -Immediate execution requires the Application to fully execute the prepared block -before returning from `PrepareProposal`, this means that CometBFT cannot make progress -during the block execution. 
-This stands on the consensus algorithm critical path: if the Application takes a long time -executing the block, the default value of *TimeoutPropose* might not be sufficient -to accommodate the long block execution time and non-proposer nodes might time -out and prevote `nil`. The proposal, in this case, will probably be rejected and a new round will be necessary. +`PrepareProposal` stands on the consensus algorithm critical path, +i.e., CometBFT cannot make progress while this method is being executed. +Hence, if the Application takes a long time preparing a proposal, +the default value of *TimeoutPropose* might not be sufficient +to accommodate the method's execution and validator nodes might time out and prevote `nil`. +The proposal, in this case, will probably be rejected and a new round will be necessary. - -Operators will need to adjust the default value of *TimeoutPropose* in CometBFT's configuration file, +Timeouts are automatically increased for each new round of a height and, if the execution of `PrepareProposal` is bound, eventually *TimeoutPropose* will be long enough to accommodate the execution of `PrepareProposal`. +However, relying on this self adaptation could lead to performance degradation and, therefore, +operators are suggested to adjust the initial value of *TimeoutPropose* in CometBFT's configuration file, in order to suit the needs of the particular application being deployed. +This is particularly important if applications implement *immediate execution*. +To implement this technique, proposers need to execute the block being proposed within `PrepareProposal`, which could take longer than *TimeoutPropose*. + ## Deterministic State-Machine Replication [↑ Back to Outline](#outline) -ABCI++ applications must implement deterministic finite-state machines to be +ABCI applications must implement deterministic finite-state machines to be securely replicated by the CometBFT consensus engine. 
This means block execution must be strictly deterministic: given the same ordered set of transactions, all nodes will compute identical responses, for all @@ -255,11 +261,13 @@ from block execution (`BeginBlock`, `DeliverTx`, `EndBlock`, and `Commit` calls) any other kind of request. This is the only way to ensure all nodes see the same transactions and compute the same results. -Some Applications may choose to implement immediate execution, which entails executing the blocks -that are about to be proposed (via `PrepareProposal`), and those that the Application is asked to -validate (via `ProcessProposal`). However, the state changes caused by processing those -proposed blocks must never replace the previous state until the block execution calls confirm -the block decided. +Applications that implement immediate execution (execute the blocks +that are about to be proposed, in `PrepareProposal`, or that require validation, in `ProcessProposal`) produce a new candidate state before a block is decided. +The state changes caused by processing those +proposed blocks must never replace the previous state until `FinalizeBlock` confirms +that the proposed block was decided and `Commit` is invoked for it. + +The same is true for Applications that quickly accept blocks and execute the blocks optimistically in parallel with the remaining consensus steps to save time during block execution (`BeginBlock`, `DeliverTx`, `EndBlock`, and `Commit` calls); they must only apply state changes in `Commit`. 
>```abnf ->proposer = [prepare-proposal process-proposal] +>proposer = [prepare-proposal [process-proposal]] >``` * Also for every round, if the local process is _not_ the proposer of the current round, CometBFT diff --git a/spec/abci/abci++_example_scenarios.md b/spec/abci/abci++_example_scenarios.md index 93fb6a7a6f..1a7de60b91 100644 --- a/spec/abci/abci++_example_scenarios.md +++ b/spec/abci/abci++_example_scenarios.md @@ -4,13 +4,13 @@ title: ABCI++ extra --- # Introduction -In the section [CometBFT's expected behaviour](./abci++_comet_expected_behavior.md#valid-method-call-sequences), -we presented the most common behaviour, usually referred to as the good case. -However, the grammar specified in the same section is more general and covers more scenarios -that an Application designer needs to account for. +In the section [CometBFT's expected behaviour](./abci++_comet_expected_behavior.md#valid-method-call-sequences), +we presented the most common behaviour, usually referred to as the good case. +However, the grammar specified in the same section is more general and covers more scenarios +that an Application designer needs to account for. -In this section, we give more information about these possible scenarios. We focus on methods -introduced by ABCI++: `PrepareProposal` and `ProcessProposal`. Specifically, we concentrate +In this section, we give more information about these possible scenarios. We focus on methods +introduced by ABCI++: `PrepareProposal` and `ProcessProposal`. Specifically, we concentrate on the part of the grammar presented below. ```abnf @@ -21,25 +21,27 @@ proposer = [prepare-proposal process-proposal] non-proposer = [process-proposal] ``` -We can see from the grammar that we can have several rounds before deciding a block. The reasons +We can see from the grammar that we can have several rounds before deciding a block. The reasons why one round may not be enough are: + * network asynchrony, and -* a Byzantine process being the proposer. 
+* a Byzantine process being the proposer. -If we assume that the consensus algorithm decides on block $X$ in round $r$, in the rounds +If we assume that the consensus algorithm decides on block $X$ in round $r$, in the rounds $r' <= r$, CometBFT can exhibit any of the following behaviours: -1. Call `PrepareProposal` and/or `ProcessProposal` for block $X$. +1. Call `PrepareProposal` and/or `ProcessProposal` for block $X$. 1. Call `PrepareProposal` and/or `ProcessProposal` for block $Y \neq X$. 1. Does not call `PrepareProposal` and/or `ProcessProposal`. -In the rounds when it is the proposer, CometBFT's `PrepareProposal` call is always followed by the -`ProcessProposal` call. The reason is that the process always delivers the proposal to itself, which -triggers the `ProcessProposal` call. +In the rounds in which the process is the proposer, CometBFT's `PrepareProposal` call is always followed by the +`ProcessProposal` call. The reason is that the process also broadcasts the proposal to itself, which is locally delivered and triggers the `ProcessProposal` call. +The proposal processed by `ProcessProposal` is the same as what was returned by any of the preceding `PrepareProposal` invoked for the same height and round. +While in the absence of restarts there is only one such preceding invocation, if the proposer restarts there could have been one extra invocation to `PrepareProposal` for each restart. -As the number of rounds the consensus algorithm needs to decide in a given run is a priori unknown, the -application needs to account for any number of rounds, where each round can exhibit any of these three -behaviours. Recall that the application is unaware of the internals of consensus and thus of the rounds. +As the number of rounds the consensus algorithm needs to decide in a given run is a priori unknown, the +application needs to account for any number of rounds, where each round can exhibit any of these three +behaviours. 
Recall that the application is unaware of the internals of consensus and thus of the rounds. # Possible scenarios The unknown number of rounds we can have when following the consensus algorithm yields a vast number of @@ -125,19 +127,20 @@ some process will propose block $X$ and if $p$ receives $2f+1$ $Precommit$ messa value. -## Scenario 3 +## Scenario 3 -$p$ calls `PrepareProposal` and `ProcessProposal` for many values, but decides on a value for which it did +$p$ calls `PrepareProposal` and `ProcessProposal` for many values, but decides on a value for which it did not call `PrepareProposal` or `ProcessProposal`. -In this scenario, in all rounds before $r$ we can have any round presented in [Scenario 1](#scenario-1) or +In this scenario, in all rounds before $r$ we can have any round presented in [Scenario 1](#scenario-1) or [Scenario 2](#scenario-2). What is important is that: -- no proposer proposed block $X$ or if it did, process $p$, due to asynchrony, did not receive it in time, + +* no proposer proposed block $X$ or if it did, process $p$, due to asynchrony, did not receive it in time, so it did not call `ProcessProposal`, and -- if $p$ was the proposer it proposed some other value $\neq X$. +* if $p$ was the proposer it proposed some other value $\neq X$. -### Round $r$: +### Round $r$: 1. **Propose:** A correct process is the proposer in this round, and it proposes block $X$. Due to asynchrony, the proposal message arrives to process $p$ after its $timeoutPropose$ @@ -158,4 +161,3 @@ rounds $0 <= r' <= r$ and that due to network asynchrony or Byzantine proposer, proposal before $timeoutPropose$ expires. As a result, it will enter round $r$ without calling `PrepareProposal` and `ProcessProposal` before it, and as shown in Round $r$ of [Scenario 3](#scenario-3) it will decide in this round. Again without calling any of these two calls. 
- diff --git a/spec/abci/abci++_methods.md b/spec/abci/abci++_methods.md index 2edd07080b..983f902246 100644 --- a/spec/abci/abci++_methods.md +++ b/spec/abci/abci++_methods.md @@ -346,7 +346,7 @@ title: Methods can be spoofed by adversaries, so applications should employ additional verification schemes to avoid denial-of-service attacks. The verified `AppHash` is automatically checked against the restored application at the end of snapshot restoration. - * For more information, see the `Snapshot` data type or the [state sync section](../p2p/messages/state-sync.md). + * For more information, see the `Snapshot` data type or the [state sync section](../p2p/legacy-docs/messages/state-sync.md). ### ApplySnapshotChunk @@ -390,7 +390,7 @@ title: Methods peers are available), it will reject the snapshot and try a different one via `OfferSnapshot`. The application should be prepared to reset and accept it or abort as appropriate. -## New methods introduced in ABCI++ +## New methods introduced in ABCI 2.0 ### PrepareProposal @@ -479,8 +479,7 @@ and _p_'s _validValue_ is `nil`: returns from the call. 3. The Application uses the information received (transactions, commit info, misbehavior, time) to (potentially) modify the proposal. - * the Application MAY fully execute the block and produce a candidate state — immediate - execution + * the Application MAY fully execute the block and produce a candidate state (immediate execution) * the Application can manipulate transactions: * leave transactions untouched * add new transactions (not present initially) to the proposal @@ -524,10 +523,12 @@ the consensus algorithm will use it as proposal and will not call `RequestPrepar * The Application may fully execute the block as though it was handling the calls to `BeginBlock-DeliverTx-EndBlock`. * However, any resulting state changes must be kept as _candidate state_, and the Application should be ready to discard it in case another block is decided. 
- * `RequestProcessProposal` is also called at the proposer of a round. The reason for this is to - inform the Application of the block header's hash, which cannot be done at `PrepareProposal` - time. In this case, the call to `RequestProcessProposal` occurs right after the call to - `RequestPrepareProposal`. + * `RequestProcessProposal` is also called at the proposer of a round. + Normally the call to `RequestProcessProposal` occurs right after the call to `RequestPrepareProposal` and + `RequestProcessProposal` matches the block produced based on `ResponsePrepareProposal` (i.e., + `RequestPrepareProposal.txs` equals `RequestProcessProposal.txs`). + However, no such guarantee is made since, in the presence of failures, `RequestProcessProposal` may match + `ResponsePrepareProposal` from an earlier invocation or `ProcessProposal` may not be invoked at all. * The height and time values match the values from the header of the proposed block. * If `ResponseProcessProposal.status` is `REJECT`, consensus assumes the proposal received is not valid. @@ -863,7 +864,7 @@ Most of the data structures used in ABCI are shared [common data structures](../ | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | * **Usage**: - * Used for state sync snapshots, see the [state sync section](../p2p/messages/state-sync.md) for details. + * Used for state sync snapshots, see the [state sync section](../p2p/legacy-docs/messages/state-sync.md) for details. * A snapshot is considered identical across nodes only if _all_ fields are equal (including `Metadata`). Chunks may be retrieved from all nodes that have the same snapshot. * When sent across the network, a snapshot message can be at most 4 MB. 
diff --git a/spec/p2p/README.md b/spec/p2p/README.md new file mode 100644 index 0000000000..29efd8ecad --- /dev/null +++ b/spec/p2p/README.md @@ -0,0 +1,46 @@ +--- +order: 1 +parent: + title: P2P + order: 6 +--- + +# Peer-to-Peer Communication + +A CometBFT network is composed of multiple CometBFT instances, hereafter called +`nodes`, that interact by exchanging messages. + +The CometBFT protocols are designed under the assumption of a partially-connected network model. +This means that a node is not assumed to be directly connected to every other +node in the network. +Instead, each node is directly connected to only a subset of other nodes, +hereafter called its `peers`. + +The peer-to-peer (p2p) communication layer is then the component of CometBFT that: + +1. establishes connections between nodes in a CometBFT network +2. manages the communication between a node and the connected peers +3. intermediates the exchange of messages between peers in CometBFT protocols + +The specification of the p2p layer is a work in progress, +tracked by [issue #19](https://github.com/cometbft/cometbft/issues/19). +The current content is organized as follows: + +- [`implementation`](./implementation/README.md): documents the current state + of the implementation of the p2p layer, covering the main components of the + `p2p` package. The documentation covers, in a fairly comprehensive way, + the items 1. and 2. from the list above. +- [`reactor-api`](./reactor-api/README.md): specifies the API offered by the + p2p layer to the protocol layer, through the `Reactor` abstraction. + This is a high-level specification (i.e., it should not be implementation-specific) + of the p2p layer API, covering item 3. from the list above. +- [`legacy-docs`](./legacy-docs/): We keep older documentation in + the `legacy-docs` directory, as overall, it contains useful information. 
+ However, part of this content is redundant, + being more comprehensively covered in more recent documents, + and some implementation details might be outdated + (see [issue #981](https://github.com/cometbft/cometbft/issues/981)). + +In addition to this content, some unfinished, work in progress, and auxiliary +material can be found in the +[knowledge-base](https://github.com/cometbft/knowledge-base/tree/main/p2p) repository. diff --git a/spec/p2p/images/p2p-reactors.png b/spec/p2p/images/p2p-reactors.png new file mode 100644 index 0000000000..5515976c10 Binary files /dev/null and b/spec/p2p/images/p2p-reactors.png differ diff --git a/spec/p2p/images/p2p_state.png b/spec/p2p/images/p2p_state.png new file mode 100644 index 0000000000..c86d016826 Binary files /dev/null and b/spec/p2p/images/p2p_state.png differ diff --git a/spec/p2p/implementation/README.md b/spec/p2p/implementation/README.md new file mode 100644 index 0000000000..b32c95658e --- /dev/null +++ b/spec/p2p/implementation/README.md @@ -0,0 +1,38 @@ +# Implementation of the p2p layer + +This section documents the implementation of the peer-to-peer (p2p) +communication layer in CometBFT. + +The documentation was [produced](https://github.com/tendermint/tendermint/pull/9348) +using the `v0.34.*` releases +and the branch [`v0.34.x`](https://github.com/cometbft/cometbft/tree/v0.34.x) +of this repository as reference. +As there were no substantial changes in the p2p implementation, the +documentation also applies to the releases `v0.37.*` and `v0.38.*` [^v35]. + +[^v35]: The releases `v0.35.*` and `v0.36.*`, which included a major + refactoring of the p2p layer implementation, were [discontinued][v35postmorten]. 
+ +[v35postmorten]: https://interchain-io.medium.com/discontinuing-tendermint-v0-35-a-postmortem-on-the-new-networking-layer-3696c811dabc + +## Contents + +The documentation follows the organization of the +[`p2p` package](https://github.com/cometbft/cometbft/tree/v0.34.x/p2p), +which implements the following abstractions: + +- [Transport](./transport.md): establishes secure and authenticated + connections with peers; +- [Switch](./switch.md): responsible for dialing peers and accepting + connections from peers, for managing established connections, and for + routing messages between the reactors and peers, + that is, between local and remote instances of the CometBFT protocols; +- [PEX Reactor](./pex.md): due to the several roles of this component, the + documentation is split in several parts: + - [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer addresses, thus implementing a peer discovery service; + - [Address Book](./addressbook.md): stores discovered peer addresses and + quality metrics associated to peers with which the node has interacted; + - [Peer Manager](./peer_manager.md): defines when and to which peers a node + should dial, in order to establish outbound connections; +- [Types](./types.md) and [Configuration](./configuration.md) provide a list of + existing types and configuration parameters used by the p2p package. diff --git a/spec/p2p/implementation/addressbook.md b/spec/p2p/implementation/addressbook.md new file mode 100644 index 0000000000..26b9504214 --- /dev/null +++ b/spec/p2p/implementation/addressbook.md @@ -0,0 +1,368 @@ +# Address Book + +The address book tracks information about peers, i.e., about other nodes in the network. + +The primary information stored in the address book are peer addresses. +A peer address is composed by a node ID and a network address; a network +address is composed by an IP address or a DNS name plus a port number. +The same node ID can be associated to multiple network addresses. 
+ +There are two sources for the addresses stored in the address book. +The [Peer Exchange protocol](./pex-protocol.md) stores in the address book +the peer addresses it discovers, i.e., it learns from connected peers. +And the [Switch](./switch.md) registers the addresses of peers with which it +has interacted: to which it has dialed or from which it has accepted a +connection. + +The address book also records additional information about peers with which the +node has interacted, from which it is possible to rank peers. +The Switch reports [connection attempts](#dial-attempts) to a peer address; too +many failed attempts indicate that a peer address is invalid. +Reactors, in their turn, report a peer as [good](#good-peers) when it behaves as +expected, or as a [bad peer](#bad-peers), when it misbehaves. + +There are two entities that retrieve peer addresses from the address book. +The [Peer Manager](./peer_manager.md) retrieves peer addresses to dial, so to +establish outbound connections. +This selection is random, but has a configurable bias towards peers that have +been marked as good peers. +The [Peer Exchange protocol](./pex-protocol.md) retrieves random samples of +addresses to offer (send) to peers. +This selection is also random but it includes, in particular for nodes that +operate in seed mode, some bias toward peers marked as good ones. + +## Buckets + +Peer addresses are stored in buckets. +There are buckets for new addresses and buckets for old addresses. +The buckets for new addresses store addresses of peers about which the node +does not have much information; the first address registered for a peer ID is +always stored in a bucket for new addresses. +The buckets for old addresses store addresses of peers with which the node has +interacted and that were reported as [good peers](#good-peers) by a reactor. +An old address therefore can be seen as an alias for a good address. + +> Note that new addresses do not mean bad addresses. 
+> The addresses of peers marked as [bad peers](#bad-peers) are removed from the +> buckets where they are stored, and temporarily kept in a table of banned peers. + +The number of buckets is fixed and there are more buckets for new addresses +(`256`) than buckets for old addresses (`64`), a ratio of 4:1. +Each bucket can store up to `64` addresses. +When a bucket becomes full, the peer address with the lowest ranking is removed +from the bucket. +The first choice is to remove bad addresses, with multiple failed attempts +associated. +In the absence of those, the *oldest* address in the bucket is removed, i.e., +the address with the oldest last attempt to dial. + +When a bucket for old addresses becomes full, the lowest-ranked peer address in +the bucket is moved to a bucket of new addresses. +When a bucket for new addresses becomes full, the lowest-ranked peer address in +the bucket is removed from the address book. +In other words, exceeding old or good addresses are downgraded to new +addresses, while exceeding new addresses are dropped. + +The bucket that stores an `address` is defined by the following two methods, +for new and old addresses: + +- `calcNewBucket(address, source) = hash(key + groupKey(source) + hash(key + groupKey(address) + groupKey(source)) % newBucketsPerGroup) % newBucketCount` +- `calcOldBucket(address) = hash(key + groupKey(address) + hash(key + address) % oldBucketsPerGroup) % oldBucketCount` + +The `key` is a fixed random 96-bit (12-byte) string. +The `groupKey` for an address is a string representing its network group. +The `source` of an address is the address of the peer from which we learn the +address. +The first (internal) hash is reduced to an integer up to `newBucketsPerGroup = +32`, for new addresses, and `oldBucketsPerGroup = 4`, for old addresses. +The second (external) hash is reduced to bucket indexes, in the interval from 0 +to the number of new (`newBucketCount = 256`) or old (`oldBucketCount = 64`) buckets. 
+ +Notice that new addresses with sources from the same network group are more +likely to end up in the same bucket, therefore competing for it. +For old addresses, instead, two addresses are more likely to end up in the same +bucket when they belong to the same network group. + +## Adding addresses + +The `AddAddress` method adds the address of a peer to the address book. + +The added address is associated to a *source* address, which identifies the +node from which the peer address was learned. + +Addresses are added to the address book in the following situations: + +1. When a peer address is learned via PEX protocol, having the sender + of the PEX message as its source +2. When an inbound peer is added, in this case the peer itself is set as the + source of its own address +3. When the switch is instructed to dial addresses via the `DialPeersAsync` + method, in this case the node itself is set as the source + +If the added address contains a node ID that is not registered in the address +book, the address is added to a [bucket](#buckets) of new addresses. +Otherwise, the additional address for an existing node ID is **not added** to +the address book when: + +- The last address added with the same node ID is stored in an old bucket, so + it is considered a "good" address +- There are addresses associated to the same node ID stored in + `maxNewBucketsPerAddress = 4` distinct buckets +- Randomly, with a probability that increases exponentially with the number of + buckets in which there is an address with the same node ID. + So, a new address for a node ID which is already present in one bucket is + added with 50% of probability; if the node ID is present in two buckets, the + probability decreases to 25%; and if it is present in three buckets, the + probability is 12.5%. + +The new address is also added to the `addrLookup` table, which stores +`knownAddress` entries indexed by their node IDs. 
+If the new address is from an unknown peer, a new entry is added to the +`addrLookup` table; otherwise, the existing entry is updated with the new +address. +Entries of this table contain, among other fields, the list of buckets where +addresses of a peer are stored. +The `addrLookup` table is used by most of the address book methods (e.g., +`HasAddress`, `IsGood`, `MarkGood`, `MarkAttempt`), as it provides fast access +to addresses. + +### Errors + +- if the added address or the associated source address are nil +- if the added address is invalid +- if the added address is the local node's address +- if the added address ID is of a [banned](#bad-peers) peer +- if either the added address or the associated source address IDs are configured as private IDs +- if `routabilityStrict` is set and the address is not routable +- in case of failures computing the bucket for the new address (`calcNewBucket` method) +- if the added address instance, which is a new address, is configured as an + old address (sanity check of `addToNewBucket` method) + +## Need for Addresses + +The `NeedMoreAddrs` method verifies whether the address book needs more addresses. + +It is invoked by the PEX reactor to define whether to request peer addresses +to a new outbound peer or to a randomly selected connected peer. + +The address book needs more addresses when it has less than `1000` addresses +registered, counting all buckets for new and old addresses. + +## Pick address + +The `PickAddress` method returns an address stored in the address book, chosen +at random with a configurable bias towards new addresses. + +It is invoked by the Peer Manager to obtain a peer address to dial, as part of +its `ensurePeers` routine. +The bias starts from 10%, when the peer has no outbound peers, increasing by +10% for each outbound peer the node has, up to 90%, when the node has at least +8 outbound peers. 
+ +The configured bias is a parameter that influences the probability of choosing +an address from a bucket of new addresses or from a bucket of old addresses. +A second parameter influencing this choice is the number of new and old +addresses stored in the address book. +In the absence of bias (i.e., if the configured bias is 50%), the probability +of picking a new address is given by the square root of the number of new +addresses divided by the sum of the square roots of the numbers of new and old +addresses. +By adding a bias toward new addresses (i.e., configured bias larger than 50%), +the portion on the sample occupied by the square root of the number of new +addresses increases, while the corresponding portion for old addresses decreases. +As a result, it becomes more likely to pick a new address at random from this sample. + +> The use of the square roots softens the impact of disproportional numbers of +> new and old addresses in the address book. This is actually the expected +> scenario, as there are 4 times more buckets for new addresses than buckets +> for old addresses. + +Once the type of address, new or old, is defined, a non-empty bucket of this +type is selected at random. +From the selected bucket, an address is chosen at random and returned. +If all buckets of the selected type are empty, no address is returned. + +## Random selection + +The `GetSelection` method returns a selection of addresses stored in the +address book, with no bias toward new or old addresses. + +It is invoked by the PEX protocol to obtain a list of peer addresses with two +purposes: + +- To send to a peer in a PEX response, in the case of outbound peers or of + nodes not operating in seed mode +- To crawl, in the case of nodes operating in seed mode, as part of every + interaction of the `crawlPeersRoutine` + +The selection is a random subset of the peer addresses stored in the +`addrLookup` table, which stores the last address added for each peer ID. 
+The target size of the selection is `23%` (`getSelectionPercent`) of the
+number of addresses stored in the address book, but it should not be lower than
+`32` (`minGetSelection`) --- if it is, all addresses in the book are returned
+--- nor greater than `250` (`maxGetSelection`).
+
+> The random selection is produced by:
+>
+> - Retrieving all entries of the `addrLookup` map, which by definition are
+>   returned in random order.
+> - Randomly shuffling the retrieved list, using the Fisher-Yates algorithm
+
+## Random selection with bias
+
+The `GetSelectionWithBias` method returns a selection of addresses stored in
+the address book, with bias toward new addresses.
+
+It is invoked by the PEX protocol to obtain a list of peer addresses to be sent
+to a peer in a PEX response.
+This method is only invoked by seed nodes, when replying to a PEX request
+received from an inbound peer (i.e., a peer that dialed the seed node).
+The bias used in this scenario is hard-coded to 30%, meaning that 70% of
+the returned addresses are expected to be old addresses.
+
+The number of addresses that compose the selection is computed in the same way
+as for the non-biased random selection.
+The bias toward new addresses is implemented by requiring that the configured
+bias, interpreted as a percentage, of the selected addresses come from buckets of
+new addresses, while the remaining come from buckets of old addresses.
+Since the number of old addresses is typically lower than the number of new
+addresses, it is possible that the address book does not have enough old
+addresses to include in the selection.
+In this case, additional new addresses are included in the selection.
+Thus, the configured bias, in practice, is towards old addresses, not towards
+new addresses.
+
+To randomly select addresses of a type, the address book considers all
+addresses present in every bucket of that type.
+This list of all addresses of a type is randomly shuffled, and the requested
+number of addresses are retrieved from the tail of this list.
+The returned selection contains, at its beginning, a random selection of new
+addresses in random order, followed by a random selection of old addresses, in
+random order.
+
+## Dial Attempts
+
+The `MarkAttempt` method records a failed attempt to connect to an address.
+
+It is invoked by the Peer Manager when it fails dialing a peer, but the failure
+is not in the authentication step (`ErrSwitchAuthenticationFailure` error).
+In case of authentication errors, the peer is instead marked as a [bad peer](#bad-peers).
+
+The failed connection attempt is recorded in the address registered for the
+peer's ID in the `addrLookup` table, which is the last address added with that ID.
+The known address' counter of failed `Attempts` is increased and the failure
+time is registered in `LastAttempt`.
+
+The possible effect of recording multiple failed connect attempts to a peer is
+to turn its address into a *bad* address (do not confuse with banned addresses).
+A known address becomes bad if it is stored in buckets of new addresses, and
+when connection attempts:
+
+- Have not been made for over a week, i.e., `LastAttempt` is older than a week
+- Have failed 3 times and never succeeded, i.e., `LastSuccess` field is unset
+- Have failed 10 times in the last week, i.e., `LastSuccess` is older than a week
+
+Addresses marked as *bad* are the first candidates to be removed from a bucket of
+new addresses when the bucket becomes full.
+
+> Note that failed connection attempts are reported for a peer address, but in
+> fact the address book records them for a peer.
+>
+> More precisely, failed connection attempts are recorded in the entry of the
+> `addrLookup` table with reported peer ID, which contains the last address
+> added for that node ID, which is not necessarily the reported peer address.
+ +## Good peers + +The `MarkGood` method marks a peer ID as good. + +It is invoked by the consensus reactor, via switch, when the number of useful +messages received from a peer is a multiple of `10000`. +Vote and block part messages are considered for this number, they must be valid +and not be duplicated messages to be considered useful. + +> The `SwitchReporter` type of `behaviour` package also invokes the `MarkGood` +> method when a "reason" associated with consensus votes and block parts is +> reported. +> No reactor, however, currently provides these "reasons" to the `SwitchReporter`. + +The effect of this action is that the address registered for the peer's ID in the +`addrLookup` table, which is the last address added with that ID, is marked as +good and moved to a bucket of old addresses. +An address marked as good has its failed to connect counter and timestamp reset. +If the destination bucket of old addresses is full, the oldest address in the +bucket is moved (downgraded) to a bucket of new addresses. + +Moving the peer address to a bucket of old addresses has the effect of +upgrading, or increasing the ranking of a peer in the address book. + +## Bad peers + +The `MarkBad` method marks a peer as bad and bans it for a period of time. + +This method is only invoked within the PEX reactor, with a banning time of 24 +hours, for the following reasons: + +- A peer misbehaves in the [PEX protocol](./pex-protocol.md#misbehavior) +- When the `maxAttemptsToDial` limit (`16`) is reached for a peer +- If an `ErrSwitchAuthenticationFailure` error is returned when dialing a peer + +The effect of this action is that the address registered for the peer's ID in the +`addrLookup` table, which is the last address added with that ID, is banned for +a period of time. +The banned peer is removed from the `addrLookup` table and from all buckets +where its addresses are stored. + +The information about banned peers, however, is not discarded. 
+It is maintained in the `badPeers` map, indexed by peer ID. +This allows, in particular, addresses of banned peers to be +[reinstated](#reinstating-addresses), i.e., to be added +back to the address book, when their ban period expires. + +## Reinstating addresses + +The `ReinstateBadPeers` method attempts to re-add banned addresses to the address book. + +It is invoked by the PEX reactor when dialing new peers. +This action is taken before requesting additional addresses to peers, +in the case that the node needs more peer addresses. + +The set of banned peer addresses is retrieved from the `badPeers` map. +Addresses that are not any longer banned, i.e., whose banned period has expired, +are added back to the address book as new addresses, while the corresponding +node IDs are removed from the `badPeers` map. + +## Removing addresses + +The `RemoveAddress` method removes an address from the address book. + +It is invoked by the switch when it dials a peer or accepts a connection from a +peer that ends up being the node itself (`IsSelf` error). +In both cases, the address dialed or accepted is also added to the address book +as a local address, via the `AddOurAddress` method. + +The same logic is also internally used by the address book for removing +addresses of a peer that is [marked as a bad peer](#bad-peers). + +The entry registered with the peer ID of the address in the `addrLookup` table, +which is the last address added with that ID, is removed from all buckets where +it is stored and from the `addrLookup` table. + +> FIXME: is it possible that addresses with the same ID as the removed address, +> but with distinct network addresses, are kept in buckets of the address book? +> While they will not be accessible anymore, as there is no reference to them +> in the `addrLookup`, they will still be there. 
+
+## Persistence
+
+The `loadFromFile` method, called when the address book is started, reads
+address book entries from a file, passed to the address book constructor.
+The file, at this point, does not need to exist.
+
+The `saveRoutine` is started when the address book is started.
+It saves the address book to the configured file every `dumpAddressInterval`,
+hard-coded to 2 minutes.
+It is also possible to save the content of the address book using the `Save`
+method.
+Saving the address book content to a file acquires the address book lock, also
+employed by all other public methods.
diff --git a/spec/p2p/implementation/configuration.md b/spec/p2p/implementation/configuration.md
new file mode 100644
index 0000000000..9f172c22c8
--- /dev/null
+++ b/spec/p2p/implementation/configuration.md
@@ -0,0 +1,49 @@
+# CometBFT p2p configuration
+
+This document contains configurable parameters a node operator can use to tune the p2p behaviour.
+
+| Parameter| Default| Description |
+| --- | --- | ---|
+| ListenAddress | "tcp://0.0.0.0:26656" | Address to listen for incoming connections (0.0.0.0:0 means any interface, any port) |
+| ExternalAddress | "" | Address to advertise to peers for them to dial |
+| [Seeds](./pex-protocol.md#seed-nodes) | empty | Comma separated list of seed nodes to connect to (ID@host:port )|
+| [Persistent peers](./peer_manager.md#persistent-peers) | empty | Comma separated list of nodes to keep persistent connections to (ID@host:port ) |
+| [AddrBook](./addressbook.md) | defaultAddrBookPath | Path to address book |
+| AddrBookStrict | true | Set true for strict address routability rules and false for private or local networks |
+| [MaxNumInboundPeers](./switch.md#accepting-peers) | 40 | Maximum number of inbound peers |
+| [MaxNumOutboundPeers](./peer_manager.md#ensure-peers) | 10 | Maximum number of outbound peers to connect to, excluding persistent peers |
+| [UnconditionalPeers](./switch.md#accepting-peers) | empty | These are IDs of the
peers which are allowed to be (re)connected as both inbound or outbound regardless of whether the node reached `max_num_inbound_peers` or `max_num_outbound_peers` or not. |
+| PersistentPeersMaxDialPeriod| 0 * time.Second | Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) |
+| FlushThrottleTimeout |100 * time.Millisecond| Time to wait before flushing messages out on the connection |
+| MaxPacketMsgPayloadSize | 1024 | Maximum size of a message packet payload, in bytes |
+| SendRate | 5120000 (5 MB/s) | Rate at which packets can be sent, in bytes/second |
+| RecvRate | 5120000 (5 MB/s) | Rate at which packets can be received, in bytes/second|
+| [PexReactor](./pex.md) | true | Set true to enable the peer-exchange reactor |
+| SeedMode | false | Seed mode, in which the node constantly crawls the network looking for peers. Does not work if the peer-exchange reactor is disabled. |
+| PrivatePeerIDs | empty | Comma separated list of peer IDs that we do not add to the address book or gossip to other peers. They stay private to us. |
+| AllowDuplicateIP | false | Toggle to disable guard against peers connecting from the same ip.|
+| [HandshakeTimeout](./transport.md#connection-upgrade) | 20 * time.Second | Timeout for handshake completion between peers |
+| [DialTimeout](./switch.md#dialing-peers) | 3 * time.Second | Timeout for dialing a peer |
+
+
+These parameters can be set using the `$CMTHOME/config/config.toml` file.
A subset of them can also be changed via command line using the following command line flags: + +| Parameter | Flag | Example | +| --- | --- | --- | +| Listen address| `p2p.laddr` | "tcp://0.0.0.0:26656" | +| Seed nodes | `p2p.seeds` | `--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”` | +| Persistent peers | `p2p.persistent_peers` | `--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”` | +| Unconditional peers | `p2p.unconditional_peer_ids` | `--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` | +| PexReactor | `p2p.pex` | `--p2p.pex` | +| Seed mode | `p2p.seed_mode` | `--p2p.seed_mode` | +| Private peer ids | `p2p.private_peer_ids` | `--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”` | + + **Note on persistent peers** + + If `persistent_peers_max_dial_period` is set greater than zero, the +pause between each dial to each persistent peer will not exceed `persistent_peers_max_dial_period` +during exponential backoff and we keep trying again without giving up. + +If `seeds` and `persistent_peers` intersect, +the user will be warned that seeds may auto-close connections +and that the node may not be able to keep the connection persistent. diff --git a/spec/p2p/implementation/peer_manager.md b/spec/p2p/implementation/peer_manager.md new file mode 100644 index 0000000000..4f83cc6dfd --- /dev/null +++ b/spec/p2p/implementation/peer_manager.md @@ -0,0 +1,147 @@ +# Peer Manager + +The peer manager is responsible for establishing connections with peers. +It defines when a node should dial peers and which peers it should dial. +The peer manager is not an implementation abstraction of the p2p layer, +but a role that is played by the [PEX reactor](./pex.md). 
+
+## Outbound peers
+
+The `ensurePeersRoutine` is a persistent routine intended to ensure that a node
+is connected to `MaxNumOutboundPeers` outbound peers.
+This routine is continuously executed by regular nodes, i.e., nodes not
+operating in seed mode, as part of the PEX reactor implementation.
+
+The logic defining when the node should dial peers, for selecting peers to dial
+and for actually dialing them is implemented in the `ensurePeers` method.
+This method is periodically invoked -- every `ensurePeersPeriod`, with a default
+value of 30 seconds -- by the `ensurePeersRoutine`.
+
+A node is expected to dial peers whenever the number of outbound peers is lower
+than the configured `MaxNumOutboundPeers` parameter.
+The current number of outbound peers is retrieved from the switch, using the
+`NumPeers` method, which also reports the number of nodes to which the switch
+is currently dialing.
+If the number of outbound peers plus the number of dialing routines equals
+`MaxNumOutboundPeers`, nothing is done.
+Otherwise, the `ensurePeers` method will attempt to dial node addresses in
+order to reach the target number of outbound peers.
+
+Once it is determined that the node needs additional outbound peers, the node queries
+the address book for candidate addresses.
+This is done using the [`PickAddress`](./addressbook.md#pick-address) method,
+which returns an address selected at random from the address book, with some bias
+towards new or old addresses.
+When the node has up to 3 outbound peers, the adopted bias is towards old
+addresses, i.e., addresses of peers that are believed to be "good".
+When the node has 5 or more outbound peers, the adopted bias is towards new
+addresses, i.e., addresses of peers about which the node has not yet collected
+much information.
+So, the more outbound peers a node has, the less conservative it will be when
+selecting new peers.
+
+The selected peer addresses are then dialed in parallel, by starting a dialing
+routine per peer address.
+Dialing a peer address can fail for multiple reasons.
+The node might have attempted to dial the peer too many times.
+In this case, the peer address is marked as bad and removed from the address book.
+The node might have attempted and failed to dial the peer recently
+and the exponential `backoffDuration` has not yet passed.
+Or the current connection attempt might fail, which is registered in the address book.
+None of these errors are explicitly handled by the `ensurePeers` method, which
+also does not wait until the connections are established.
+
+The third step of the `ensurePeers` method is to ensure that the address book
+has enough addresses.
+This is done, first, by [reinstating banned peers](./addressbook.md#Reinstating-addresses)
+whose ban period has expired.
+Then, the node randomly selects a connected peer, which can be either an
+inbound or outbound peer, to [request addresses](./pex-protocol.md#Requesting-Addresses)
+using the PEX protocol.
+Last, and this action is only performed if the node could not retrieve any new
+address to dial from the address book, the node dials the configured seed nodes
+in order to establish a connection to at least one of them.
+
+### Fast dialing
+
+As above described, seed nodes are actually the last source of peer addresses
+for regular nodes.
+They are contacted by a node when, after an invocation of the `ensurePeers`
+method, no suitable peer address to dial is retrieved from the address book
+(e.g., because it is empty).
+
+Once a connection with a seed node is established, the node immediately
+[sends a PEX request](./pex-protocol.md#Requesting-Addresses) to it, as it is
+added as an outbound peer.
+When the corresponding PEX response is received, the addresses provided by the
+seed node are added to the address book.
+As a result, in the next invocation of the `ensurePeers` method, the node
+should be able to dial some of the peer addresses provided by the seed node.
+
+However, as observed in this [issue](https://github.com/tendermint/tendermint/issues/2093),
+it can take some time, up to `ensurePeersPeriod` or 30 seconds, between when the
+node receives new peer addresses and when it dials the received addresses.
+To avoid this delay, which can be particularly relevant when the node has no
+peers, a node immediately attempts to dial peer addresses when they are
+received from a peer that is locally configured as a seed node.
+
+> FIXME: The current logic was introduced in [#3762](https://github.com/tendermint/tendermint/pull/3762).
+> Although it fixes the issue --- the delay between receiving an address and
+> dialing the peer --- it does not impose any limit on how many addresses are
+> dialed in this scenario.
+> So, all addresses received from a seed node are dialed, regardless of the
+> current number of outbound peers, the number of dialing routines, or the
+> `MaxNumOutboundPeers` parameter.
+>
+> Issue [#9548](https://github.com/tendermint/tendermint/issues/9548) was
+> created to handle this situation.
+
+### First round
+
+When the PEX reactor is started, the `ensurePeersRoutine` is created and it
+runs throughout the operation of a node, periodically invoking the `ensurePeers`
+method.
+However, if the node already has some peers, either inbound or outbound, or is
+dialing some addresses when the persistent routine is started, the
+first invocation of `ensurePeers` is delayed by a random amount of time from 0
+to `ensurePeersPeriod`.
+
+### Persistent peers
+
+The node configuration can contain a list of *persistent peers*.
+Those peers have preferential treatment compared to regular peers and the node
+is always trying to connect to them.
+Moreover, these peers are not removed from the address book in the case of
+multiple failed dial attempts.
+
+On startup, the node immediately tries to dial the configured persistent peers
+by calling the switch's [`DialPeersAsync`](./switch.md#manual-operation) method.
+This is not done in the p2p package, but it is part of the procedure to set up a node.
+
+> TODO: the handling of persistent peers should be described in more detail.
+
+### Life cycle
+
+The picture below is a first attempt at illustrating the life cycle of an outbound peer:
+
+
+
+A peer can be in the following states:
+
+- Candidate peers: peer addresses stored in the address book, that can be
+  retrieved via the [`PickAddress`](./addressbook.md#pick-address) method
+- [Dialing](./switch.md#dialing-peers): peer addresses that are currently being
+  dialed. This state exists to ensure that a single dialing routine exists per peer.
+- [Reconnecting](./switch.md#reconnect-to-peer): persistent peers to which a node
+  is currently reconnecting, as a previous connection attempt has failed.
+- Connected peers: peers that a node has successfully dialed, added as outbound peers.
+- [Bad peers](./addressbook.md#bad-peers): peers marked as bad in the address
+  book due to exhibited [misbehavior](./pex-protocol.md#misbehavior).
+  Peers can be reinstated after being marked as bad.
+
+## Pending of documentation
+
+The `dialSeeds` method of the PEX reactor.
+
+The `dialPeer` method of the PEX reactor.
+This includes `dialAttemptsInfo`, `maxBackoffDurationForPeer` methods.
diff --git a/spec/p2p/implementation/pex-protocol.md b/spec/p2p/implementation/pex-protocol.md
new file mode 100644
index 0000000000..760a56bd9d
--- /dev/null
+++ b/spec/p2p/implementation/pex-protocol.md
@@ -0,0 +1,240 @@
+# Peer Exchange Protocol
+
+The Peer Exchange (PEX) protocol enables nodes to exchange peer addresses, thus
+implementing a peer discovery mechanism.
+
+The PEX protocol uses two messages:
+
+- `PexRequest`: sent by a node to [request](#requesting-addresses) peer
+  addresses to a peer
+- `PexAddrs`: a list of peer addresses [provided](#providing-addresses) to a
+  peer as response to a `PexRequest` message
+
+While all nodes, with few exceptions, participate in the PEX protocol,
+a subset of nodes, configured as [seed nodes](#seed-nodes) have a particular
+role in the protocol.
+They crawl the network, connecting to random peers, in order to learn as many
+peer addresses as possible to provide to other nodes.
+
+## Requesting Addresses
+
+A node requests peer addresses by sending a `PexRequest` message to a peer.
+
+For regular nodes, not operating in seed mode, a PEX request is sent when
+the node *needs* peer addresses, a condition checked:
+
+1. When an *outbound* peer is added, causing the node to request addresses from
+   the new peer
+2. Periodically, by the `ensurePeersRoutine`, causing the node to request peer
+   addresses to a randomly selected peer
+
+A node needs more peer addresses when its address book has
+[less than 1000 records](./addressbook.md#need-for-addresses).
+It is thus reasonable to assume that the common case is that a peer needs more
+peer addresses, so that PEX requests are sent whenever the above two situations happen.
+
+A PEX request is sent when a new *outbound* peer is added.
+The same does not happen with new inbound peers because the implementation
+considers outbound peers, that the node has chosen for dialing, more
+trustworthy than inbound peers, that the node has accepted.
+Moreover, when a node is short of peer addresses, it dials the configured seed nodes;
+since they are added as outbound peers, the node can immediately request peer addresses.
+
+The `ensurePeersRoutine` periodically checks, by default every 30 seconds (`ensurePeersPeriod`),
+whether the node has enough outbound peers.
+If it does not, the node tries dialing some peer addresses stored in the address book.
+As part of this procedure, the node selects a peer at random,
+from the set of connected peers retrieved from the switch,
+and sends a PEX request to the selected peer.
+
+Sending a PEX request to a peer is implemented by the `RequestAddrs` method of
+the PEX reactor.
+
+### Responses
+
+After a PEX request is sent to a peer, the node expects to receive,
+as a response, a `PexAddrs` message from the peer.
+This message encodes a list of peer addresses that are
+[added to address book](./addressbook.md#adding-addresses),
+having the peer from which the PEX response was received as their source.
+
+Received PEX responses are handled by the `ReceiveAddrs` method of the PEX reactor.
+In the case of a PEX response received from a peer which is configured as
+a seed node, the PEX reactor attempts immediately to dial the provided peer
+addresses, as detailed [here](./peer_manager.md#fast-dialing).
+
+### Misbehavior
+
+Sending multiple PEX requests to a peer, before receiving a reply from it,
+is considered a misbehavior.
+To prevent it, the node maintains a `requestsSent` set of outstanding
+requests, indexed by destination peers.
+While a peer ID is present in the `requestsSent` set, the node does not send
+further PEX requests to that peer.
+A peer ID is removed from the `requestsSent` set when a PEX response is
+received from it.
+
+Sending a PEX response to a peer that has not requested peer addresses
+is also considered a misbehavior.
+So, if a PEX response is received from a peer that is not registered in
+the `requestsSent` set, an `ErrUnsolicitedList` error is produced.
+This leads the peer to be disconnected and [marked as a bad peer](./addressbook.md#bad-peers).
+
+## Providing Addresses
+
+When a node receives a `PexRequest` message from a peer,
+it replies with a `PexAddrs` message.
+
+This message encodes a [random selection of peer addresses](./addressbook.md#random-selection)
+retrieved from the address book.
+
+Sending a PEX response to a peer is implemented by the `SendAddrs` method of
+the PEX reactor.
+
+### Misbehavior
+
+Requesting peer addresses too often is considered a misbehavior.
+Since nodes are expected to send PEX requests every `ensurePeersPeriod`,
+the minimum accepted interval between requests from the same peer is set
+to `ensurePeersPeriod / 3`, 10 seconds by default.
+
+The `receiveRequest` method is responsible for verifying this condition.
+The node keeps a `lastReceivedRequests` map with the time of the last PEX
+request received from every peer.
+If the interval between successive requests is less than the minimum accepted
+one, the peer is disconnected and [marked as a bad peer](./addressbook.md#bad-peers).
+An exception is made for the first two PEX requests received from a peer.
+
+> The probable reason is that, when a new peer is added, the two conditions for
+> a node to request peer addresses can be triggered with an interval lower than
+> the minimum accepted interval.
+> Since this is legitimate behavior, it should not be punished.
+
+## Seed nodes
+
+A seed node is a node configured to operate in `SeedMode`.
+
+### Crawling peers
+
+Seed nodes crawl the network, connecting to random peers and sending PEX
+requests to them, in order to learn as many peer addresses as possible.
+More specifically, a node operating in seed mode sends PEX requests in two cases:
+
+1. When an outbound peer is added, and the seed node needs more peer addresses,
+   it requests peer addresses to the new peer
+2. Periodically, the `crawlPeersRoutine` sends PEX requests to a random set of
+   peers, whose addresses are registered in the Address Book
+
+The first case also applies for nodes not operating in seed mode.
+The second case replaces, for seed nodes, the periodic requests sent by regular
+nodes: seed nodes do not run the `ensurePeersRoutine`, as regular nodes do,
+but instead run the `crawlPeersRoutine`, which is not run by regular nodes.
+
+The `crawlPeersRoutine` periodically, every 30 seconds (`crawlPeerPeriod`),
+starts a new peer discovery round.
+First, the seed node retrieves a random selection of peer addresses from its
+Address Book.
+This selection is produced in the same way as in the random selection of peer
+addresses that are [provided](#providing-addresses) to a requesting peer.
+Peers that the seed node has crawled recently,
+less than 2 minutes ago (`minTimeBetweenCrawls`), are removed from this selection.
+The remaining peer addresses are registered in the `crawlPeerInfos` table.
+
+The seed node is not necessarily connected to the peer whose address is
+selected for each round of crawling.
+So, the seed node dials the selected peer addresses.
+This is performed in foreground, one peer at a time.
+As a result, a round of crawling can take a substantial amount of time.
+For each selected peer it succeeds dialing to, and this includes already connected
+peers, the seed node sends a PEX request.
+
+Dialing a selected peer address can fail for multiple reasons.
+The seed node might have attempted to dial the peer too many times.
+In this case, the peer address is marked as [bad in the address book](./addressbook.md#bad-peers).
+The seed node might have attempted to dial the peer recently, without success,
+and the exponential `backoffDuration` has not yet passed.
+Or the current connection attempt might fail, which is registered in the address book.
+
+Failures to dial a peer address produce information that is important for
+a seed node.
+They indicate that a peer is unreachable, or is not operating correctly, and
+therefore its address should not be provided to other nodes.
+This occurs when, due to multiple failed connection attempts or authentication
+failures, the peer address ends up being removed from the address book.
+As a result, the periodic crawling of selected peers not only enables the
+discovery of new peers, but also allows the seed node to stop providing
+addresses of bad peers.
+
+### Offering addresses
+
+Nodes operating in seed mode handle PEX requests differently than regular
+nodes, whose operation is described [here](#providing-addresses).
+
+This distinction exists because nodes dial a seed node with the main, if not
+exclusive goal of retrieving peer addresses.
+In other words, nodes do not dial a seed node because they intend to have it as
+a peer in the multiple CometBFT protocols, but because they believe that a
+seed node is a good source of addresses of nodes to which they can establish
+connections and interact in the multiple CometBFT protocols.
+
+So, when a seed node receives a `PexRequest` message from an inbound peer,
+it sends a `PexAddrs` message, containing a selection of peer
+addresses, back to the peer and *disconnects* from it.
+Seed nodes therefore treat inbound connections from peers as short-term
+connections, exclusively intended to retrieve peer addresses.
+Once the requested peer addresses are sent, the connection with the peer is closed.
+
+Moreover, the selection of peer addresses provided to inbound peers by a seed
+node, although still essentially random, has a [bias toward old
+addresses](./addressbook.md#random-selection-with-bias).
+The selection bias is defined by `biasToSelectNewPeers`, hard-coded to `30%`,
+meaning that `70%` of the peer addresses provided by a seed node are expected
+to be old addresses.
+Although this nomenclature is not clear, *old* addresses are the addresses that
+survived the most in the address book, that is, are addresses that the seed
+node believes to be from *good* peers (more details [here](./addressbook.md#good-peers)).
+
+Another distinction is on the handling of potential [misbehavior](#misbehavior-1)
+of peers requesting addresses.
+A seed node does not enforce, a priori, a minimal interval between PEX requests
+from inbound peers.
+Instead, it does not reply to more than one PEX request per peer inbound
+connection, and, as above mentioned, it disconnects from incoming peers after
+responding to them.
+If the same peer dials the seed node again and requests peer addresses, the
+seed node will reply to this peer as if it were the first time it has requested
+peer addresses.
+
+> This is more an implementation restriction than a desired behavior.
+> The `lastReceivedRequests` map stores the last time a PEX request was
+> received from a peer, and the entry relative to a peer is removed from this
+> map when the peer is disconnected.
+>
+> It is debatable whether this approach indeed prevents abuse against seed nodes.
+
+### Disconnecting from peers
+
+Seed nodes treat connections with peers as short-term connections, which are
+mainly, if not exclusively, intended to exchange peer addresses.
+
+In the case of inbound peers, that have dialed the seed node, the intent of the
+connection is achieved once a PEX response is sent to the peer.
+The seed node thus disconnects from an inbound peer after sending a `PexAddrs`
+message to it.
+
+In the case of outbound peers, which the seed node has dialed for crawling peer
+addresses, the intent of the connection is essentially achieved when a PEX
+response is received from the peer.
+The seed node, however, does not disconnect from a peer after receiving a
+selection of peer addresses from it.
+As a result, after some rounds of crawling, a seed node will have established
+connections to a substantial amount of peers.
+
+To cope with the existence of multiple connections with peers that no longer
+have a purpose for the seed node, the `crawlPeersRoutine` also invokes, after
+each round of crawling, the `attemptDisconnects` method.
+This method retrieves the list of connected peers from the switch, and +disconnects from peers that are not persistent peers, and with which a +connection is established for more than `SeedDisconnectWaitPeriod`. +This period is a configuration parameter, set to 28 hours when the PEX reactor +is created by the default node constructor. diff --git a/spec/p2p/implementation/pex.md b/spec/p2p/implementation/pex.md new file mode 100644 index 0000000000..8243eaa559 --- /dev/null +++ b/spec/p2p/implementation/pex.md @@ -0,0 +1,111 @@ +# PEX Reactor + +The PEX reactor is one of the reactors running in a CometBFT node. + +Its implementation is located in the `p2p/pex` package, and it is considered +part of the implementation of the p2p layer. + +This document overviews the implementation of the PEX reactor, describing how +the methods from the `Reactor` interface are implemented. + +The actual operation of the PEX reactor is presented in documents describing +the roles played by the PEX reactor in the p2p layer: + +- [Address Book](./addressbook.md): stores known peer addresses and information + about peers to which the node is connected or has attempted to connect +- [Peer Manager](./peer_manager.md): manages connections established with peers, + defining when a node should dial peers and which peers it should dial +- [Peer Exchange protocol](./pex-protocol.md): enables nodes to exchange peer + addresses, thus implementing a peer discovery service + +## OnStart + +The `OnStart` method implements `BaseService` and starts the PEX reactor. + +The [address book](./addressbook.md), which is a `Service` is started. +This loads the address book content from disk, +and starts a routine that periodically persists the address book content to disk. + +The PEX reactor is configured with the addresses of a number of seed nodes, +the `Seeds` parameter of the `ReactorConfig`. 
+The addresses of seed nodes are parsed into `NetAddress` instances and resolved +into IP addresses, which is implemented by the `checkSeeds` method. +Valid seed node addresses are stored in the `seedAddrs` field, +and are used by the `dialSeeds` method to contact the configured seed nodes. + +The last action is to start one of the following persistent routines, based on +the `SeedMode` configuration parameter: + +- Regular nodes run the `ensurePeersRoutine` to check whether the node has + enough outbound peers, dialing peers when necessary +- Seed nodes run the `crawlPeersRoutine` to periodically start a new round + of [crawling](./pex-protocol.md#Crawling-peers) to discover as many peer + addresses as possible + +### Errors + +Errors encountered when loading the address book from disk are returned, +and prevent the reactor from being started. +An exception is made for the `service.ErrAlreadyStarted` error, which is ignored. + +Errors encountered when parsing the configured addresses of seed nodes +are returned and cause the reactor startup to fail. +An exception is made for DNS resolution `ErrNetAddressLookup` errors, +which are not deemed fatal and are only logged as invalid addresses. + +If none of the configured seed node addresses is valid, and the loaded address +book is empty, the reactor is not started and an error is returned. + +## OnStop + +The `OnStop` method implements `BaseService` and stops the PEX reactor. + +The address book routine that periodically saves its content to disk is stopped. + +## GetChannels + +The `GetChannels` method, from the `Reactor` interface, returns the descriptor +of the channel used by the PEX protocol. + +The channel ID is `PexChannel` (0), with priority `1`, send queue capacity of +`10`, and maximum message size of `64000` bytes. + +## AddPeer + +The `AddPeer` method, from the `Reactor` interface, +adds a new peer to the PEX protocol. 
+
+If the new peer is an **inbound peer**, i.e., if the peer has dialed the node,
+the peer's address is [added to the address book](./addressbook.md#adding-addresses).
+Since the peer was authenticated when establishing a secret connection with it,
+the source of the peer address is trusted, and its source is set by the peer itself.
+In the case of an outbound peer, the node should already have its address in
+the address book, as the switch has dialed the peer.
+
+If the peer is an **outbound peer**, i.e., if the node has dialed the peer,
+and the PEX protocol needs more addresses,
+the node [sends a PEX request](./pex-protocol.md#Requesting-Addresses) to the peer.
+The same is not done when inbound peers are added because they are deemed less
+trustworthy than outbound peers.
+
+## RemovePeer
+
+The `RemovePeer` method, from the `Reactor` interface,
+removes a peer from the PEX protocol.
+
+The peer's ID is removed from the tables tracking PEX requests
+[sent](./pex-protocol.md#misbehavior) but not yet replied
+and PEX requests [received](./pex-protocol.md#misbehavior-1).
+
+## Receive
+
+The `Receive` method, from the `Reactor` interface,
+handles a message received by the PEX protocol.
+
+A node receives two types of messages as part of the PEX protocol:
+
+- `PexRequest`: a request for addresses received from a peer, handled as
+  described [here](./pex-protocol.md#providing-addresses)
+- `PexAddrs`: a list of addresses received from a peer, as a response to a PEX
+  request sent by the node, as described [here](./pex-protocol.md#responses)
+
diff --git a/spec/p2p/implementation/switch.md b/spec/p2p/implementation/switch.md
new file mode 100644
index 0000000000..4497fef96e
--- /dev/null
+++ b/spec/p2p/implementation/switch.md
@@ -0,0 +1,238 @@
+# Switch
+
+The switch is a core component of the p2p layer.
+It manages the procedures for [dialing peers](#dialing-peers) and +[accepting](#accepting-peers) connections from peers, which are actually +implemented by the [transport](./transport.md). +It also manages the reactors, i.e., protocols implemented by the node that +interact with its peers. +Once a connection with a peer is established, the peer is [added](#add-peer) to +the switch and all registered reactors. +Reactors may also instruct the switch to [stop a peer](#stop-peer), namely +disconnect from it. +The switch, in this case, makes sure that the peer is removed from all +registered reactors. + +## Dialing peers + +Dialing a peer is implemented by the `DialPeerWithAddress` method. + +This method is invoked by the [peer manager](./peer_manager.md#ensure-peers) +to dial a peer address and establish a connection with an outbound peer. + +The switch keeps a single dialing routine per peer ID. +This is ensured by keeping a synchronized map `dialing` with the IDs of peers +to which the peer is dialing. +A peer ID is added to `dialing` when the `DialPeerWithAddress` method is called +for that peer, and it is removed when the method returns for whatever reason. +The method returns immediately when invoked for a peer which ID is already in +the `dialing` structure. + +The actual dialing is implemented by the [`Dial`](./transport.md#dial) method +of the transport configured for the switch, in the `addOutboundPeerWithConfig` +method. +If the transport succeeds establishing a connection, the returned `Peer` is +added to the switch using the [`addPeer`](#add-peer) method. +This operation can fail, returning an error. In this case, the switch invokes +the transport's [`Cleanup`](./transport.md#cleanup) method to clean any resources +associated with the peer. + +If the transport fails to establish a connection with the peer that is configured +as a persistent peer, the switch spawns a routine to [reconnect to the peer](#reconnect-to-peer). 
+If the peer is already in the `reconnecting` state, the spawned routine has no
+effect and returns immediately.
+This is in fact a likely scenario, as the `reconnectToPeer` routine relies on
+this same `DialPeerWithAddress` method for dialing peers.
+
+### Manual operation
+
+The `DialPeersAsync` method receives a list of peer addresses (strings)
+and dials all of them in parallel.
+It is invoked in two situations:
+
+- In the [setup](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L987)
+of a node, to establish connections with every configured persistent peer
+- In the RPC package, to implement two unsafe RPC commands, not used in production:
+  [`DialSeeds`](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core/net.go#L47) and
+  [`DialPeers`](https://github.com/cometbft/cometbft/blob/v0.34.x/rpc/core/net.go#L87)
+
+The received list of peer addresses to dial is parsed into `NetAddress` instances.
+In case of parsing errors, the method returns. An exception is made for
+DNS resolution `ErrNetAddressLookup` errors, which do not interrupt the procedure.
+
+As the peer addresses provided to this method are typically not known by the node,
+contrary to the addresses dialed using the `DialPeerWithAddress` method,
+they are added to the node's address book, which is persisted to disk.
+
+The switch dials the provided peers in parallel.
+The list of peer addresses is randomly shuffled, and for each peer a routine is
+spawned.
+Each routine sleeps for a random interval, up to 3 seconds, then invokes the
+`DialPeerWithAddress` method that actually dials the peer.
+
+### Reconnect to peer
+
+The `reconnectToPeer` method is invoked when a connection attempt to a peer fails,
+and the peer is configured as a persistent peer.
+
+The `reconnecting` synchronized map keeps the peers in this state, identified
+by their IDs (string).
+This should ensure that a single instance of this method is running at any time.
+The peer is kept in this map while this method is running for it: it is set at
+the beginning, and removed when the method returns for whatever reason.
+If the peer is already in the `reconnecting` state, nothing is done.
+
+The remainder of the method performs multiple connection attempts to the peer,
+via the `DialPeerWithAddress` method.
+If a connection attempt succeeds, the method returns and the routine finishes.
+The same applies when an `ErrCurrentlyDialingOrExistingAddress` error is
+returned by the dialing method, as it indicates that the peer is already connected
+or that another routine is attempting to (re)connect to it.
+
+A first set of connection attempts is done at (about) regular intervals.
+More precisely, between two attempts, the switch waits for an interval of
+`reconnectInterval`, hard-coded to 5 seconds, plus a random jitter up to
+`dialRandomizerIntervalMilliseconds`, hard-coded to 3 seconds.
+At most `reconnectAttempts`, hard-coded to 20, are made using this
+regular-interval approach.
+
+A second set of connection attempts is done with exponentially increasing
+intervals.
+The base interval `reconnectBackOffBaseSeconds` is hard-coded to 3 seconds,
+which is also the increasing factor.
+The exponentially increasing dialing interval is adjusted as well by a random
+jitter up to `dialRandomizerIntervalMilliseconds`.
+At most `reconnectBackOffAttempts`, hard-coded to 10, are made using this approach.
+
+> Note: the first sleep interval, to which a random jitter is applied, is 1,
+> not `reconnectBackOffBaseSeconds`, as the first exponent is `0`...
+
+## Accepting peers
+
+The `acceptRoutine` method is a persistent routine that handles connections
+accepted by the transport configured for the switch.
+
+The [`Accept`](./transport.md#accept) method of the configured transport
+returns a `Peer` with which an inbound connection was established.
+The switch accepts a new peer if the maximum number of inbound peers was not
+reached, or if the peer was configured as an _unconditional peer_.
+The maximum number of inbound peers is determined by the `MaxNumInboundPeers`
+configuration parameter, whose default value is `40`.
+
+If accepted, the peer is added to the switch using the [`addPeer`](#add-peer) method.
+If the switch does not accept the established incoming connection, or if the
+`addPeer` method returns an error, the switch invokes the transport's
+[`Cleanup`](./transport.md#cleanup) method to clean any resources associated
+with the peer.
+
+The transport's `Accept` method can also return a number of errors.
+Errors of `ErrRejected` or `ErrFilterTimeout` types are ignored,
+an `ErrTransportClosed` causes the accept routine to be interrupted,
+while other errors cause the routine to panic.
+
+> TODO: which errors can cause the routine to panic?
+
+## Add peer
+
+The `addPeer` method adds a peer to the switch,
+either after dialing (by `addOutboundPeerWithConfig`, called by `DialPeerWithAddress`)
+a peer and establishing an outbound connection,
+or after accepting (`acceptRoutine`) a peer and establishing an inbound connection.
+
+The first step is to invoke the `filterPeer` method.
+It checks whether the peer is already in the set of connected peers,
+and whether any of the configured `peerFilter` methods reject the peer.
+If the peer is already present or it is rejected by any filter, the `addPeer`
+method fails and returns an error.
+
+Then, the new peer is started, added to the set of connected peers, and added
+to all reactors.
+More precisely, the new peer's information is first provided to every
+reactor (`InitPeer` method).
+Next, the peer's sending and receiving routines are started, and the peer is
+added to the set of connected peers.
+These two operations can fail, causing `addPeer` to return an error.
+Then, in the absence of previous errors, the peer is added to every reactor (`AddPeer` method).
+
+> Adding the peer to the peer set returns an `ErrSwitchDuplicatePeerID` error
+> when a peer with the same ID is already present.
+>
+> TODO: Starting a peer could be reduced as starting the MConn with that peer?
+
+## Stop peer
+
+There are two methods for stopping a peer, namely disconnecting from it, and
+removing it from the table of connected peers.
+
+The `StopPeerForError` method is invoked to stop a peer due to an external
+error, which is provided to the method as a generic "reason".
+
+The `StopPeerGracefully` method stops a peer in the absence of errors or, more
+precisely, without providing to the switch any "reason" for that.
+
+In both cases the `Peer` instance is stopped, the peer is removed from all
+registered reactors, and finally from the list of connected peers.
+
+> Issue is mentioned in
+> the internal `stopAndRemovePeer` method explaining why removing the peer from
+> the list of connected peers is the last action taken.
+
+When there is a "reason" for stopping the peer (`StopPeerForError` method)
+and the peer is a persistent peer, the method creates a routine to attempt
+reconnecting to the peer address, using the `reconnectToPeer` method.
+If the peer is an outbound peer, the peer's address is known, since the switch
+has dialed the peer.
+Otherwise, the peer address is retrieved from the `NodeInfo` instance from the
+connection handshake.
+
+## Add reactor
+
+The `AddReactor` method registers a `Reactor` to the switch.
+
+The reactor is associated to the set of channel ids it employs.
+Two reactors (in the same node) cannot share the same channel id.
+
+There is a call back to the reactor, in which the switch passes itself to the
+reactor.
+
+## Remove reactor
+
+The `RemoveReactor` method unregisters a `Reactor` from the switch.
+
+The reactor is disassociated from the set of channel ids it employs.
+ +There is a call back to the reactor, in which the switch passes `nil` to the +reactor. + +## OnStart + +This is a `BaseService` method. + +All registered reactors are started. + +The switch's `acceptRoutine` is started. + +## OnStop + +This is a `BaseService` method. + +All (connected) peers are stopped and removed from the peer's list using the +`stopAndRemovePeer` method. + +All registered reactors are stopped. + +## Broadcast + +This method broadcasts a message on a channel, by sending the message in +parallel to all connected peers. + +The method spawns a thread for each connected peer, invoking the `Send` method +provided by each `Peer` instance with the provided message and channel ID. +The return value (a boolean) of these calls are redirected to a channel that is +returned by the method. + +> TODO: detail where this method is invoked: +> +> - By the consensus protocol, in `broadcastNewRoundStepMessage`, +> `broadcastNewValidBlockMessage`, and `broadcastHasVoteMessage` +> - By the state sync protocol diff --git a/spec/p2p/implementation/transport.md b/spec/p2p/implementation/transport.md new file mode 100644 index 0000000000..20d4db87a4 --- /dev/null +++ b/spec/p2p/implementation/transport.md @@ -0,0 +1,222 @@ +# Transport + +The transport establishes secure and authenticated connections with peers. + +The transport [`Dial`](#dial)s peer addresses to establish outbound connections, +and [`Listen`](#listen)s in a configured network address +to [`Accept`](#accept) inbound connections from peers. + +The transport establishes raw TCP connections with peers +and [upgrade](#connection-upgrade) them into authenticated secret connections. +The established secret connection is then wrapped into `Peer` instance, which +is returned to the caller, typically the [switch](./switch.md). + +## Dial + +The `Dial` method is used by the switch to establish an outbound connection with a peer. 
+It is a synchronous method, which blocks until a connection is established or an error occurs. +The method returns an outbound `Peer` instance wrapping the established connection. + +The transport first dials the provided peer's address to establish a raw TCP connection. +The dialing maximum duration is determined by `dialTimeout`, hard-coded to 1 second. +The established raw connection is then submitted to a set of [filters](#connection-filtering), +which can reject it. +If the connection is not rejected, it is recorded in the table of established connections. + +The established raw TCP connection is then [upgraded](#connection-upgrade) into +an authenticated secret connection. +This procedure should ensure, in particular, that the public key of the remote peer +matches the ID of the dialed peer, which is part of peer address provided to this method. +In the absence of errors, +the established secret connection (`conn.SecretConnection` type) +and the information about the peer (`NodeInfo` record) retrieved and verified +during the version handshake, +are wrapped into an outbound `Peer` instance and returned to the switch. + +## Listen + +The `Listen` method produces a TCP listener instance for the provided network +address, and spawns an `acceptPeers` routine to handle the raw connections +accepted by the listener. +The `NetAddress` method exports the listen address configured for the transport. + +The maximum number of simultaneous incoming connections accepted by the listener +is bound to `MaxNumInboundPeer` plus the configured number of unconditional peers, +using the `MultiplexTransportMaxIncomingConnections` option, +in the node [initialization](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L563). + +This method is called when a node is [started](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L974). +In case of errors, the `acceptPeers` routine is not started and the error is returned. 
+
+## Accept
+
+The `Accept` method returns to the switch inbound connections established with a peer.
+It is a synchronous method, which blocks until a connection is accepted or an error occurs.
+The method returns an inbound `Peer` instance wrapping the established connection.
+
+The transport handles incoming connections in the `acceptPeers` persistent routine.
+This routine is started by the [`Listen`](#listen) method
+and accepts raw connections from a TCP listener.
+A new routine is spawned for each accepted connection.
+The raw connection is submitted to a set of [filters](#connection-filtering),
+which can reject it.
+If the connection is not rejected, it is recorded in the table of established connections.
+
+The established raw TCP connection is then [upgraded](#connection-upgrade) into
+an authenticated secret connection.
+The established secret connection (`conn.SecretConnection` type),
+the information about the peer (`NodeInfo` record) retrieved and verified
+during the version handshake,
+as well as any error returned in this process are added to a queue of accepted connections.
+This queue is consumed by the `Accept` method.
+
+> Handling accepted connections asynchronously was introduced due to this issue:
+> 
+
+## Connection Filtering
+
+The `filterConn` method is invoked for every new raw connection established by the transport.
+Its main goal is to prevent the transport from maintaining duplicate connections with the same peer.
+It also runs a set of configured connection filters.
+
+The transport keeps a table `conns` of established connections.
+The table maps the remote address returned by a generic connection to a list of
+IP addresses, to which the connection remote address is resolved.
+If the remote address of the new connection is already present in the table,
+the connection is rejected.
+Otherwise, the connection's remote address is resolved into a list of IPs,
+which are recorded in the established connections table.
+ +The connection and the resolved IPs are then passed through a set of connection filters, +configured via the `MultiplexTransportConnFilters` transport option. +The maximum duration for the filters execution, which is performed in parallel, +is determined by `filterTimeout`. +Its default value is 5 seconds, +which can be changed using the `MultiplexTransportFilterTimeout` transport option. + +If the connection and the resolved remote addresses are not filtered out, +the transport registers them into the `conns` table and returns. + +In case of errors, the connection is removed from the table of established +connections and closed. + +### Errors + +If the address of the new connection is already present in the `conns` table, +an `ErrRejected` error with the `isDuplicate` reason is returned. + +If the IP resolution of the connection's remote address fails, +an `AddrError` or `DNSError` error is returned. + +If any of the filters reject the connection, +an `ErrRejected` error with the `isRejected` reason is returned. + +If the filters execution times out, +an `ErrFilterTimeout` error is returned. + +## Connection Upgrade + +The `upgrade` method is invoked for every new raw connection established by the +transport that was not [filtered out](#connection-filtering). +It upgrades an established raw TCP connection into a secret authenticated +connection, and validates the information provided by the peer. + +This is a complex procedure, that can be summarized by the following three +message exchanges between the node and the new peer: + +1. Encryption: the nodes produce ephemeral key pairs and exchange ephemeral + public keys, from which are derived: (i) a pair of secret keys used to + encrypt the data exchanged between the nodes, and (ii) a challenge message. +1. Authentication: the nodes exchange their persistent public keys and a + signature of the challenge message produced with the their persistent + private keys. 
This allows validating the peer's persistent public key,
+   which plays the role of node ID.
+1. Version handshake: nodes exchange and validate each other's `NodeInfo` records.
+   These records contain, among other fields, their node IDs, the network/chain
+   ID they are part of, and the list of supported channel IDs.
+
+Steps (1) and (2) are implemented in the `conn` package.
+In case of success, they produce the secret connection that is actually used by
+the node to communicate with the peer.
+An overview of this procedure, which implements the station-to-station (STS)
+[protocol][sts-paper] ([PDF][sts-paper-pdf]), can be found [here][peer-sts].
+The maximum duration for establishing a secret connection with the peer is
+defined by `handshakeTimeout`, hard-coded to 3 seconds.
+
+The established secret connection stores the persistent public key of the peer,
+which has been validated via the challenge authentication of step (2).
+If the connection being upgraded is an outbound connection, i.e., if the node has
+dialed the peer, the dialed peer's ID is compared to the peer's persistent public key:
+if they do not match, the connection is rejected.
+This verification is not performed in the case of inbound (accepted) connections,
+as the node does not know a priori the remote node's ID.
+
+Step (3), the version handshake, is performed by the transport.
+Its maximum duration is also defined by `handshakeTimeout`, hard-coded to 3 seconds.
+The version handshake retrieves the `NodeInfo` record of the new peer,
+which can be rejected for multiple reasons, listed [here][peer-handshake].
+
+If the connection upgrade succeeds, the method returns the established secret
+connection, an instance of the `conn.SecretConnection` type,
+and the `NodeInfo` record of the peer.
+
+In case of errors, the connection is removed from the table of established
+connections and closed.
+
+### Errors
+
+The timeouts for steps (1) and (2), and for step (3), are configured as the
+deadline for operations on the TCP connection that is being upgraded.
+If this deadline is reached, the connection produces an
+`os.ErrDeadlineExceeded` error, returned by the corresponding step.
+
+Any error produced when establishing a secret connection with the peer (steps 1 and 2) or
+during the version handshake (step 3), including timeouts,
+is encapsulated into an `ErrRejected` error with reason `isAuthFailure` and returned.
+
+If the upgraded connection is an outbound connection, and the peer ID learned in step (2)
+does not match the dialed peer's ID,
+an `ErrRejected` error with reason `isAuthFailure` is returned.
+
+If the peer's `NodeInfo` record, retrieved in step (3), is invalid,
+or if it reports a node ID that does not match the peer ID learned in step (2),
+an `ErrRejected` error with reason `isAuthFailure` is returned.
+If it reports a node ID equal to the local node ID,
+an `ErrRejected` error with reason `isSelf` is returned.
+If it is not compatible with the local `NodeInfo`,
+an `ErrRejected` error with reason `isIncompatible` is returned.
+
+## Close
+
+The `Close` method closes the TCP listener created by the `Listen` method,
+and sends a signal for interrupting the `acceptPeers` routine.
+
+This method is called when a node is [stopped](https://github.com/cometbft/cometbft/blob/v0.34.x/node/node.go#L1023).
+
+## Cleanup
+
+The `Cleanup` method receives a `Peer` instance,
+and removes the connection established with a peer from the table of established connections.
+It also invokes the `Peer` interface method to close the connection associated with a peer.
+
+It is invoked when the connection with a peer is closed.
+
+## Supported channels
+
+The `AddChannel` method registers a channel in the transport.
+
+The channel ID is added to the list of supported channel IDs,
+stored in the local `NodeInfo` record.
+ +The `NodeInfo` record is exchanged with peers in the version handshake. +For this reason, this method is not invoked with a started transport. + +> The only call to this method is performed in the `CustomReactors` constructor +> option of a node, i.e., before the node is started. +> Note that the default list of supported channel IDs, including the default reactors, +> is provided to the transport as its original `NodeInfo` record. + +[peer-sts]: ../legacy-docs/peer.md#authenticated-encryption-handshake +[peer-handshake]: ../legacy-docs/peer.md#cometbft-version-handshake +[sts-paper]: https://link.springer.com/article/10.1007/BF00124891 +[sts-paper-pdf]: https://github.com/tendermint/tendermint/blob/0.1/docs/sts-final.pdf diff --git a/spec/p2p/implementation/types.md b/spec/p2p/implementation/types.md new file mode 100644 index 0000000000..cef2632936 --- /dev/null +++ b/spec/p2p/implementation/types.md @@ -0,0 +1,233 @@ +# Types adopted in the p2p implementation + +This document lists the packages and source files, excluding test units, that +implement the p2p layer, and summarizes the main types they implement. +Types play the role of classes in Go. + +The reference version for this documentation is the branch +[`v0.34.x`](https://github.com/cometbft/cometbft/tree/v0.34.x/p2p). + +State of August 2022. + +## Package `p2p` + +Implementation of the p2p layer of CometBFT. + +### `base_reactor.go` + +`Reactor` interface. + +`BaseReactor` implements `Reactor`. + +**Not documented yet**. + +### `conn_set.go` + +`ConnSet` interface, a "lookup table for connections and their ips". + +Internal type `connSet` implements the `ConnSet` interface. + +Used by the [transport](#transportgo) to store connected peers. + +### `errors.go` + +Defines several error types. + +`ErrRejected` enumerates a number of reason for which a peer was rejected. +Mainly produced by the [transport](#transportgo), +but also by the [switch](#switchgo). 
+
+`ErrSwitchDuplicatePeerID` is produced by the `PeerSet` used by the [switch](#switchgo).
+
+`ErrSwitchConnectToSelf` is handled by the [switch](#switchgo),
+but currently is not produced outside tests.
+
+`ErrSwitchAuthenticationFailure` is handled by the [PEX reactor](#pex_reactorgo),
+but currently is not produced outside tests.
+
+`ErrTransportClosed` is produced by the [transport](#transportgo)
+and handled by the [switch](#switchgo).
+
+`ErrNetAddressNoID`, `ErrNetAddressInvalid`, and `ErrNetAddressLookup`
+are produced when parsing a string to create an instance of `NetAddress`.
+They can be returned in the setup of the [switch](#switchgo)
+and of the [PEX reactor](#pex_reactorgo),
+as well as when the [transport](#transportgo) validates a `NodeInfo`, as part of
+the connection handshake.
+
+`ErrCurrentlyDialingOrExistingAddress` is produced by the [switch](#switchgo),
+and handled by the switch and the [PEX reactor](#pex_reactorgo).
+
+### `fuzz.go`
+
+For testing purposes.
+
+`FuzzedConnection` wraps a `net.Conn` and injects random delays.
+
+### `key.go`
+
+`NodeKey` is the persistent key of a node, namely its private key.
+
+The `ID` of a node is a string representing the node's public key.
+
+### `metrics.go`
+
+Prometheus `Metrics` exposed by the p2p layer.
+
+### `netaddress.go`
+
+Type `NetAddress` contains the `ID` and the network address (IP and port) of a node.
+
+The API of the [address book](#addrbookgo) receives and returns `NetAddress` instances.
+
+This source file was adapted from [`btcd`](https://github.com/btcsuite/btcd),
+a Go implementation of Bitcoin.
+
+### `node_info.go`
+
+Interface `NodeInfo` stores the basic information about a node exchanged with a
+peer during the handshake.
+
+It is implemented by the `DefaultNodeInfo` type.
+
+The [switch](#switchgo) stores the local `NodeInfo`.
+
+The `NodeInfo` of connected peers is produced by the
+[transport](#transportgo) during the handshake, and stored in [`Peer`](#peergo) instances.
+
+### `peer.go`
+
+Interface `Peer` represents a connected peer.
+
+It is implemented by the internal `peer` type.
+
+The [transport](#transportgo) API methods return `Peer` instances,
+wrapping established secure connections with peers.
+
+The [switch](#switchgo) API methods receive `Peer` instances.
+The switch stores connected peers in a `PeerSet`.
+
+The [`Reactor`](#base_reactorgo) methods, invoked by the switch, receive `Peer` instances.
+
+### `peer_set.go`
+
+Interface `IPeerSet` offers methods to access a table of [`Peer`](#peergo) instances.
+
+Type `PeerSet` implements a thread-safe table of [`Peer`](#peergo) instances,
+used by the [switch](#switchgo).
+
+The switch provides limited access to this table by returning an `IPeerSet`
+instance, used by the [PEX reactor](#pex_reactorgo).
+
+### `switch.go`
+
+Documented in [switch](./switch.md).
+
+The `Switch` implements the [peer manager](./peer_manager.md) role for inbound peers.
+
+[`Reactor`](#base_reactorgo)s have access to the `Switch` and may invoke its methods.
+This includes the [PEX reactor](#pex_reactorgo).
+
+### `transport.go`
+
+Documented in [transport](./transport.md).
+
+The `Transport` interface is implemented by `MultiplexTransport`.
+
+The [switch](#switchgo) contains a `Transport` and uses it to establish
+connections with peers.
+
+### `types.go`
+
+Aliases for p2p's `conn` package types.
+
+## Package `p2p.conn`
+
+Implements the connection between CometBFT nodes,
+which is encrypted, authenticated, and multiplexed.
+
+### `connection.go`
+
+Implements the `MConnection` type and the `Channel` abstraction.
+
+A `MConnection` multiplexes a generic network connection (`net.Conn`) into
+multiple independent `Channel`s, used by different [`Reactor`](#base_reactorgo)s.
+
+A [`Peer`](#peergo) stores the `MConnection` instance used to interact with a
+peer, which multiplexes a [`SecretConnection`](#secret_connectiongo).
+
+### `conn_go110.go`
+
+Support for go 1.10.
+ +### `secret_connection.go` + +Implements the `SecretConnection` type, which is an encrypted authenticated +connection built atop a raw network (TCP) connection. + +A [`Peer`](#peergo) stores the `SecretConnection` established by the transport, +which is the underlying connection multiplexed by [`MConnection`](#connectiongo). + +As briefly documented in the [transport](./transport.md#Connection-Upgrade), +a `SecretConnection` implements the Station-To-Station (STS) protocol. + +The `SecretConnection` type implements the `net.Conn` interface, +which is a generic network connection. + +## Package `p2p.mock` + +Mock implementations of [`Peer`](#peergo) and [`Reactor`](#base_reactorgo) interfaces. + +## Package `p2p.mocks` + +Code generated by `mockery`. + +## Package `p2p.pex` + +Implementation of the [PEX reactor](./pex.md). + +### `addrbook.go` + +Documented in [address book](./addressbook.md). + +This source file was adapted from [`btcd`](https://github.com/btcsuite/btcd), +a Go implementation of Bitcoin. + +### `errors.go` + +A number of errors produced and handled by the [address book](#addrbookgo). + +`ErrAddrBookNilAddr` is produced by the address book, but handled (logged) by +the [PEX reactor](#pex_reactorgo). + +`ErrUnsolicitedList` is produced and handled by the [PEX protocol](#pex_reactorgo). + +### `file.go` + +Implements the [address book](#addrbookgo) persistence. + +### `known_address.go` + +Type `knownAddress` represents an address stored in the [address book](#addrbookgo). + +### `params.go` + +Constants used by the [address book](#addrbookgo). + +### `pex_reactor.go` + +Implementation of the [PEX reactor](./pex.md), which is a [`Reactor`](#base_reactorgo). + +This includes the implementation of the [PEX protocol](./pex-protocol.md) +and of the [peer manager](./peer_manager.md) role for outbound peers. + +The PEX reactor also manages an [address book](#addrbookgo) instance. 
+ +## Package `p2p.trust` + +Go documentation of `Metric` type: + +> // Metric - keeps track of peer reliability +> // See cometbft/docs/architecture/adr-006-trust-metric.md for details + +Not imported by any other CometBFT source file. diff --git a/spec/p2p/config.md b/spec/p2p/legacy-docs/config.md similarity index 100% rename from spec/p2p/config.md rename to spec/p2p/legacy-docs/config.md diff --git a/spec/p2p/connection.md b/spec/p2p/legacy-docs/connection.md similarity index 100% rename from spec/p2p/connection.md rename to spec/p2p/legacy-docs/connection.md diff --git a/spec/p2p/messages/README.md b/spec/p2p/legacy-docs/messages/README.md similarity index 100% rename from spec/p2p/messages/README.md rename to spec/p2p/legacy-docs/messages/README.md diff --git a/spec/p2p/messages/block-sync.md b/spec/p2p/legacy-docs/messages/block-sync.md similarity index 96% rename from spec/p2p/messages/block-sync.md rename to spec/p2p/legacy-docs/messages/block-sync.md index 122702f4fc..187cc8af2d 100644 --- a/spec/p2p/messages/block-sync.md +++ b/spec/p2p/legacy-docs/messages/block-sync.md @@ -38,7 +38,7 @@ BlockResponse contains the block requested. | Name | Type | Description | Field Number | |-------|----------------------------------------------|-----------------|--------------| -| Block | [Block](../../core/data_structures.md#block) | Requested Block | 1 | +| Block | [Block](../../../core/data_structures.md#block) | Requested Block | 1 | ### StatusRequest diff --git a/spec/p2p/messages/consensus.md b/spec/p2p/legacy-docs/messages/consensus.md similarity index 88% rename from spec/p2p/messages/consensus.md rename to spec/p2p/legacy-docs/messages/consensus.md index 807eca64b1..c9b421f7e1 100644 --- a/spec/p2p/messages/consensus.md +++ b/spec/p2p/legacy-docs/messages/consensus.md @@ -24,13 +24,13 @@ next block in the blockchain should be. 
| Name | Type | Description | Field Number | |----------|----------------------------------------------------|----------------------------------------|--------------| -| proposal | [Proposal](../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1 | +| proposal | [Proposal](../../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1 | ### Vote Vote is sent to vote for some block (or to inform others that a process does not vote in the current round). Vote is defined in the -[Blockchain](https://github.com/cometbft/cometbft/blob/v0.37.x/spec/core/data_structures.md#blockidd) +[Blockchain](../../../core/data_structures.md#blockidd) section and contains validator's information (validator address and index), height and round for which the vote is sent, vote type, blockID if process vote for some block (`nil` otherwise) and a timestamp when the vote is sent. The @@ -38,7 +38,7 @@ message is signed by the validator private key. | Name | Type | Description | Field Number | |------|--------------------------------------------|---------------------------|--------------| -| vote | [Vote](../../core/data_structures.md#vote) | Vote for a proposed Block | 1 | +| vote | [Vote](../../../core/data_structures.md#vote) | Vote for a proposed Block | 1 | ### BlockPart @@ -49,7 +49,7 @@ and the block part. |--------|--------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block. | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| part | [Part](../../core/data_structures.md#part) | A part of the block. | 3 | +| part | [Part](../../../core/data_structures.md#part) | A part of the block. | 3 | ### NewRoundStep @@ -79,7 +79,7 @@ In case the block is also committed, then IsCommit flag is set to true. 
|-----------------------|--------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| block_part_set_header | [PartSetHeader](../../core/data_structures.md#partsetheader) | | 3 | +| block_part_set_header | [PartSetHeader](../../../core/data_structures.md#partsetheader) | | 3 | | block_parts | int32 | | 4 | | is_commit | bool | | 5 | @@ -104,7 +104,7 @@ round, vote type and the index of the validator that is the originator of the co |--------|------------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | +| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 | | index | int32 | | 4 | ### VoteSetMaj23 @@ -116,7 +116,7 @@ It contains height, round, vote type and the BlockID. |--------|------------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. | 2 | -| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | +| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 | ### VoteSetBits @@ -128,8 +128,8 @@ the votes a process has. |----------|------------------------------------------------------------------|----------------------------------------|--------------| | height | int64 | Height of corresponding block | 1 | | round | int32 | Round of voting to finalize the block. 
| 2 | -| type | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | | 3 | -| block_id | [BlockID](../../core/data_structures.md#blockid) | | 4 | +| type | [SignedMessageType](../../../core/data_structures.md#signedmsgtype) | | 3 | +| block_id | [BlockID](../../../core/data_structures.md#blockid) | | 4 | | votes | BitArray | Round of voting to finalize the block. | 5 | ### Message diff --git a/spec/p2p/messages/evidence.md b/spec/p2p/legacy-docs/messages/evidence.md similarity index 78% rename from spec/p2p/messages/evidence.md rename to spec/p2p/legacy-docs/messages/evidence.md index 34fc40a915..7db104b347 100644 --- a/spec/p2p/messages/evidence.md +++ b/spec/p2p/legacy-docs/messages/evidence.md @@ -16,8 +16,8 @@ Evidence has one channel. The channel identifier is listed below. ### EvidenceList -EvidenceList consists of a list of verified evidence. This evidence will already have been propagated throughout the network. EvidenceList is used in two places, as a p2p message and within the block [block](../../core/data_structures.md#block) as well. +EvidenceList consists of a list of verified evidence. This evidence will already have been propagated throughout the network. EvidenceList is used in two places, as a p2p message and within the block [block](../../../core/data_structures.md#block) as well. 
| Name | Type | Description | Field Number | |----------|-------------------------------------------------------------|------------------------|--------------| -| evidence | repeated [Evidence](../../core/data_structures.md#evidence) | List of valid evidence | 1 | +| evidence | repeated [Evidence](../../../core/data_structures.md#evidence) | List of valid evidence | 1 | diff --git a/spec/p2p/messages/mempool.md b/spec/p2p/legacy-docs/messages/mempool.md similarity index 100% rename from spec/p2p/messages/mempool.md rename to spec/p2p/legacy-docs/messages/mempool.md diff --git a/spec/p2p/messages/pex.md b/spec/p2p/legacy-docs/messages/pex.md similarity index 100% rename from spec/p2p/messages/pex.md rename to spec/p2p/legacy-docs/messages/pex.md diff --git a/spec/p2p/messages/state-sync.md b/spec/p2p/legacy-docs/messages/state-sync.md similarity index 94% rename from spec/p2p/messages/state-sync.md rename to spec/p2p/legacy-docs/messages/state-sync.md index cfc958e08d..e7be056c44 100644 --- a/spec/p2p/messages/state-sync.md +++ b/spec/p2p/legacy-docs/messages/state-sync.md @@ -89,9 +89,9 @@ if necessary. The light block at the height of the snapshot will be used to veri | Name | Type | Description | Field Number | |---------------|---------------------------------------------------------|--------------------------------------|--------------| -| light_block | [LightBlock](../../core/data_structures.md#lightblock) | Light block at the height requested | 1 | +| light_block | [LightBlock](../../../core/data_structures.md#lightblock) | Light block at the height requested | 1 | -State sync will use [light client verification](../../../spec/light-client/verification/README.md) to verify +State sync will use [light client verification](../../../light-client/verification/README.md) to verify the light blocks. If no state sync is in progress (i.e. 
during normal operation), any unsolicited response messages @@ -113,7 +113,7 @@ A reciever to the request will use the state store to fetch the consensus params | Name | Type | Description | Field Number | |----------|--------|---------------------------------|--------------| | height | uint64 | Height of the consensus params | 1 | -| consensus_params | [ConsensusParams](../../core/data_structures.md#ConsensusParams) | Consensus params at the height requested | 2 | +| consensus_params | [ConsensusParams](../../../core/data_structures.md#ConsensusParams) | Consensus params at the height requested | 2 | ### Message diff --git a/spec/p2p/node.md b/spec/p2p/legacy-docs/node.md similarity index 100% rename from spec/p2p/node.md rename to spec/p2p/legacy-docs/node.md diff --git a/spec/p2p/peer.md b/spec/p2p/legacy-docs/peer.md similarity index 100% rename from spec/p2p/peer.md rename to spec/p2p/legacy-docs/peer.md diff --git a/spec/p2p/reactor-api/README.md b/spec/p2p/reactor-api/README.md new file mode 100644 index 0000000000..401805c4b9 --- /dev/null +++ b/spec/p2p/reactor-api/README.md @@ -0,0 +1,43 @@ +# Reactors + +Reactor is the generic name for a component that employs the p2p communication layer. + +This section documents the interaction of the p2p communication layer with the +reactors. +The diagram below summarizes this interaction, namely the **northbound interface** +of the p2p communication layer, representing some relevant event flows: + + + +Each of the protocols running a CometBFT node implements a reactor and registers +the implementation with the p2p layer. +The p2p layer provides network events to the registered reactors, the main +two being new connections with peers and received messages. +The reactors provide to the p2p layer messages to be sent to +peers and commands to control the operation of the p2p layer. 
+
+It is worth noting that the components depicted in the diagram below run
+multiple routines and that the illustrated actions happen in parallel.
+For instance, the connection establishment routines run in parallel, invoking
+the depicted `AddPeer` method concurrently.
+Once a connection is fully established, each `Peer` instance runs a send and a
+receive routine.
+The send routine collects messages from multiple reactors to a peer, packaging
+them into raw messages which are transmitted to the peer.
+The receive routine processes incoming messages and forwards them to the
+destination reactors, invoking the depicted `Receive` methods.
+In addition, the reactors run multiple routines for interacting
+with the peers (for example, to send messages to them) or with the `Switch`.
+
+The remainder of the documentation is organized as follows:
+
+- [Reactor API](./reactor.md): documents the [`p2p.Reactor`][reactor-interface]
+  interface and specifies the behaviour of the p2p layer when interacting with
+  a reactor.
+  In other words, the interaction of the p2p layer with the protocol layer (bottom-up).
+
+- [P2P API](./p2p-api.md): documents the interface provided by the p2p
+  layer to the reactors, through the `Switch` and `Peer` abstractions.
+  In other words, the interaction of the protocol layer with the p2p layer (top-down).
+
+[reactor-interface]: ../../../p2p/base_reactor.go
diff --git a/spec/p2p/reactor-api/p2p-api.md b/spec/p2p/reactor-api/p2p-api.md
new file mode 100644
index 0000000000..927e416c72
--- /dev/null
+++ b/spec/p2p/reactor-api/p2p-api.md
@@ -0,0 +1,311 @@
+# API for Reactors
+
+This document describes the API provided by the p2p layer to the protocol
+layer, namely to the registered reactors.
+
+This API consists of two interfaces: the one provided by the `Switch` instance,
+and the ones provided by multiple `Peer` instances, one per connected peer.
+The `Switch` instance is provided to every reactor as part of the reactor's
+[registration procedure][reactor-registration].
+The multiple `Peer` instances are provided to every registered reactor whenever
+a [new connection with a peer][reactor-addpeer] is established.
+
+> **Note**
+>
+> The practical reasons that led to the interface being provided in two parts,
+> `Switch` and `Peer` instances, are discussed in more detail in the
+> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/switch-peer.md).
+
+## `Switch` API
+
+The [`Switch`][switch-type] is the central component of the p2p layer
+implementation. It manages all the reactors running in a node and keeps track
+of the connections with peers.
+The table below summarizes the interaction of the standard reactors with the `Switch`:
+
+| `Switch` API method                        | consensus | block sync | state sync | mempool | evidence  | PEX   |
+|--------------------------------------------|-----------|------------|------------|---------|-----------|-------|
+| `Peers() IPeerSet`                         | x         | x          |            |         |           | x     |
+| `NumPeers() (int, int, int)`               |           | x          |            |         |           | x     |
+| `Broadcast(Envelope) chan bool`            | x         | x          | x          |         |           |       |
+| `MarkPeerAsGood(Peer)`                     | x         |            |            |         |           |       |
+| `StopPeerForError(Peer, interface{})`      | x         | x          | x          | x       | x         | x     |
+| `StopPeerGracefully(Peer)`                 |           |            |            |         |           | x     |
+| `Reactor(string) Reactor`                  |           | x          |            |         |           |       |
+
+The above list is not exhaustive as it does not include all the `Switch` methods
+invoked by the PEX reactor, a special component that should be considered part
+of the p2p layer. This document does not cover the operation of the PEX reactor
+as a connection manager.
+
+### Peers State
+
+The first two methods in the switch API allow reactors to query the state of
+the p2p layer: the set of connected peers.
+
+    func (sw *Switch) Peers() IPeerSet
+
+The `Peers()` method returns the current set of connected peers.
+The returned `IPeerSet` is an immutable concurrency-safe copy of this set.
+Observe that the `Peer` handlers returned by this method were previously +[added to the reactor][reactor-addpeer] via the `InitPeer(Peer)` method, +but not yet removed via the `RemovePeer(Peer)` method. +Thus, a priori, reactors should already have this information. + + func (sw *Switch) NumPeers() (outbound, inbound, dialing int) + +The `NumPeers()` method returns the current number of connected peers, +distinguished between `outbound` and `inbound` peers. +An `outbound` peer is a peer the node has dialed to, while an `inbound` peer is +a peer the node has accepted a connection from. +The third field `dialing` reports the number of peers to which the node is +currently attempting to connect, so not (yet) connected peers. + +> **Note** +> +> The third field returned by `NumPeers()`, the number of peers in `dialing` +> state, is not an information that should regard the protocol layer. +> In fact, with the exception of the PEX reactor, which can be considered part +> of the p2p layer implementation, no standard reactor actually uses this +> information, that could be removed when this interface is refactored. + +### Broadcast + +The switch provides, mostly for historical or retro-compatibility reasons, +a method for sending a message to all connected peers: + + func (sw *Switch) Broadcast(e Envelope) chan bool + +The `Broadcast()` method is not blocking and returns a channel of booleans. +For every connected `Peer`, it starts a background thread for sending the +message to that peer, using the `Peer.Send()` method +(which is blocking, as detailed in [Send Methods](#send-methods)). +The result of each unicast send operation (success or failure) is added to the +returned channel, which is closed when all operations are completed. 
+
+> **Note**
+>
+> - The current _implementation_ of the `Switch.Broadcast(Envelope)` method is
+>   not efficient, as the marshalling of the provided message is performed as
+>   part of the `Peer.Send(Envelope)` helper method, that is, once per
+>   connected peer.
+> - The return value of the broadcast method is not considered by any of the
+>   standard reactors that employ the method. One of the reasons is that it is
+>   not possible to associate each of the boolean outputs added to the
+>   returned channel to a peer.
+
+### Vetting Peers
+
+The p2p layer relies on the registered reactors to gauge the _quality_ of peers.
+The following method can be invoked by a reactor to inform the p2p layer that a
+peer has presented a "good" behaviour.
+This information is registered in the node's address book and influences the
+operation of the Peer Exchange (PEX) protocol, as node discovery adopts a bias
+towards "good" peers:
+
+    func (sw *Switch) MarkPeerAsGood(peer Peer)
+
+At the moment, it is up to the consensus reactor to vet a peer.
+In the current logic, a peer is marked as good whenever the consensus protocol
+collects a multiple of `votesToContributeToBecomeGoodPeer = 10000` useful votes
+or `blocksToContributeToBecomeGoodPeer = 10000` useful block parts from that peer.
+By "useful", the consensus implementation considers messages that are valid and
+that are received by the node when the node is expecting such information,
+which excludes duplicated or late received messages.
+
+> **Note**
+>
+> The switch doesn't currently provide a method to mark a peer as a bad peer.
+> In fact, the peer quality management is not really implemented in the current
+> version of the p2p layer.
+> This topic is being discussed in the [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/peer-quality.md).
+
+### Stopping Peers
+
+Reactors can instruct the p2p layer to disconnect from a peer.
+Using the p2p layer's nomenclature, the reactor requests a peer to be stopped. +The peer's send and receive routines are in fact stopped, interrupting the +communication with the peer. +The `Peer` is then [removed from every registered reactor][reactor-removepeer], +using the `RemovePeer(Peer)` method, and from the set of connected peers. + + func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) + +All the standard reactors employ the above method for disconnecting from a peer +in case of errors. +These are errors that occur when processing a message received from a `Peer`. +The produced `error` is provided to the method as the `reason`. + +The `StopPeerForError()` method has an important *caveat*: if the peer to be +stopped is configured as a _persistent peer_, the switch will attempt +reconnecting to that same peer. +While this behaviour makes sense when the method is invoked by other components +of the p2p layer (e.g., in the case of communication errors), it does not make +sense when it is invoked by a reactor. + +> **Note** +> +> A more comprehensive discussion regarding this topic can be found on the +> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/stop-peer.md). + + func (sw *Switch) StopPeerGracefully(peer Peer) + +The second method instructs the switch to disconnect from a peer for no +particular reason. +This method is only adopted by the PEX reactor of a node operating in _seed mode_, +as seed nodes disconnect from a peer after exchanging peer addresses with it. + +### Reactors Table + +The switch keeps track of all registered reactors, indexed by unique reactor names. +A reactor can therefore use the switch to access another `Reactor` from its `name`: + + func (sw *Switch) Reactor(name string) Reactor + +This method is currently only used by the Block Sync reactor to access the +Consensus reactor implementation, from which it uses the exported +`SwitchToConsensus()` method. 
+While available, this inter-reactor interaction approach is discouraged and +should be avoided, as it violates the assumption that reactors are independent. + + +## `Peer` API + +The [`Peer`][peer-interface] interface represents a connected peer. +A `Peer` instance encapsulates a multiplex connection that implements the +actual communication (sending and receiving messages) with a peer. +When a connection is established with a peer, the `Switch` provides the +corresponding `Peer` instance to all registered reactors. +From this point, reactors can use the methods of the new `Peer` instance. + +The table below summarizes the interaction of the standard reactors with +connected peers, with the `Peer` methods used by them: + +| `Peer` API method | consensus | block sync | state sync | mempool | evidence | PEX | +|--------------------------------------------|-----------|------------|------------|---------|-----------|-------| +| `ID() ID` | x | x | x | x | x | x | +| `IsRunning() bool` | x | | | x | x | | +| `Quit() <-chan struct{}` | | | | x | x | | +| `Get(string) interface{}` | x | | | x | x | | +| `Set(string, interface{})` | x | | | | | | +| `Send(Envelope) bool` | x | x | x | x | x | x | +| `TrySend(Envelope) bool` | x | x | | | | | + +The above list is not exhaustive as it does not include all the `Peer` methods +invoked by the PEX reactor, a special component that should be considered part +of the p2p layer. This document does not cover the operation of the PEX reactor +as a connection manager. + +### Identification + +Nodes in the p2p network are configured with a unique cryptographic key pair. 
+The public part of this key pair is verified when establishing a connection +with the peer, as part of the authentication handshake, and constitutes the +peer's `ID`: + + func (p Peer) ID() p2p.ID + +Observe that each time the node connects to a peer (e.g., after disconnecting +from it), a new (distinct) `Peer` handler is provided to the reactors via +`InitPeer(Peer)` method. +In fact, the `Peer` handler is associated to a _connection_ with a peer, not to +the actual _node_ in the network. +To keep track of actual peers, the unique peer `p2p.ID` provided by the above +method should be employed. + +### Peer state + +The switch starts the peer's send and receive routines before adding the peer +to every registered reactor using the `AddPeer(Peer)` method. +The reactors then usually start routines to interact with the new connected +peer using the received `Peer` handler. +For these routines it is useful to check whether the peer is still connected +and its send and receive routines are still running: + + func (p Peer) IsRunning() bool + func (p Peer) Quit() <-chan struct{} + +The above two methods provide the same information about the state of a `Peer` +instance in two different ways. +Both of them are defined in the [`Service`][service-interface] interface. +The `IsRunning()` method is synchronous and returns whether the peer has been +started and has not been stopped. +The `Quit()` method returns a channel that is closed when the peer is stopped; +it is an asynchronous state query. + +### Key-value store + +Each `Peer` instance provides a synchronized key-value store that allows +sharing peer-specific state between reactors: + + + func (p Peer) Get(key string) interface{} + func (p Peer) Set(key string, data interface{}) + +This key-value store can be seen as an asynchronous mechanism to exchange the +state of a peer between reactors. 
+In the current use-case of this mechanism, the Consensus reactor populates the +key-value store with a `PeerState` instance for each connected peer. +The Consensus reactor routines interacting with a peer read and update the +shared peer state. +The Evidence and Mempool reactors, in their turn, periodically query the +key-value store of each peer for retrieving, in particular, the last height +reported by the peer. +This information, produced by the Consensus reactor, influences the interaction +of these two reactors with their peers. + +> **Note** +> +> More details of how this key-value store is used to share state between reactors can be found on the +> [knowledge-base repository](https://github.com/cometbft/knowledge-base/blob/main/p2p/reactors/peer-kvstore.md). + +### Send methods + +Finally, a `Peer` instance allows a reactor to send messages to companion +reactors running at that peer. +This is ultimately the goal of the switch when it provides `Peer` instances to +the registered reactors. +There are two methods for sending messages: + + func (p Peer) Send(e Envelope) bool + func (p Peer) TrySend(e Envelope) bool + +The two message-sending methods receive an `Envelope`, whose content should be +set as follows: + +- `ChannelID`: the channel the message should be sent through, which defines + the reactor that will process the message; +- `Src`: this field represents the source of an incoming message, which is + irrelevant for outgoing messages; +- `Message`: the actual message's payload, which is marshalled using protocol buffers. + +The two message-sending methods attempt to add the message (`e.Payload`) to the +send queue of the peer's destination channel (`e.ChannelID`). +There is a send queue for each registered channel supported by the peer, and +each send queue has a capacity. +The capacity of the send queues for each channel are [configured][reactor-channels] +by reactors via the corresponding `ChannelDescriptor`. 
+ +The two message-sending methods return whether it was possible to enqueue +the marshalled message to the channel's send queue. +The most common reason for these methods to return `false` is the channel's +send queue being full. +Further reasons for returning `false` are: the peer being stopped, providing a +non-registered channel ID, or errors when marshalling the message's payload. + +The difference between the two message-sending methods is _when_ they return `false`. +The `Send()` method is a _blocking_ method, it returns `false` if the message +could not be enqueued, because the channel's send queue is still full, after a +10-second _timeout_. +The `TrySend()` method is a _non-blocking_ method, it _immediately_ returns +`false` when the channel's send queue is full. + +[peer-interface]: ../../../p2p/peer.go +[service-interface]: ../../../libs/service/service.go +[switch-type]: ../../../p2p/switch.go + +[reactor-interface]: ../../../p2p/base_reactor.go +[reactor-registration]: ./reactor.md#registration +[reactor-channels]: ./reactor.md#registration +[reactor-addpeer]: ./reactor.md#peer-management +[reactor-removepeer]: ./reactor.md#stop-peer diff --git a/spec/p2p/reactor-api/reactor.md b/spec/p2p/reactor-api/reactor.md new file mode 100644 index 0000000000..9d85e7ccd0 --- /dev/null +++ b/spec/p2p/reactor-api/reactor.md @@ -0,0 +1,230 @@ +# Reactor API + +A component has to implement the [`p2p.Reactor` interface][reactor-interface] +in order to use communication services provided by the p2p layer. +This interface is currently the main source of documentation for a reactor. + +The goal of this document is to specify the behaviour of the p2p communication +layer when interacting with a reactor. +So while the [`Reactor interface`][reactor-interface] declares the methods +invoked and determines what the p2p layer expects from a reactor, +this documentation focuses on the **temporal behaviour** that a reactor implementation +should expect from the p2p layer. 
(That is, in which order the functions may be called)
+
+This specification is accompanied by the [`reactor.qnt`](./reactor.qnt) file,
+a more comprehensive model of the reactor's operation written in
+[Quint][quint-repo], an executable specification language.
+The methods declared in the [`Reactor`][reactor-interface] interface are
+modeled in Quint, in the form of `pure def` methods, providing some examples of
+how they should be implemented.
+The behaviour of the p2p layer when interacting with a reactor, by invoking the
+interface methods, is modeled in the form of state transitions, or `action`s in
+the Quint nomenclature.
+
+## Overview
+
+The following _grammar_ is a simplified representation of the expected sequence of calls
+from the p2p layer to a reactor.
+Note that the grammar represents events referring to a _single reactor_, while
+the p2p layer supports the execution of multiple reactors.
+For a more detailed representation of the sequence of calls from the p2p layer
+to reactors, please refer to the companion Quint model.
+
+While useful to provide an overview of the operation of a reactor,
+grammars have some limitations in terms of the behaviour they can express.
+For instance, the following grammar only represents the management of _a single peer_,
+namely of a peer with a given ID which can connect, disconnect, and reconnect
+multiple times to the node.
+The p2p layer and every reactor should be able to handle multiple distinct peers in parallel.
+This means that multiple occurrences of non-terminal `peer-management` of the
+grammar below can "run" independently and in parallel, each one referring and
+producing events associated with a different peer:
+
+```abnf
+start = registration on-start *peer-management on-stop
+registration = get-channels set-switch
+
+; Refers to a single peer, a reactor must support multiple concurrent peers
+peer-management = init-peer start-peer stop-peer
+start-peer = [*receive] (connected-peer / start-error)
+connected-peer = add-peer *receive
+stop-peer = [peer-error] remove-peer
+
+; Service interface
+on-start = %s"OnStart()"
+on-stop = %s"OnStop()"
+; Reactor interface
+get-channels = %s"GetChannels()"
+set-switch = %s"SetSwitch(*Switch)"
+init-peer = %s"InitPeer(Peer)"
+add-peer = %s"AddPeer(Peer)"
+remove-peer = %s"RemovePeer(Peer, reason)"
+receive = %s"Receive(Envelope)"
+
+; Errors, for reference
+start-error = %s"log(Error starting peer)"
+peer-error = %s"log(Stopping peer for error)"
+```
+
+The grammar is written in case-sensitive Augmented Backus–Naur form (ABNF,
+specified in [IETF RFC 7405](https://datatracker.ietf.org/doc/html/rfc7405)).
+It is inspired by the grammar produced to specify the interaction of CometBFT
+with an ABCI++ application, available [here](../../abci/abci%2B%2B_comet_expected_behavior.md).
+
+## Registration
+
+To become a reactor, a component has first to implement the
+[`Reactor`][reactor-interface] interface,
+then to register the implementation with the p2p layer, using the
+`Switch.AddReactor(name string, reactor Reactor)` method,
+with a globally unique `name` for the reactor.
+
+The registration must happen before the node, in general, and the p2p layer,
+in particular, are started.
+In other words, there is no support for registering a reactor on a running node:
+reactors must be registered as part of the setup of a node.
+ +```abnf +registration = get-channels set-switch +``` + +The p2p layer retrieves from the reactor a list of channels the reactor is +responsible for, using the `GetChannels()` method. +The reactor implementation should thereafter expect the delivery of every +message received by the p2p layer in the informed channels. + +The second method `SetSwitch(Switch)` concludes the handshake between the +reactor and the p2p layer. +The `Switch` is the main component of the p2p layer, being responsible for +establishing connections with peers and routing messages. +The `Switch` instance provides a number of methods for all registered reactors, +documented in the companion [API for Reactors](./p2p-api.md#switch-api) document. + +## Service interface + +A reactor must implement the [`Service`](../../../libs/service/service.go) interface, +in particular, a startup `OnStart()` and a shutdown `OnStop()` methods: + +```abnf +start = registration on-start *peer-management on-stop +``` + +As part of the startup of a node, all registered reactors are started by the p2p layer. +And when the node is shut down, all registered reactors are stopped by the p2p layer. +Observe that the `Service` interface specification establishes that a service +can be started and stopped only once. +So before being started or once stopped by the p2p layer, the reactor should +not expect any interaction. + +## Peer management + +The core of a reactor's operation is the interaction with peers or, more +precisely, with companion reactors operating on the same channels in peers connected to the node. +The grammar extract below represents the interaction of the reactor with a +single peer: + +```abnf +; Refers to a single peer, a reactor must support multiple concurrent peers +peer-management = init-peer start-peer stop-peer +``` + +The p2p layer informs all registered reactors when it establishes a connection +with a `Peer`, using the `InitPeer(Peer)` method. 
+When this method is invoked, the `Peer` has not yet been started, namely the +routines for sending messages to and receiving messages from the peer are not running. +This method should be used to initialize state or data related to the new +peer, but not to interact with it. + +The next step is to start the communication routines with the new `Peer`. +As detailed in the following, this procedure may or may not succeed. +In any case, the peer is eventually stopped, which concludes the management of +that `Peer` instance. + +## Start peer + +Once `InitPeer(Peer)` is invoked for every registered reactor, the p2p layer starts the peer's +communication routines and adds the `Peer` to the set of connected peers. +If both steps are concluded without errors, the reactor's `AddPeer(Peer)` is invoked: + +```abnf +start-peer = [*receive] (connected-peer / start-error) +connected-peer = add-peer *receive +``` + +In case of errors, a message is logged informing that the p2p layer failed to start the peer. +This is not a common scenario and it is only expected to happen when +interacting with a misbehaving or slow peer. A practical example is reported on this +[issue](https://github.com/tendermint/tendermint/pull/9500). + +It is up to the reactor to define how to process the `AddPeer(Peer)` event. +The typical behavior is to start routines that, given some conditions or events, +send messages to the added peer, using the provided `Peer` instance. +The companion [API for Reactors](./p2p-api.md#peer-api) documents the methods +provided by `Peer` instances, available from when they are added to the reactors. + +## Stop Peer + +The p2p layer informs all registered reactors when it disconnects from a `Peer`, +using the `RemovePeer(Peer, reason)` method: + +```abnf +stop-peer = [peer-error] remove-peer +``` + +This method is invoked after the p2p layer has stopped peer's send and receive routines. 
+Depending on the `reason` for which the peer was stopped, different log +messages can be produced. +After removing a peer from all reactors, the `Peer` instance is also removed from +the set of connected peers. +This enables the same peer to reconnect and `InitPeer(Peer)` to be invoked for +the new connection. + +From the removal of a `Peer`, the reactor should not receive any further message +from the peer and must not try sending messages to the removed peer. +This usually means stopping the routines that were started by the companion +`AddPeer(Peer)` method. + +## Receive messages + +The main duty of a reactor is to handle incoming messages on the channels it +has registered with the p2p layer. + +The _pre-condition_ for receiving a message from a `Peer` is that the p2p layer +has previously invoked `InitPeer(Peer)`. +This means that the reactor must be able to receive a message from a `Peer` +_before_ `AddPeer(Peer)` is invoked. +This happens because the peer's send and receive routines are started before, +and should be already running when the p2p layer adds the peer to every +registered reactor. + +```abnf +start-peer = [*receive] (connected-peer / start-error) +connected-peer = add-peer *receive +``` + +The most common scenario, however, is to start receiving messages from a peer +after `AddPeer(Peer)` is invoked. +An arbitrary number of messages can be received, until the peer is stopped and +`RemovePeer(Peer)` is invoked. + +When a message is received from a connected peer on any of the channels +registered by the reactor, the p2p layer will deliver the message to the +reactor via the `Receive(Envelope)` method.
+The message is packed into an `Envelope` that contains: + +- `ChannelID`: the channel the message belongs to +- `Src`: the source `Peer` handler, from which the message was received +- `Message`: the actual message's payload, unmarshalled using protocol buffers + +Two important observations regarding the implementation of the `Receive` method: + +1. Concurrency: the implementation should consider concurrent invocations of + the `Receive` method carrying messages from different peers, as the + interaction with different peers is independent and messages can be received in parallel. +1. Non-blocking: the implementation of the `Receive` method is expected not to block, + as it is invoked directly by the receive routines. + In other words, while `Receive` does not return, other messages from the + same sender are not delivered to any reactor. + +[reactor-interface]: ../../../p2p/base_reactor.go +[quint-repo]: https://github.com/informalsystems/quint diff --git a/spec/p2p/reactor-api/reactor.qnt b/spec/p2p/reactor-api/reactor.qnt new file mode 100644 index 0000000000..002c57023a --- /dev/null +++ b/spec/p2p/reactor-api/reactor.qnt @@ -0,0 +1,276 @@ +// -*- mode: Bluespec; -*- +/* + * Reactor is responsible for handling incoming messages on one or more + * Channel. Switch calls GetChannels when reactor is added to it. When a new + * peer joins our node, InitPeer and AddPeer are called. RemovePeer is called + * when the peer is stopped. Receive is called when a message is received on a + * channel associated with this reactor. + */ +// Code: https://github.com/cometbft/cometbft/blob/main/p2p/base_reactor.go +module reactor { + + // Unique ID of a node. + type NodeID = str + + /* + * Peer is an interface representing a peer connected on a reactor. + */ + type Peer = { + ID: NodeID, + + // Other fields can be added to represent the p2p operation. + } + + // Byte ID used by channels, must be globally unique. + type Byte = str + + // Channel configuration. 
+ type ChannelDescriptor = { + ID: Byte, + Priority: int, + } + + /* + * Envelope contains a message with sender routing info. + */ + type Envelope = { + Src: Peer, // Sender + Message: str, // Payload + ChannelID: Byte, + } + + // A Routine is used to interact with an active Peer. + type Routine = { + name: str, + peer: Peer, + } + + type ReactorState = { + // Peers that have been initialized but not yet removed. + // The reactor should expect receiving messages from them. + peers: Set[NodeID], + + // The reactor runs multiple routines. + routines: Set[Routine], + + // Values: init -> registered -> running -> stopped + state: str, + + // Name with which the reactor was registered. + name: str, + + // Channels the reactor is responsible for. + channels: Set[ChannelDescriptor], + } + + // Produces a new, uninitialized reactor. + pure def NewReactor(): ReactorState = { + { + peers: Set(), + routines: Set(), + state: "init", + name: "", + channels: Set(), + } + } + + // Pure definitions below represent the `p2p.Reactor` interface methods: + + /* + * GetChannels returns the list of MConnection.ChannelDescriptor. Make sure + * that each ID is unique across all the reactors added to the switch. + */ + pure def GetChannels(s: ReactorState): Set[ChannelDescriptor] = { + s.channels // Static list, configured at initialization. + } + + /* + * SetSwitch allows setting a switch. + */ + pure def SetSwitch(s: ReactorState, switch: bool): ReactorState = { + s.with("state", "registered") + } + + /* + * Start the service. + * If it's already started or stopped, will return an error. + */ + pure def OnStart(s: ReactorState): ReactorState = { + // Startup procedures should come here. + s.with("state", "running") + } + + /* + * Stop the service. + * If it's already stopped, will return an error. + */ + pure def OnStop(s: ReactorState): ReactorState = { + // Shutdown procedures should come here. 
+ s.with("state", "stopped") + } + + /* + * InitPeer is called by the switch before the peer is started. Use it to + * initialize data for the peer (e.g. peer state). + */ + pure def InitPeer(s: ReactorState, peer: Peer): (ReactorState, Peer) = { + // This method can update the received peer, which is returned. + val updatedPeer = peer + (s.with("peers", s.peers.union(Set(peer.ID))), updatedPeer) + } + + /* + * AddPeer is called by the switch after the peer is added and successfully + * started. Use it to start goroutines communicating with the peer. + */ + pure def AddPeer(s: ReactorState, peer: Peer): ReactorState = { + // This method can be used to start routines to handle the peer. + // Below an example of an arbitrary 'ioRoutine' routine. + val startedRoutines = Set( {name: "ioRoutine", peer: peer} ) + s.with("routines", s.routines.union(startedRoutines)) + } + + /* + * RemovePeer is called by the switch when the peer is stopped (due to error + * or other reason). + */ + pure def RemovePeer(s: ReactorState, peer: Peer, reason: str): ReactorState = { + // This method should stop routines created by `AddPeer(Peer)`. + val stoppedRoutines = s.routines.filter(r => r.peer.ID == peer.ID) + s.with("peers", s.peers.exclude(Set(peer.ID))) + .with("routines", s.routines.exclude(stoppedRoutines)) + } + + /* + * Receive is called by the switch when an envelope is received from any connected + * peer on any of the channels registered by the reactor. + */ + pure def Receive(s: ReactorState, e: Envelope): ReactorState = { + // This method should process the message payload: e.Message. + s + } + + // Global state + + // Reactors are uniquely identified by their names. + var reactors: str -> ReactorState + + // Reactor (name) assigned to each channel ID. + var reactorsByCh: Byte -> str + + // Helper action to (only) update the state of a given reactor. 
+ action updateReactorTo(reactor: ReactorState): bool = all { + reactors' = reactors.set(reactor.name, reactor), + reactorsByCh' = reactorsByCh + } + + // State transitions performed by the p2p layer, invoking `p2p.Reactor` methods: + + // Code: Switch.AddReactor(name string, reactor Reactor) + action register(name: str, reactor: ReactorState): bool = all { + reactor.state == "init", + // Assign the reactor as responsible for its channel IDs, which + // should not be already assigned to another reactor. + val chIDs = reactor.GetChannels().map(c => c.ID) + all { + size(chIDs.intersect(reactorsByCh.keys())) == 0, + reactorsByCh' = reactorsByCh.keys().union(chIDs). + mapBy(id => if (id.in(chIDs)) name + else reactorsByCh.get(id)), + }, + // Register the reactor by its name, which must be unique. + not(name.in(reactors.keys())), + reactors' = reactors.put(name, + reactor.SetSwitch(true).with("name", name)) + } + + // Code: Switch.OnStart() + action start(reactor: ReactorState): bool = all { + reactor.state == "registered", + updateReactorTo(reactor.OnStart()) + } + + // Code: Switch.addPeer(p Peer): preamble + action initPeer(reactor: ReactorState, peer: Peer): bool = all { + reactor.state == "running", + not(peer.ID.in(reactor.peers)), + updateReactorTo(reactor.InitPeer(peer)._1) + } + + // Code: Switch.addPeer(p Peer): conclusion + action addPeer(reactor: ReactorState, peer: Peer): bool = all { + reactor.state == "running", + peer.ID.in(reactor.peers), // InitPeer(peer) and not RemovePeer(peer) + reactor.routines.filter(r => r.peer.ID == peer.ID).size() == 0, + updateReactorTo(reactor.AddPeer(peer)) + } + + // Code: Switch.stopAndRemovePeer(peer Peer, reason interface{}) + action removePeer(reactor: ReactorState, peer: Peer, reason: str): bool = all { + reactor.state == "running", + peer.ID.in(reactor.peers), // InitPeer(peer) and not RemovePeer(peer) + // Routines might not be started, namely: not AddPeer(peer) + // Routines could also be already stopped if Peer has 
errored. + updateReactorTo(reactor.RemovePeer(peer, reason)) + } + + // Code: Peer type, onReceive := func(chID byte, msgBytes []byte) + action receive(reactor: ReactorState, e: Envelope): bool = all { + reactor.state == "running", + // The message's sender is an active peer + e.Src.ID.in(reactor.peers), + // Reactor is assigned to the message's channel ID + e.ChannelID.in(reactorsByCh.keys()), + reactorsByCh.get(e.ChannelID) == reactor.name, + reactor.GetChannels().exists(c => c.ID == e.ChannelID), + updateReactorTo(reactor.Receive(e)) + } + + // Code: Switch.OnStop() + action stop(reactor: ReactorState): bool = all { + reactor.state == "running", + // Either no peer was added or all peers were removed + reactor.peers.size() == 0, + updateReactorTo(reactor.OnStop()) + } + + // Simulation support + + action init = all { + reactors' = Map(), + reactorsByCh' = Map(), + } + + // Modelled reactor configuration + pure val reactorName = "myReactor" + pure val reactorChannels = Set({ID: "3", Priority: 1}, {ID: "7", Priority: 2}) + + // For retro-compatibility: the state of the modelled reactor + def state(): ReactorState = { + reactors.get(reactorName) + } + + pure val samplePeers = Set({ID: "p1"}, {ID: "p3"}) + pure val sampleChIDs = Set("1", "3", "7") // ChannelID 1 not registered + pure val sampleMsgs = Set("ping", "pong") + + action step = any { + register(reactorName, NewReactor.with("channels", reactorChannels)), + val reactor = reactors.get(reactorName) + any { + reactor.start(), + reactor.stop(), + nondet peer = oneOf(samplePeers) + any { + // Peer-specific actions + reactor.initPeer(peer), + reactor.addPeer(peer), + reactor.removePeer(peer, "no reason"), + reactor.receive({Src: peer, + ChannelID: oneOf(sampleChIDs), + Message: oneOf(sampleMsgs)}), + } + } + } + +} diff --git a/spec/p2p/readme.md b/spec/p2p/readme.md deleted file mode 100644 index 9b39f3fc12..0000000000 --- a/spec/p2p/readme.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -order: 1 -parent: - title: P2P - 
order: 6 ---- - diff --git a/state/execution.go b/state/execution.go index 91f1719ca5..4d312a99ea 100644 --- a/state/execution.go +++ b/state/execution.go @@ -224,7 +224,7 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, 0, err } if len(validatorUpdates) > 0 { - blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Info("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) blockExec.metrics.ValidatorSetUpdates.Add(1) } if abciResponses.EndBlock.ConsensusParamUpdates != nil { diff --git a/state/execution_test.go b/state/execution_test.go index 5ae3c3c872..076e4e0599 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -776,6 +776,59 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { mp.AssertExpectations(t) } +// TestPrepareProposalCountSerializationOverhead tests that the block creation logic returns +// an error if the ResponsePrepareProposal returned from the application is at the limit of +// its size and will go beyond the limit upon serialization. 
+func TestPrepareProposalCountSerializationOverhead(t *testing.T) { + const height = 2 + + state, stateDB, privVals := makeState(1, height) + // limit max block size + var bytesPerTx int64 = 4 + const nValidators = 1 + nonDataSize := 5000 - types.MaxDataBytes(5000, 0, nValidators) + state.ConsensusParams.Block.MaxBytes = bytesPerTx*1024 + nonDataSize + maxDataBytes := types.MaxDataBytes(state.ConsensusParams.Block.MaxBytes, 0, nValidators) + + stateStore := sm.NewStore(stateDB, sm.StoreOptions{ + DiscardABCIResponses: false, + }) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := test.MakeNTxs(height, maxDataBytes/bytesPerTx) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + app := &abcimocks.Application{} + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(abci.ResponsePrepareProposal{ + Txs: types.Txs(txs).ToSliceOfBytes(), + }, nil) + + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) + err := proxyApp.Start() + require.NoError(t, err) + defer proxyApp.Stop() //nolint:errcheck // ignore for tests + + blockExec := sm.NewBlockExecutor( + stateStore, + log.NewNopLogger(), + proxyApp.Consensus(), + mp, + evpool, + ) + pa, _ := state.Validators.GetByIndex(0) + commit, err := makeValidCommit(height, types.BlockID{}, state.Validators, privVals) + require.NoError(t, err) + block, err := blockExec.CreateProposalBlock(height, state, commit, pa) + require.Nil(t, block) + require.ErrorContains(t, err, "transaction data size exceeds maximum") + + mp.AssertExpectations(t) +} + // TestPrepareProposalErrorOnPrepareProposalError tests when the client returns an error // upon calling PrepareProposal on it. 
func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { diff --git a/state/export_test.go b/state/export_test.go index a15414c926..941cf32f3b 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -45,3 +45,11 @@ func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *type stateStore := dbStore{db, StoreOptions{DiscardABCIResponses: false}} return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) } + +func Int64ToBytes(val int64) []byte { + return int64ToBytes(val) +} + +func Int64FromBytes(val []byte) int64 { + return int64FromBytes(val) +} diff --git a/state/indexer/query_range.go b/state/indexer/query_range.go index b4af7c7169..15e523c2a5 100644 --- a/state/indexer/query_range.go +++ b/state/indexer/query_range.go @@ -87,14 +87,13 @@ func (qr QueryRange) UpperBoundValue() interface{} { func LookForRangesWithHeight(conditions []query.Condition) (queryRange QueryRanges, indexes []int, heightRange QueryRange) { queryRange = make(QueryRanges) for i, c := range conditions { - heightKey := false if IsRangeOperation(c.Op) { + heightKey := c.CompositeKey == types.BlockHeightKey || c.CompositeKey == types.TxHeightKey r, ok := queryRange[c.CompositeKey] if !ok { r = QueryRange{Key: c.CompositeKey} if c.CompositeKey == types.BlockHeightKey || c.CompositeKey == types.TxHeightKey { heightRange = QueryRange{Key: c.CompositeKey} - heightKey = true } } diff --git a/state/store.go b/state/store.go index 61bdc160da..3df10f13ef 100644 --- a/state/store.go +++ b/state/store.go @@ -1,6 +1,7 @@ package state import ( + "encoding/binary" "errors" "fmt" @@ -41,6 +42,7 @@ func calcABCIResponsesKey(height int64) []byte { //---------------------- var lastABCIResponseKey = []byte("lastABCIResponseKey") +var offlineStateSyncHeight = []byte("offlineStateSyncHeightKey") //go:generate ../scripts/mockery_generate.sh Store @@ -94,6 +96,14 @@ type StoreOptions struct { var _ Store = (*dbStore)(nil) +func IsEmpty(store dbStore) (bool, error) 
{ + state, err := store.Load() + if err != nil { + return false, err + } + return state.IsEmpty(), nil +} + // NewStore creates the dbStore of the state pkg. func NewStore(db dbm.DB, options StoreOptions) Store { return dbStore{db, options} @@ -658,6 +668,61 @@ func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, par return nil } +/* Custom struct to handle offline state sync + Contains reference to a store so that it can access the DB +*/ + +type BootstrapStore struct { + dbStore +} + +func NewBootstrapStore(db dbm.DB, options StoreOptions) BootstrapStore { + return BootstrapStore{ + dbStore{ + db: db, + StoreOptions: options, + }, + } +} + +func (store BootstrapStore) SetOfflineStateSyncHeight(height int64) error { + err := store.db.SetSync(offlineStateSyncHeight, int64ToBytes(height)) + if err != nil { + return err + } + return nil + +} + +// Gets the height at which the store is bootstrapped after out of band statesync +func (store BootstrapStore) GetOfflineStateSyncHeight() (int64, error) { + + buf, err := store.db.Get(offlineStateSyncHeight) + if err != nil { + return 0, err + } + + if len(buf) == 0 { + return 0, errors.New("value empty") + } + + height := int64FromBytes(buf) + if height < 0 { + return 0, errors.New("invalid value for height: height cannot be negative") + } + return height, nil +} + func (store dbStore) Close() error { return store.db.Close() } +func int64FromBytes(bz []byte) int64 { + v, _ := binary.Varint(bz) + return v +} + +func int64ToBytes(i int64) []byte { + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutVarint(buf, i) + return buf[:n] +} diff --git a/state/store_test.go b/state/store_test.go index 1c4fcbc7ce..a8f38be2df 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -12,9 +12,7 @@ import ( abci "github.com/cometbft/cometbft/abci/types" cfg "github.com/cometbft/cometbft/config" - "github.com/cometbft/cometbft/crypto" "github.com/cometbft/cometbft/crypto/ed25519" - cmtrand 
"github.com/cometbft/cometbft/libs/rand" cmtstate "github.com/cometbft/cometbft/proto/tendermint/state" sm "github.com/cometbft/cometbft/state" "github.com/cometbft/cometbft/types" @@ -117,7 +115,7 @@ func TestPruneStates(t *testing.T) { // Generate a bunch of state data. Validators change for heights ending with 3, and // parameters when ending with 5. - validator := &types.Validator{Address: cmtrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} + validator := &types.Validator{Address: pk.Address(), VotingPower: 100, PubKey: pk} validatorSet := &types.ValidatorSet{ Validators: []*types.Validator{validator}, Proposer: validator, @@ -303,3 +301,8 @@ func TestLastABCIResponses(t *testing.T) { assert.Equal(t, sm.ErrABCIResponsesNotPersisted, err) }) } +func TestIntConversion(t *testing.T) { + x := int64(10) + b := sm.Int64ToBytes(x) + require.Equal(t, x, sm.Int64FromBytes(b)) +} diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go index 4b0cfb90ce..b5d1eb52eb 100644 --- a/state/txindex/indexer_service.go +++ b/state/txindex/indexer_service.go @@ -94,7 +94,7 @@ func (is *IndexerService) OnStart() error { return } } else { - is.Logger.Info("indexed block exents", "height", height) + is.Logger.Info("indexed block events", "height", height) } if err = is.txIdxr.AddBatch(batch); err != nil { diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index 34ae6b11c8..e8246ece04 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -322,6 +322,89 @@ func TestTxSearchEventMatch(t *testing.T) { }) } } + +func TestTxSearchEventMatchByHeight(t *testing.T) { + + indexer := NewTxIndex(db.NewMemDB()) + + txResult := txResultWithEvents([]abci.Event{ + {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}, {Key: "owner", Value: "Ana", Index: true}}}, + }) + + err := indexer.Index(txResult) + require.NoError(t, err) + + txResult10 := txResultWithEvents([]abci.Event{ + 
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}, {Key: "owner", Value: "/Ivan/.test", Index: true}}}, + }) + txResult10.Tx = types.Tx("HELLO WORLD 10") + txResult10.Height = 10 + + err = indexer.Index(txResult10) + require.NoError(t, err) + + testCases := map[string]struct { + q string + resultsLength int + }{ + "Return all events from a height 1": { + q: "tx.height = 1", + resultsLength: 1, + }, + "Return all events from a height 10": { + q: "tx.height = 10", + resultsLength: 1, + }, + "Return all events from a height 5": { + q: "tx.height = 5", + resultsLength: 0, + }, + "Return all events from a height in [2; 5]": { + q: "tx.height >= 2 AND tx.height <= 5", + resultsLength: 0, + }, + "Return all events from a height in [1; 5]": { + q: "tx.height >= 1 AND tx.height <= 5", + resultsLength: 1, + }, + "Return all events from a height in [1; 10]": { + q: "tx.height >= 1 AND tx.height <= 10", + resultsLength: 2, + }, + "Return all events from a height in [1; 5] by account.number": { + q: "tx.height >= 1 AND tx.height <= 5 AND account.number=1", + resultsLength: 1, + }, + "Return all events from a height in [1; 10] by account.number 2": { + q: "tx.height >= 1 AND tx.height <= 10 AND account.number=1", + resultsLength: 2, + }, + } + + ctx := context.Background() + + for _, tc := range testCases { + tc := tc + t.Run(tc.q, func(t *testing.T) { + results, err := indexer.Search(ctx, query.MustParse(tc.q)) + assert.NoError(t, err) + + assert.Len(t, results, tc.resultsLength) + if tc.resultsLength > 0 { + for _, txr := range results { + if txr.Height == 1 { + assert.True(t, proto.Equal(txResult, txr)) + } else if txr.Height == 10 { + assert.True(t, proto.Equal(txResult10, txr)) + } else { + assert.True(t, false) + } + } + } + }) + } +} + func TestTxSearchWithCancelation(t *testing.T) { indexer := NewTxIndex(db.NewMemDB()) diff --git a/store/store.go b/store/store.go index 125e11389e..19583496d0 100644 --- a/store/store.go +++ 
b/store/store.go @@ -54,6 +54,12 @@ func NewBlockStore(db dbm.DB) *BlockStore { } } +func (bs *BlockStore) IsEmpty() bool { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + return bs.base == bs.height && bs.base == 0 +} + // Base returns the first known contiguous block height, or 0 for empty block stores. func (bs *BlockStore) Base() int64 { bs.mtx.RLock() diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index a832541771..ed79243b00 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -13,6 +13,7 @@ import ( "github.com/cometbft/cometbft/abci/example/code" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/log" + cmttypes "github.com/cometbft/cometbft/types" "github.com/cometbft/cometbft/version" ) @@ -162,6 +163,19 @@ func (app *Application) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDelive return abci.ResponseDeliverTx{Code: code.CodeTypeOK} } +func (app *Application) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { + for _, ev := range req.ByzantineValidators { + app.logger.Info("Misbehavior. Slashing validator", + "validator_address", ev.GetValidator().Address, + "type", ev.GetType(), + "height", ev.GetHeight(), + "time", ev.GetTime(), + "total_voting_power", ev.GetTotalVotingPower(), + ) + } + return abci.ResponseBeginBlock{} +} + // EndBlock implements ABCI. 
func (app *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { valUpdates, err := app.validatorUpdates(uint64(req.Height)) @@ -275,10 +289,11 @@ func (app *Application) PrepareProposal( txs := make([][]byte, 0, len(req.Txs)) var totalBytes int64 for _, tx := range req.Txs { - totalBytes += int64(len(tx)) - if totalBytes > req.MaxTxBytes { + txLen := cmttypes.ComputeProtoSizeForTxs([]cmttypes.Tx{tx}) + if totalBytes+txLen > req.MaxTxBytes { break } + totalBytes += txLen txs = append(txs, tx) } diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 2ffe56d0b9..677490d251 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,7 +1,7 @@ # We need to build in a Linux environment to support C libraries, e.g. RocksDB. # We use Debian instead of Alpine, so that we can use binary database packages # instead of spending time compiling them. -FROM golang:1.20-bullseye +FROM golang:1.21-bullseye RUN apt-get -qq update -y && apt-get -qq upgrade -y >/dev/null RUN apt-get -qq install -y libleveldb-dev librocksdb-dev >/dev/null diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 364a836e1c..2ca6399a8d 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -80,6 +80,10 @@ type Manifest struct { // Enable or disable Prometheus metrics on all nodes. // Defaults to false (disabled). Prometheus bool `toml:"prometheus"` + + // Maximum number of peers to which the node gossips transactions + ExperimentalMaxGossipConnectionsToPersistentPeers uint `toml:"experimental_max_gossip_connections_to_persistent_peers"` + ExperimentalMaxGossipConnectionsToNonPersistentPeers uint `toml:"experimental_max_gossip_connections_to_non_persistent_peers"` } // ManifestNode represents a node in a testnet manifest. 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index eb65c0e750..ecc9a5c166 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -65,26 +65,28 @@ const ( // Testnet represents a single testnet. type Testnet struct { - Name string - File string - Dir string - IP *net.IPNet - InitialHeight int64 - InitialState map[string]string - Validators map[*Node]int64 - ValidatorUpdates map[int64]map[*Node]int64 - Nodes []*Node - KeyType string - Evidence int - LoadTxSizeBytes int - LoadTxBatchSize int - LoadTxConnections int - ABCIProtocol string - PrepareProposalDelay time.Duration - ProcessProposalDelay time.Duration - CheckTxDelay time.Duration - UpgradeVersion string - Prometheus bool + Name string + File string + Dir string + IP *net.IPNet + InitialHeight int64 + InitialState map[string]string + Validators map[*Node]int64 + ValidatorUpdates map[int64]map[*Node]int64 + Nodes []*Node + KeyType string + Evidence int + LoadTxSizeBytes int + LoadTxBatchSize int + LoadTxConnections int + ABCIProtocol string + PrepareProposalDelay time.Duration + ProcessProposalDelay time.Duration + CheckTxDelay time.Duration + UpgradeVersion string + Prometheus bool + ExperimentalMaxGossipConnectionsToPersistentPeers uint + ExperimentalMaxGossipConnectionsToNonPersistentPeers uint } // Node represents a CometBFT node in a testnet. 
@@ -150,6 +152,8 @@ func LoadTestnet(manifest Manifest, fname string, ifd InfrastructureData) (*Test CheckTxDelay: manifest.CheckTxDelay, UpgradeVersion: manifest.UpgradeVersion, Prometheus: manifest.Prometheus, + ExperimentalMaxGossipConnectionsToPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToPersistentPeers, + ExperimentalMaxGossipConnectionsToNonPersistentPeers: manifest.ExperimentalMaxGossipConnectionsToNonPersistentPeers, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index accb63480c..1eadf5b0a2 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -25,7 +25,7 @@ import ( // 1 in 4 evidence is light client evidence, the rest is duplicate vote evidence const lightClientEvidenceRatio = 4 -// InjectEvidence takes a running testnet and generates an amount of valid +// InjectEvidence takes a running testnet and generates an amount of valid/invalid // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`. // Evidence is random and can be a mixture of LightClientAttackEvidence and // DuplicateVoteEvidence. 
@@ -88,10 +88,12 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } var ev types.Evidence - for i := 1; i <= amount; i++ { + for i := 0; i < amount; i++ { + validEv := true if i%lightClientEvidenceRatio == 0 { + validEv = i%(lightClientEvidenceRatio*2) != 0 // Alternate valid and invalid evidence ev, err = generateLightClientAttackEvidence( - ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, + ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, validEv, ) } else { ev, err = generateDuplicateVoteEvidence( @@ -103,7 +105,15 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } _, err := client.BroadcastEvidence(ctx, ev) - if err != nil { + if !validEv { + // The tests will count committed evidences later on, + // and only valid evidences will make it + amount++ + } + if validEv != (err == nil) { + if err == nil { + return errors.New("submitting invalid evidence didn't return an error") + } return err } } @@ -148,6 +158,7 @@ func generateLightClientAttackEvidence( vals *types.ValidatorSet, chainID string, evTime time.Time, + validEvidence bool, ) (*types.LightClientAttackEvidence, error) { // forge a random header forgedHeight := height + 2 @@ -157,7 +168,7 @@ func generateLightClientAttackEvidence( // add a new bogus validator and remove an existing one to // vary the validator set slightly - pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals) + pv, conflictingVals, err := mutateValidatorSet(ctx, privVals, vals, !validEvidence) if err != nil { return nil, err } @@ -172,6 +183,11 @@ func generateLightClientAttackEvidence( return nil, err } + // malleate the last signature of the commit by adding one to its first byte + if !validEvidence { + commit.Signatures[len(commit.Signatures)-1].Signature[0]++ + } + ev := &types.LightClientAttackEvidence{ ConflictingBlock: &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -286,7 +302,11 @@ func 
makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc } } -func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *types.ValidatorSet, +func mutateValidatorSet( + ctx context.Context, + privVals []types.MockPV, + vals *types.ValidatorSet, + nop bool, ) ([]types.PrivValidator, *types.ValidatorSet, error) { newVal, newPrivVal, err := test.Validator(ctx, 10) if err != nil { @@ -294,10 +314,14 @@ func mutateValidatorSet(ctx context.Context, privVals []types.MockPV, vals *type } var newVals *types.ValidatorSet - if vals.Size() > 2 { - newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) + if nop { + newVals = types.NewValidatorSet(vals.Copy().Validators) } else { - newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal)) + if vals.Size() > 2 { + newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) + } else { + newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal)) + } } // we need to sort the priv validators with the same index as the validator set diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 0ec74fe8c8..fcb7a24b06 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -169,6 +169,8 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.P2P.AddrBookStrict = false cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second + cfg.Mempool.ExperimentalMaxGossipConnectionsToNonPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToNonPersistentPeers) + cfg.Mempool.ExperimentalMaxGossipConnectionsToPersistentPeers = int(node.Testnet.ExperimentalMaxGossipConnectionsToPersistentPeers) switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/types/tx.go b/types/tx.go index 0ba14dbadd..5cbb2cc40d 100644 --- a/types/tx.go +++ b/types/tx.go @@ -107,7 +107,7 @@ func ToTxs(txl [][]byte) Txs { func (txs Txs) Validate(maxSizeBytes int64) error { var size 
int64 for _, tx := range txs { - size += int64(len(tx)) + size += ComputeProtoSizeForTxs([]Tx{tx}) if size > maxSizeBytes { return fmt.Errorf("transaction data size exceeds maximum %d", maxSizeBytes) } diff --git a/types/validator.go b/types/validator.go index 886b32756d..3e95c467bc 100644 --- a/types/validator.go +++ b/types/validator.go @@ -46,8 +46,9 @@ func (v *Validator) ValidateBasic() error { return errors.New("validator has negative voting power") } - if len(v.Address) != crypto.AddressSize { - return fmt.Errorf("validator address is the wrong size: %v", v.Address) + addr := v.PubKey.Address() + if !bytes.Equal(v.Address, addr) { + return fmt.Errorf("validator address is incorrectly derived from pubkey. Exp: %v, got %v", addr, v.Address) } return nil diff --git a/types/validator_set.go b/types/validator_set.go index 019834ac1d..64536ec487 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -717,10 +717,36 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, // VerifyCommitLight verifies +2/3 of the set had signed the given commit. // -// This method is primarily used by the light client and does not check all the +// This method is primarily used by the light client and does NOT check all the // signatures. -func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, - height int64, commit *Commit) error { +func (vals *ValidatorSet) VerifyCommitLight( + chainID string, + blockID BlockID, + height int64, + commit *Commit, +) error { + return vals.verifyCommitLightInternal(chainID, blockID, height, commit, false) +} + +// VerifyCommitLightAllSignatures verifies +2/3 of the set had signed the given commit. +// +// This method DOES check all the signatures. 
+func (vals *ValidatorSet) VerifyCommitLightAllSignatures( + chainID string, + blockID BlockID, + height int64, + commit *Commit, +) error { + return vals.verifyCommitLightInternal(chainID, blockID, height, commit, true) +} + +func (vals *ValidatorSet) verifyCommitLightInternal( + chainID string, + blockID BlockID, + height int64, + commit *Commit, + countAllSignatures bool, +) error { if vals.Size() != len(commit.Signatures) { return NewErrInvalidCommitSignatures(vals.Size(), len(commit.Signatures)) @@ -738,8 +764,9 @@ func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, talliedVotingPower := int64(0) votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 for idx, commitSig := range commit.Signatures { - // No need to verify absent or nil votes. - if !commitSig.ForBlock() { + // No need to verify absent or nil votes if not counting all signatures, + // but need to verify nil votes if counting all signatures. + if (!countAllSignatures && !commitSig.ForBlock()) || (countAllSignatures && commitSig.Absent()) { continue } @@ -756,11 +783,14 @@ func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, talliedVotingPower += val.VotingPower // return as soon as +2/3 of the signatures are verified - if talliedVotingPower > votingPowerNeeded { + if !countAllSignatures && talliedVotingPower > votingPowerNeeded { return nil } } + if talliedVotingPower > votingPowerNeeded { + return nil + } return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} } @@ -770,9 +800,37 @@ func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, // NOTE the given validators do not necessarily correspond to the validator set // for this commit, but there may be some intersection. // -// This method is primarily used by the light client and does not check all the +// This method is primarily used by the light client and does NOT check all the // signatures. 
-func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Commit, trustLevel cmtmath.Fraction) error { +func (vals *ValidatorSet) VerifyCommitLightTrusting( + chainID string, + commit *Commit, + trustLevel cmtmath.Fraction, +) error { + return vals.verifyCommitLightTrustingInternal(chainID, commit, trustLevel, false) +} + +// VerifyCommitLightTrustingAllSignatures verifies that trustLevel of the validator +// set signed this commit. +// +// NOTE the given validators do not necessarily correspond to the validator set +// for this commit, but there may be some intersection. +// +// This method DOES check all the signatures. +func (vals *ValidatorSet) VerifyCommitLightTrustingAllSignatures( + chainID string, + commit *Commit, + trustLevel cmtmath.Fraction, +) error { + return vals.verifyCommitLightTrustingInternal(chainID, commit, trustLevel, true) +} + +func (vals *ValidatorSet) verifyCommitLightTrustingInternal( + chainID string, + commit *Commit, + trustLevel cmtmath.Fraction, + countAllSignatures bool, +) error { // sanity check if trustLevel.Denominator == 0 { return errors.New("trustLevel has zero Denominator") @@ -791,8 +849,9 @@ func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Comm votingPowerNeeded := totalVotingPowerMulByNumerator / int64(trustLevel.Denominator) for idx, commitSig := range commit.Signatures { - // No need to verify absent or nil votes. - if !commitSig.ForBlock() { + // No need to verify absent or nil votes if not counting all signatures, + // but need to verify nil votes if counting all signatures. 
+ if (!countAllSignatures && !commitSig.ForBlock()) || (countAllSignatures && commitSig.Absent()) { continue } @@ -816,12 +875,14 @@ func (vals *ValidatorSet) VerifyCommitLightTrusting(chainID string, commit *Comm talliedVotingPower += val.VotingPower - if talliedVotingPower > votingPowerNeeded { + if !countAllSignatures && talliedVotingPower > votingPowerNeeded { return nil } } } - + if talliedVotingPower > votingPowerNeeded { + return nil + } return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} } diff --git a/types/validator_set_test.go b/types/validator_set_test.go index d2e734ff38..94232f7b0e 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -5,6 +5,7 @@ import ( "fmt" "math" "sort" + "strconv" "strings" "testing" "testing/quick" @@ -300,18 +301,22 @@ func TestProposerSelection2(t *testing.T) { } func TestProposerSelection3(t *testing.T) { - vset := NewValidatorSet([]*Validator{ + vals := []*Validator{ newValidator([]byte("avalidator_address12"), 1), newValidator([]byte("bvalidator_address12"), 1), newValidator([]byte("cvalidator_address12"), 1), newValidator([]byte("dvalidator_address12"), 1), - }) + } - proposerOrder := make([]*Validator, 4) for i := 0; i < 4; i++ { - // need to give all validators to have keys pk := ed25519.GenPrivKey().PubKey() - vset.Validators[i].PubKey = pk + vals[i].PubKey = pk + vals[i].Address = pk.Address() + } + sort.Sort(ValidatorsByAddress(vals)) + vset := NewValidatorSet(vals) + proposerOrder := make([]*Validator, 4) + for i := 0; i < 4; i++ { proposerOrder[i] = vset.GetProposer() vset.IncrementProposerPriority(1) } @@ -721,7 +726,8 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { for _, tc := range testCases { tc := tc - t.Run(tc.description, func(t *testing.T) { + countAllSignatures := false + f := func(t *testing.T) { err := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit) if tc.expErr { if assert.Error(t, err, "VerifyCommit") { @@ -731,7 
+737,11 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { assert.NoError(t, err, "VerifyCommit") } - err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit) + if countAllSignatures { + err = vset.VerifyCommitLightAllSignatures(tc.chainID, tc.blockID, tc.height, tc.commit) + } else { + err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit) + } if tc.expErr { if assert.Error(t, err, "VerifyCommitLight") { assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight") @@ -739,7 +749,10 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { } else { assert.NoError(t, err, "VerifyCommitLight") } - }) + } + t.Run(tc.description+"/"+strconv.FormatBool(countAllSignatures), f) + countAllSignatures = true + t.Run(tc.description+"/"+strconv.FormatBool(countAllSignatures), f) } } @@ -768,7 +781,7 @@ func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) { } } -func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) { +func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajOfVotingPowerSignedIffNotAllSigs(t *testing.T) { var ( chainID = "test_chain_id" h = int64(3) @@ -779,6 +792,9 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, h, commit) + assert.NoError(t, err) + // malleate 4th signature (3 signatures are enough for 2/3+) vote := voteSet.GetByIndex(3) v := vote.ToProto() @@ -789,9 +805,11 @@ func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSign err = valSet.VerifyCommitLight(chainID, blockID, h, commit) assert.NoError(t, err) + err = valSet.VerifyCommitLightAllSignatures(chainID, blockID, h, commit) + assert.Error(t, err) // counting all signatures detects the malleated signature } -func 
TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) { +func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelSignedIffNotAllSigs(t *testing.T) { var ( chainID = "test_chain_id" h = int64(3) @@ -802,6 +820,13 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now()) require.NoError(t, err) + err = valSet.VerifyCommitLightTrustingAllSignatures( + chainID, + commit, + cmtmath.Fraction{Numerator: 1, Denominator: 3}, + ) + assert.NoError(t, err) + // malleate 3rd signature (2 signatures are enough for 1/3+ trust level) vote := voteSet.GetByIndex(2) v := vote.ToProto() @@ -812,6 +837,12 @@ func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotin err = valSet.VerifyCommitLightTrusting(chainID, commit, cmtmath.Fraction{Numerator: 1, Denominator: 3}) assert.NoError(t, err) + err = valSet.VerifyCommitLightTrustingAllSignatures( + chainID, + commit, + cmtmath.Fraction{Numerator: 1, Denominator: 3}, + ) + assert.Error(t, err) // counting all signatures detects the malleated signature } func TestEmptySet(t *testing.T) { diff --git a/types/validator_test.go b/types/validator_test.go index 5eb2ed7bf1..954e8ec23b 100644 --- a/types/validator_test.go +++ b/types/validator_test.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -74,7 +75,7 @@ func TestValidatorValidateBasic(t *testing.T) { Address: nil, }, err: true, - msg: "validator address is the wrong size: ", + msg: fmt.Sprintf("validator address is incorrectly derived from pubkey. Exp: %v, got ", pubKey.Address()), }, { val: &Validator{ @@ -82,7 +83,7 @@ func TestValidatorValidateBasic(t *testing.T) { Address: []byte{'a'}, }, err: true, - msg: "validator address is the wrong size: 61", + msg: fmt.Sprintf("validator address is incorrectly derived from pubkey. 
Exp: %v, got 61", pubKey.Address()), }, } diff --git a/version/version.go b/version/version.go index 0c9057a7ac..9b2a4974dd 100644 --- a/version/version.go +++ b/version/version.go @@ -5,7 +5,7 @@ const ( // The default version of TMCoreSemVer is the value used as the // fallback version of CometBFT when not using git describe. // It is formatted with semantic versioning. - TMCoreSemVer = "0.37.2" + TMCoreSemVer = "0.37.4" // ABCISemVer is the semantic version of the ABCI protocol ABCISemVer = "1.0.0" ABCIVersion = ABCISemVer