From 443c235e242b4e276ef50479cf87fa86d66a1bd8 Mon Sep 17 00:00:00 2001 From: chatton Date: Thu, 12 Mar 2026 10:34:16 +0000 Subject: [PATCH 1/3] feat: add TestDeFiSimulation benchmark for Uniswap V2 workload --- test/e2e/benchmark/spamoor_defi_test.go | 117 ++++++++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 test/e2e/benchmark/spamoor_defi_test.go diff --git a/test/e2e/benchmark/spamoor_defi_test.go b/test/e2e/benchmark/spamoor_defi_test.go new file mode 100644 index 000000000..ce5a65c71 --- /dev/null +++ b/test/e2e/benchmark/spamoor_defi_test.go @@ -0,0 +1,117 @@ +//go:build evm + +package benchmark + +import ( + "context" + "fmt" + "time" + + "github.com/celestiaorg/tastora/framework/docker/evstack/spamoor" +) + +// TestDeFiSimulation measures throughput under a realistic DeFi workload using +// Uniswap V2 swaps. Each tx involves deep call chains, event emission, and +// multi-contract storage operations — representative of production L2 traffic. +// +// Shares system configuration with TestERC20Throughput (100ms blocks, 100M gas, +// 25ms scrape) so the only variable is workload type. The gap between ERC20 +// and Uniswap MGas/s shows the cost of workload complexity. +// +// Primary metrics: MGas/s, TPS. +// Diagnostic metrics: per-span latency breakdown, ev-node overhead %. 
+func (s *SpamoorSuite) TestDeFiSimulation() { + const ( + numSpammers = 4 + countPerSpammer = 10000 + totalCount = numSpammers * countPerSpammer + waitTimeout = 10 * time.Minute + ) + + cfg := newBenchConfig("ev-node-defi") + + t := s.T() + ctx := t.Context() + w := newResultWriter(t, "DeFiSimulation") + defer w.flush() + + e := s.setupEnv(cfg) + + uniswapConfig := map[string]any{ + "throughput": 30, // 30 tx per 100ms slot = 300 tx/s per spammer, 1200 tx/s total + "total_count": countPerSpammer, + "max_pending": 50000, + "max_wallets": 200, + "base_fee": 20, + "tip_fee": 2, + "refill_amount": "10000000000000000000", // 10 ETH (swaps need ETH for WETH wrapping and router approvals) + "refill_balance": "5000000000000000000", // 5 ETH + "refill_interval": 600, + } + + s.Require().NoError(deleteAllSpammers(e.spamoorAPI), "failed to delete stale spammers") + + // launch all spammers before recording startBlock so warm-up + // (Uniswap contract deploys + liquidity provision + wallet funding) + // is excluded from the measurement window. + var spammerIDs []int + for i := range numSpammers { + name := fmt.Sprintf("bench-defi-%d", i) + id, err := e.spamoorAPI.CreateSpammer(name, spamoor.ScenarioUniswapSwaps, uniswapConfig, true) + s.Require().NoError(err, "failed to create spammer %s", name) + spammerIDs = append(spammerIDs, id) + t.Cleanup(func() { _ = e.spamoorAPI.DeleteSpammer(id) }) + } + + // give spammers time to deploy contracts and provision liquidity, + // then verify none failed during warmup. + time.Sleep(5 * time.Second) + assertSpammersRunning(t, e.spamoorAPI, spammerIDs) + + // wait for warmup transactions (contract deploys, liquidity adds) to land + // before recording start block. 
+ pollSentTotal := func() (float64, error) { + metrics, mErr := e.spamoorAPI.GetMetrics() + if mErr != nil { + return 0, mErr + } + return sumCounter(metrics["spamoor_transactions_sent_total"]), nil + } + waitForMetricTarget(t, "spamoor_transactions_sent_total (warmup)", pollSentTotal, float64(cfg.WarmupTxs), cfg.WaitTimeout) + + // reset trace window to exclude warmup spans + e.traces.resetStartTime() + + startHeader, err := e.ethClient.HeaderByNumber(ctx, nil) + s.Require().NoError(err, "failed to get start block header") + startBlock := startHeader.Number.Uint64() + loadStart := time.Now() + t.Logf("start block: %d (after warmup)", startBlock) + + // wait for all transactions to be sent + waitForMetricTarget(t, "spamoor_transactions_sent_total", pollSentTotal, float64(totalCount), cfg.WaitTimeout) + + // wait for pending txs to drain + drainCtx, drainCancel := context.WithTimeout(ctx, 30*time.Second) + defer drainCancel() + if err := waitForDrain(drainCtx, t.Logf, e.ethClient, 10); err != nil { + t.Logf("warning: %v", err) + } + wallClock := time.Since(loadStart) + + endHeader, err := e.ethClient.HeaderByNumber(ctx, nil) + s.Require().NoError(err, "failed to get end block header") + endBlock := endHeader.Number.Uint64() + t.Logf("end block: %d (range %d blocks)", endBlock, endBlock-startBlock) + + // collect block-level gas/tx metrics + bm, err := collectBlockMetrics(ctx, e.ethClient, startBlock, endBlock) + s.Require().NoError(err, "failed to collect block metrics") + + traces := s.collectTraces(e, cfg.ServiceName) + + result := newBenchmarkResult("DeFiSimulation", bm, traces) + s.Require().Greater(result.summary.SteadyState, time.Duration(0), "expected non-zero steady-state duration") + result.log(t, wallClock) + w.addEntries(result.entries()) +} From e7ce827b638ec0a1641b59fee5f78abd13c22284 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 23 Mar 2026 10:01:50 +0000 Subject: [PATCH 2/3] refactor: use benchConfig for TestDeFiSimulation spamoor parameters 
Replace hardcoded spammer config with benchConfig fields so all parameters are controllable via BENCH_* env vars. Add pair_count and rebroadcast as configurable options for the uniswap-swaps scenario. Also log the resolved config at test start and use requireSpammersRunning for the post-warmup health check.
var spammerIDs []int - for i := range numSpammers { + for i := range cfg.NumSpammers { name := fmt.Sprintf("bench-defi-%d", i) id, err := e.spamoorAPI.CreateSpammer(name, spamoor.ScenarioUniswapSwaps, uniswapConfig, true) s.Require().NoError(err, "failed to create spammer %s", name) @@ -66,7 +62,7 @@ func (s *SpamoorSuite) TestDeFiSimulation() { // give spammers time to deploy contracts and provision liquidity, // then verify none failed during warmup. time.Sleep(5 * time.Second) - assertSpammersRunning(t, e.spamoorAPI, spammerIDs) + requireSpammersRunning(t, e.spamoorAPI, spammerIDs) // wait for warmup transactions (contract deploys, liquidity adds) to land // before recording start block. @@ -89,7 +85,7 @@ func (s *SpamoorSuite) TestDeFiSimulation() { t.Logf("start block: %d (after warmup)", startBlock) // wait for all transactions to be sent - waitForMetricTarget(t, "spamoor_transactions_sent_total", pollSentTotal, float64(totalCount), cfg.WaitTimeout) + waitForMetricTarget(t, "spamoor_transactions_sent_total", pollSentTotal, float64(cfg.totalCount()), cfg.WaitTimeout) // wait for pending txs to drain drainCtx, drainCancel := context.WithTimeout(ctx, 30*time.Second) From eeeafe92e18a271d91b4363a98158c83f71c1151 Mon Sep 17 00:00:00 2001 From: chatton Date: Mon, 23 Mar 2026 10:36:08 +0000 Subject: [PATCH 3/3] ci: add DeFi simulation benchmark to CI workflow --- .github/workflows/benchmark.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index c2eed7091..0c0ee74de 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -100,6 +100,28 @@ jobs: -run='^TestSpamoorSuite$/^TestERC20Throughput$' -v -timeout=15m \ ./benchmark/ --evm-binary=../../../build/evm + defi-benchmark: + name: DeFi Simulation Benchmark + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2 + - name: 
Set up Go + uses: actions/setup-go@4b73464bb391d4059bd26b0524d20df3927bd417 # v6.3.0 + with: + go-version-file: ./go.mod + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@4d04d5d9486b7bd6fa91e7baf45bbb4f8b9deedd # v4.0.0 + - name: Install just + uses: extractions/setup-just@v3 + - name: Build binaries + run: just build-evm build-da + - name: Run DeFi simulation test + run: | + cd test/e2e && go test -tags evm \ + -run='^TestSpamoorSuite$/^TestDeFiSimulation$' -v -timeout=15m \ + ./benchmark/ --evm-binary=../../../build/evm + # single job to push all results to gh-pages sequentially, avoiding race conditions publish-benchmarks: name: Publish Benchmark Results