From a6ca48446fa6b5aa6930d5544584fcde4c97ee47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A9ter=20Garamv=C3=B6lgyi?= Date: Thu, 10 Oct 2024 14:18:18 +0200 Subject: [PATCH] chore: merge upstream b787d9e52 (#18) * chore(provider): use `get_in_memory_or_storage` on `transactions_by_block_range` (#11453) * chore(exex): adjust WAL gauge metric names (#11454) * chore(provider): use `get_in_memory_or_storage_by_block` on `fn block_body_indices` (#11452) * chore(provider): find `last_database_block_number` with `BlockState` anchor instead (#11455) * feat: add metrics for failed deliveries (#11456) * chore: release 1.0.8 (#11457) * chore(provider): use `block_ref` instead on `BlockState` (#11458) * feat(rpc): Add codes in execution witness return (#11443) * fix: ensure the request's gas limit does not exceed the target gas limit (#11462) * feat(grafana): ExEx WAL (#11461) * fix: use correct rpc errors (#11463) * chore(provider): clone after filtering on `sealed_headers_while` (#11459) Co-authored-by: Matthias Seitz * Map `TransferKind::EofCreate` => `OperationType::OpEofCreate` (#11090) Co-authored-by: Matthias Seitz * fix: windows build (#11465) * chore: use `block_ref` on `CanonicalInMemoryState` (#11467) * chore(rpc): remove include_preimage param on debug_execution_witness (#11466) * feat: make addons stateful (#11204) Co-authored-by: Arsenii Kulikov * Relax Trait Bounds on TransactionPool::Transaction and EthPoolTransaction (#11079) Co-authored-by: Matthias Seitz * test: add unit tests for `CanonicalChain` (#11472) * feat(perf): integrate OnStateHook in executor (#11345) * feat: cleaned up prepare_call_env() (#11469) Co-authored-by: Matthias Seitz * chore: use block.body directly (#11474) * feat: Add metrics to track transactions by type in txpool (#11403) Co-authored-by: garwah Co-authored-by: Matthias Seitz * chore: op chainspec (#11415) * chore(exex): more backfill debug logs (#11476) * fix(exex): use thresholds in stream backfill (#11478) * chore(db): capture tx 
opening backtrace in debug mode (#11477) * chore(sdk): `SealedHeader` generic over header (#11429) * chore(lint): fix lint primitives (#11487) * chore(provider): add more test coverage on `BlockchainProvider::*by_tx_range` queries (#11480) * test: ensure default hash matches (#11486) * chore: rm deposit contract config for op (#11479) * chore(lint): fix lint storage (#11485) * chore(provider): add more test coverage on `BlockchainProvider::*by_block_range` queries (#11488) * fix(rpc-eth-types): incorrect error msg(; -> :) (#11503) Signed-off-by: jsvisa * Reexport optimism specific crates from `op-reth` (#11499) * feat: rpc replace function created (#11501) Co-authored-by: Matthias Seitz * Add metrics for failed deliveries to Grafana dashboard (#11481) * chore: Remove duplicate EthereumChainSpecParser in favor of existing EthChainSpecParser (#11412) Co-authored-by: garwah Co-authored-by: Matthias Seitz * chore(lint): fix `clippy::needles_lifetimes` (#11496) * chore: rm from genesis impl (#11509) * fix: cap gas limit properly (#11505) * feat: add PoolBuilderConfigOverrides (#11507) * feat: expose Op node network_config helper (#11506) * chore(deps): weekly `cargo update` (#11518) Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> * test: add unit tests for `PruneLimiter` (#11517) * feat(provider): add `test_race` to `BlockchainProvider2` tests (#11523) * fix(tree): make state methods work for historical blocks (#11265) Co-authored-by: Roman Krasiuk Co-authored-by: Federico Gimenez * rpc: use `eth_api()` method (#11516) * chore: delete rpc-types (#11528) * feat: add get_highest_tx_by_sender to pools (#11514) Co-authored-by: Matthias Seitz * ci: add `windows` cargo check (#11468) * fix: acquire permit first (#11537) * chore: dont fail on ttd (#11539) * grafana: add metrics of all transactions in pool by type (#11515) Co-authored-by: Emilia Hane Co-authored-by: Emilia Hane * feat(exex): subscribe to notifications with head using 
`ExExContext` (#11500) * chore: enforce window (#11540) * Refactor get_payload_bodies_by_hash_with to be non-blocking (#11511) Co-authored-by: Matthias Seitz * Introduce Eth PayloadTypes Impl (#11519) Co-authored-by: Matthias Seitz * fix(op-reth): add jemalloc feature to optimism-cli for version (#11543) * fix(grafana): remove rate function from panel "Transactions by Type in Pool" (#11542) * chore: move ethfiltererror (#11552) * chore: rm redundant type hint (#11557) * Introduce Op PayloadTypes Impl (#11558) Co-authored-by: Matthias Seitz * chore: chain manual serialisation implementation (#11538) * chore: rm unused optimism feature (#11559) * feat(trie): expose storage proofs (#11550) * chore: rm unused optimism feature from compat (#11560) * Added InternalBlockExecutionError to execute.rs exports (#11525) Co-authored-by: Matthias Seitz * docs(exex): include code for ExEx book from real files (#11545) * fix: actually configure the custom gas limit (#11565) * chore: relax trait bound for `EthTransactions` (#11571) * fix(provider): fix sub overflow on `tx_range` queries for empty blocks (#11568) * chore(provider): add more test coverage on `BlockchainProvider` non-range queries (#11564) * feat: adding a new method to network config builder (#11569) * chore: rm unused optimism feature from engine api (#11577) * chore: replace some revm deps (#11579) * chore: rm bad cap function (#11562) * feat: impl `Encodable2718` and `Decodable2718` for `PooledTransactionsElement` (#11482) * fix(exex): exhaust backfill job when using a stream (#11578) * fix: in-memory trie updates pruning (#11580) Co-authored-by: Matthias Seitz * chore(providers): test race condition on all `BlockchainProvider2` macro tests (#11574) * chore: also derive arb for test (#11588) * chore: bump alloy primitives 0 8 7 (#11586) * chore(ci): remove expected failures related to checksummed addresses (#11589) * chore(rpc): use `block_hash` instead on fetching `debug_trace_block` block (#11587) * feat: add 
mul support for SubPoolLimit (#11591) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> * docs: delete missing part path (#11590) Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> * fix: simplify reorg handling (#11592) * fix: use original bytes for codes (#11593) * perf(rpc): use `Arc * perf(rpc): optimistically retrieve block if near the tip on `eth_getLogs` (#11582) * update fork base commit * remove new book tests --------- Signed-off-by: jsvisa Co-authored-by: joshieDo <93316087+joshieDo@users.noreply.github.com> Co-authored-by: Alexey Shekhirin Co-authored-by: Matthias Seitz Co-authored-by: Francis Li Co-authored-by: Emilia Hane Co-authored-by: Arsenii Kulikov Co-authored-by: Eric Woolsey Co-authored-by: Thomas Coratger <60488569+tcoratger@users.noreply.github.com> Co-authored-by: Federico Gimenez Co-authored-by: Varun Doshi <61531351+varun-doshi@users.noreply.github.com> Co-authored-by: garwah <14845405+garwahl@users.noreply.github.com> Co-authored-by: garwah Co-authored-by: greged93 <82421016+greged93@users.noreply.github.com> Co-authored-by: Delweng Co-authored-by: Parikalp Bhardwaj <53660958+Parikalp-Bhardwaj@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: github-merge-queue <118344674+github-merge-queue@users.noreply.github.com> Co-authored-by: Dan Cline <6798349+Rjected@users.noreply.github.com> Co-authored-by: Roman Krasiuk Co-authored-by: crazykissshout Co-authored-by: tedison <76473430+edisontim@users.noreply.github.com> Co-authored-by: David <39963997+No0key@users.noreply.github.com> Co-authored-by: Emilia Hane Co-authored-by: Steven <112043913+stevencartavia@users.noreply.github.com> Co-authored-by: Debjit Bhowal Co-authored-by: Oliver Co-authored-by: Luca Provini Co-authored-by: John --- .github/assets/hive/expected_failures.yaml | 17 - .../hive/expected_failures_experimental.yaml | 17 - .github/workflows/lint.yml | 12 +- 
.github/workflows/windows.yml | 47 + .gitignore | 5 +- Cargo.lock | 282 +- Cargo.toml | 5 +- bin/reth/Cargo.toml | 1 + bin/reth/src/cli/mod.rs | 7 +- .../src/commands/debug_cmd/build_block.rs | 2 +- bin/reth/src/lib.rs | 1 + bin/reth/src/main.rs | 5 +- book/developers/exex/hello-world.md | 86 +- book/sources/Cargo.toml | 9 + book/sources/exex/hello-world/Cargo.toml | 13 + book/sources/exex/hello-world/src/bin/1.rs | 9 + book/sources/exex/hello-world/src/bin/2.rs | 20 + book/sources/exex/hello-world/src/bin/3.rs | 39 + crates/blockchain-tree/src/bundle.rs | 4 +- crates/blockchain-tree/src/canonical_chain.rs | 176 +- crates/chain-state/src/in_memory.rs | 53 +- crates/chain-state/src/memory_overlay.rs | 17 +- crates/chainspec/Cargo.toml | 9 +- crates/chainspec/src/spec.rs | 407 +-- crates/cli/cli/Cargo.toml | 4 + crates/cli/cli/src/chainspec.rs | 20 +- crates/cli/commands/Cargo.toml | 1 + crates/cli/commands/src/db/mod.rs | 2 +- crates/cli/commands/src/dump_genesis.rs | 2 +- crates/cli/commands/src/import.rs | 2 +- crates/cli/commands/src/node.rs | 7 +- crates/cli/commands/src/stage/unwind.rs | 2 +- .../engine/invalid-block-hooks/src/witness.rs | 8 +- crates/engine/tree/src/tree/metrics.rs | 8 + crates/engine/tree/src/tree/mod.rs | 21 +- crates/ethereum/cli/Cargo.toml | 7 - crates/ethereum/cli/src/chainspec.rs | 43 +- crates/ethereum/engine-primitives/src/lib.rs | 31 +- .../ethereum/engine-primitives/src/payload.rs | 2 +- crates/ethereum/evm/src/execute.rs | 56 +- crates/ethereum/node/src/node.rs | 7 +- crates/ethereum/node/tests/it/builder.rs | 4 +- crates/ethereum/node/tests/it/exex.rs | 2 +- crates/etl/src/lib.rs | 4 +- crates/evm/execution-types/src/chain.rs | 56 +- crates/evm/src/either.rs | 19 +- crates/evm/src/execute.rs | 27 +- crates/evm/src/noop.rs | 16 +- crates/evm/src/system_calls/mod.rs | 18 +- crates/evm/src/test_utils.rs | 18 +- crates/exex/exex/src/backfill/job.rs | 30 +- crates/exex/exex/src/backfill/stream.rs | 123 +- crates/exex/exex/src/context.rs 
| 15 +- crates/exex/exex/src/manager.rs | 119 +- crates/exex/exex/src/notifications.rs | 227 +- crates/exex/exex/src/wal/metrics.rs | 8 +- crates/exex/exex/src/wal/mod.rs | 4 +- crates/exex/test-utils/src/lib.rs | 5 + crates/exex/types/src/notification.rs | 2 +- crates/fs-util/src/lib.rs | 3 + crates/net/eth-wire-types/Cargo.toml | 2 +- crates/net/eth-wire-types/src/status.rs | 26 +- crates/net/eth-wire/Cargo.toml | 1 + .../net/eth-wire/tests/pooled_transactions.rs | 3 +- crates/net/network/src/config.rs | 20 + crates/net/network/src/peers.rs | 2 +- crates/net/network/src/transactions/config.rs | 28 + .../net/network/src/transactions/fetcher.rs | 92 +- crates/net/network/src/transactions/mod.rs | 30 +- crates/node/builder/src/builder/add_ons.rs | 2 + crates/node/builder/src/builder/mod.rs | 51 +- crates/node/builder/src/builder/states.rs | 4 +- crates/node/builder/src/components/pool.rs | 62 +- crates/node/builder/src/launch/engine.rs | 2 +- crates/node/builder/src/launch/mod.rs | 2 +- crates/node/builder/src/node.rs | 24 +- crates/node/builder/src/rpc.rs | 2 +- crates/node/core/Cargo.toml | 10 +- crates/node/core/src/args/mod.rs | 2 - crates/node/core/src/args/network.rs | 1 + crates/node/core/src/args/utils.rs | 99 - crates/node/types/src/lib.rs | 1 + crates/optimism/bin/Cargo.toml | 8 +- crates/optimism/bin/src/lib.rs | 73 + crates/optimism/bin/src/main.rs | 3 +- crates/optimism/chainspec/Cargo.toml | 5 +- crates/optimism/chainspec/src/lib.rs | 244 +- crates/optimism/cli/Cargo.toml | 9 + crates/optimism/cli/src/chainspec.rs | 37 +- crates/optimism/cli/src/lib.rs | 9 +- crates/optimism/evm/src/execute.rs | 62 +- crates/optimism/node/Cargo.toml | 1 - crates/optimism/node/src/engine.rs | 47 +- crates/optimism/node/src/node.rs | 74 +- crates/optimism/node/src/txpool.rs | 2 +- crates/optimism/node/tests/it/builder.rs | 2 +- crates/optimism/payload/Cargo.toml | 1 - crates/optimism/payload/src/payload.rs | 2 +- crates/optimism/rpc/Cargo.toml | 1 - 
crates/optimism/rpc/src/eth/transaction.rs | 12 +- crates/payload/builder/src/database.rs | 4 +- crates/primitives-traits/src/header/sealed.rs | 12 +- crates/primitives-traits/src/storage.rs | 2 +- crates/primitives-traits/src/withdrawal.rs | 2 +- crates/primitives/benches/validate_blob_tx.rs | 7 +- crates/primitives/src/alloy_compat.rs | 3 +- crates/primitives/src/block.rs | 15 +- crates/primitives/src/receipt.rs | 6 +- crates/primitives/src/transaction/mod.rs | 4 +- crates/primitives/src/transaction/pooled.rs | 396 +-- crates/primitives/src/transaction/sidecar.rs | 11 +- crates/primitives/src/transaction/util.rs | 12 +- .../prune/src/segments/static_file/headers.rs | 2 +- crates/prune/types/src/limiter.rs | 267 ++ crates/revm/src/test_utils.rs | 12 +- crates/rpc/rpc-api/src/debug.rs | 7 +- crates/rpc/rpc-builder/src/lib.rs | 260 +- crates/rpc/rpc-engine-api/Cargo.toml | 5 +- crates/rpc/rpc-engine-api/src/engine_api.rs | 51 +- crates/rpc/rpc-eth-api/Cargo.toml | 5 - crates/rpc/rpc-eth-api/src/helpers/block.rs | 17 +- crates/rpc/rpc-eth-api/src/helpers/call.rs | 66 +- crates/rpc/rpc-eth-api/src/helpers/state.rs | 73 +- .../rpc-eth-api/src/helpers/transaction.rs | 17 +- crates/rpc/rpc-eth-types/src/cache/db.rs | 25 +- crates/rpc/rpc-eth-types/src/cache/mod.rs | 53 +- crates/rpc/rpc-eth-types/src/error.rs | 12 +- crates/rpc/rpc-eth-types/src/lib.rs | 1 - crates/rpc/rpc-eth-types/src/logs_utils.rs | 123 +- crates/rpc/rpc-eth-types/src/revm_utils.rs | 16 +- crates/rpc/rpc-eth-types/src/utils.rs | 5 +- crates/rpc/rpc-layer/src/auth_layer.rs | 2 +- crates/rpc/rpc-testing-util/src/debug.rs | 8 +- crates/rpc/rpc-testing-util/src/trace.rs | 28 +- crates/rpc/rpc-types-compat/Cargo.toml | 5 +- crates/rpc/rpc/Cargo.toml | 2 - crates/rpc/rpc/src/debug.rs | 78 +- crates/rpc/rpc/src/eth/bundle.rs | 16 +- crates/rpc/rpc/src/eth/filter.rs | 138 +- crates/rpc/rpc/src/eth/helpers/signer.rs | 3 +- crates/rpc/rpc/src/eth/helpers/state.rs | 21 +- crates/rpc/rpc/src/otterscan.rs | 5 +- 
crates/rpc/rpc/src/trace.rs | 32 +- crates/rpc/rpc/src/txpool.rs | 8 +- crates/storage/db-api/src/cursor.rs | 16 +- crates/storage/db/Cargo.toml | 4 +- .../storage/db/src/implementation/mdbx/tx.rs | 20 +- crates/storage/db/src/lib.rs | 46 +- crates/storage/db/src/static_file/cursor.rs | 2 +- crates/storage/libmdbx-rs/src/codec.rs | 2 +- crates/storage/libmdbx-rs/src/cursor.rs | 8 +- .../storage/nippy-jar/src/compression/zstd.rs | 12 +- crates/storage/nippy-jar/src/cursor.rs | 2 +- .../src/providers/blockchain_provider.rs | 2685 +++++++---------- .../src/providers/bundle_state_provider.rs | 36 +- .../src/providers/database/provider.rs | 17 +- .../src/providers/state/historical.rs | 32 +- .../provider/src/providers/state/latest.rs | 33 +- .../provider/src/providers/state/macros.rs | 3 +- .../provider/src/providers/static_file/jar.rs | 10 +- .../src/providers/static_file/writer.rs | 4 +- .../storage/provider/src/test_utils/mock.rs | 12 +- .../storage/provider/src/test_utils/noop.rs | 9 + .../storage/provider/src/writer/database.rs | 2 +- crates/storage/provider/src/writer/mod.rs | 10 +- .../provider/src/writer/static_file.rs | 2 +- crates/storage/storage-api/src/storage.rs | 11 + crates/storage/storage-api/src/trie.rs | 12 +- crates/transaction-pool/src/config.rs | 24 +- crates/transaction-pool/src/lib.rs | 9 + crates/transaction-pool/src/maintain.rs | 17 +- crates/transaction-pool/src/metrics.rs | 12 +- crates/transaction-pool/src/noop.rs | 7 + crates/transaction-pool/src/ordering.rs | 11 +- crates/transaction-pool/src/pool/mod.rs | 30 +- crates/transaction-pool/src/pool/txpool.rs | 35 +- crates/transaction-pool/src/traits.rs | 52 +- crates/transaction-pool/src/validate/eth.rs | 3 +- crates/transaction-pool/src/validate/mod.rs | 14 +- crates/trie/db/src/hashed_cursor.rs | 4 +- crates/trie/db/src/lib.rs | 2 +- crates/trie/db/src/prefix_set.rs | 2 +- crates/trie/db/src/proof.rs | 50 +- crates/trie/db/src/trie_cursor.rs | 4 +- crates/trie/trie/src/forward_cursor.rs | 2 
+- .../trie/trie/src/hashed_cursor/post_state.rs | 6 +- crates/trie/trie/src/prefix_set.rs | 21 +- crates/trie/trie/src/proof.rs | 37 +- crates/trie/trie/src/trie_cursor/in_memory.rs | 6 +- crates/trie/trie/src/updates.rs | 4 +- crates/trie/trie/src/witness.rs | 2 +- docs/repo/layout.md | 3 +- etc/grafana/dashboards/overview.json | 432 ++- etc/grafana/dashboards/reth-mempool.json | 208 +- etc/grafana/dashboards/reth-performance.json | 28 +- .../beacon-api-sidecar-fetcher/src/main.rs | 2 +- examples/beacon-api-sse/src/main.rs | 2 +- examples/custom-engine-types/src/main.rs | 4 + examples/custom-evm/src/main.rs | 2 +- examples/custom-inspector/src/main.rs | 2 +- examples/custom-node-components/src/main.rs | 2 +- examples/custom-payload-builder/src/main.rs | 2 +- examples/node-custom-rpc/src/main.rs | 2 +- examples/stateful-precompile/src/main.rs | 2 +- examples/txpool-tracing/src/main.rs | 2 +- fork.yaml | 2 +- 206 files changed, 5284 insertions(+), 3816 deletions(-) create mode 100644 .github/workflows/windows.yml create mode 100644 book/sources/Cargo.toml create mode 100644 book/sources/exex/hello-world/Cargo.toml create mode 100644 book/sources/exex/hello-world/src/bin/1.rs create mode 100644 book/sources/exex/hello-world/src/bin/2.rs create mode 100644 book/sources/exex/hello-world/src/bin/3.rs delete mode 100644 crates/node/core/src/args/utils.rs create mode 100644 crates/optimism/bin/src/lib.rs diff --git a/.github/assets/hive/expected_failures.yaml b/.github/assets/hive/expected_failures.yaml index ddf1383ff4d9f..7a212a51dd5ef 100644 --- a/.github/assets/hive/expected_failures.yaml +++ b/.github/assets/hive/expected_failures.yaml @@ -20,23 +20,6 @@ rpc-compat: - eth_getBlockByNumber/get-latest (reth) - eth_getBlockByNumber/get-safe (reth) - - eth_createAccessList/create-al-contract-eip1559 (reth) - - eth_createAccessList/create-al-contract (reth) - - eth_getProof/get-account-proof-blockhash (reth) - - eth_getProof/get-account-proof-latest (reth) - - 
eth_getProof/get-account-proof-with-storage (reth) - - eth_getTransactionByBlockHashAndIndex/get-block-n (reth) - - eth_getTransactionByBlockNumberAndIndex/get-block-n (reth) - - eth_getTransactionByHash/get-access-list (reth) - - eth_getTransactionByHash/get-blob-tx (reth) - - eth_getTransactionByHash/get-dynamic-fee (reth) - - eth_getTransactionByHash/get-legacy-create (reth) - - eth_getTransactionByHash/get-legacy-input (reth) - - eth_getTransactionByHash/get-legacy-tx (reth) - - eth_getTransactionReceipt/get-legacy-contract (reth) - - eth_getTransactionReceipt/get-legacy-input (reth) - - eth_getTransactionReceipt/get-legacy-receipt (reth)' - # https://github.com/paradigmxyz/reth/issues/8732 engine-withdrawals: - Withdrawals Fork On Genesis (Paris) (reth) diff --git a/.github/assets/hive/expected_failures_experimental.yaml b/.github/assets/hive/expected_failures_experimental.yaml index 91fd1a88ab834..d4b3d2bcbd3c1 100644 --- a/.github/assets/hive/expected_failures_experimental.yaml +++ b/.github/assets/hive/expected_failures_experimental.yaml @@ -20,23 +20,6 @@ rpc-compat: - eth_getBlockByNumber/get-latest (reth) - eth_getBlockByNumber/get-safe (reth) - - eth_createAccessList/create-al-contract-eip1559 (reth) - - eth_createAccessList/create-al-contract (reth) - - eth_getProof/get-account-proof-blockhash (reth) - - eth_getProof/get-account-proof-latest (reth) - - eth_getProof/get-account-proof-with-storage (reth) - - eth_getTransactionByBlockHashAndIndex/get-block-n (reth) - - eth_getTransactionByBlockNumberAndIndex/get-block-n (reth) - - eth_getTransactionByHash/get-access-list (reth) - - eth_getTransactionByHash/get-blob-tx (reth) - - eth_getTransactionByHash/get-dynamic-fee (reth) - - eth_getTransactionByHash/get-legacy-create (reth) - - eth_getTransactionByHash/get-legacy-input (reth) - - eth_getTransactionByHash/get-legacy-tx (reth) - - eth_getTransactionReceipt/get-legacy-contract (reth) - - eth_getTransactionReceipt/get-legacy-input (reth) - - 
eth_getTransactionReceipt/get-legacy-receipt (reth)' - # https://github.com/paradigmxyz/reth/issues/8732 engine-withdrawals: - Withdrawals Fork On Genesis (Paris) (reth) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a547c986fe45a..4a4c53add9c73 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -30,7 +30,12 @@ jobs: - uses: Swatinem/rust-cache@v2 with: cache-on-failure: true - - run: cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + - name: Run clippy on binaries + run: cargo clippy --bin "${{ matrix.binary }}" --workspace --features "${{ matrix.network }} asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs" + env: + RUSTFLAGS: -D warnings + - name: Run clippy on book binary sources + run: cargo clippy --manifest-path book/sources/Cargo.toml --workspace --bins env: RUSTFLAGS: -D warnings @@ -128,7 +133,10 @@ jobs: - uses: dtolnay/rust-toolchain@nightly with: components: rustfmt - - run: cargo fmt --all --check + - name: Run fmt + run: cargo fmt --all --check + - name: Run fmt on book sources + run: cargo fmt --manifest-path book/sources/Cargo.toml --all --check udeps: name: udeps diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml new file mode 100644 index 0000000000000..03c491b368a65 --- /dev/null +++ b/.github/workflows/windows.yml @@ -0,0 +1,47 @@ +# Windows build + +name: windows + +on: + push: + branches: [main] + pull_request: + branches: [main] + merge_group: + +jobs: + check-reth: + runs-on: ubuntu-20.04 + timeout-minutes: 60 + + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: x86_64-pc-windows-gnu + - uses: taiki-e/install-action@cross + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: mingw-w64 + run: sudo 
apt-get install -y mingw-w64 + - name: Check Reth + run: cargo check --target x86_64-pc-windows-gnu + + check-op-reth: + runs-on: ubuntu-20.04 + timeout-minutes: 60 + + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@stable + with: + target: x86_64-pc-windows-gnu + - uses: taiki-e/install-action@cross + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - name: mingw-w64 + run: sudo apt-get install -y mingw-w64 + - name: Check OP-Reth + run: cargo check -p op-reth --features optimism --target x86_64-pc-windows-gnu diff --git a/.gitignore b/.gitignore index 2d5d851a5055e..00f7765424220 100644 --- a/.gitignore +++ b/.gitignore @@ -49,4 +49,7 @@ jwttoken/ crates/storage/libmdbx-rs/mdbx-sys/libmdbx/cmake-build-debug # Rust bug report -rustc-ice-* \ No newline at end of file +rustc-ice-* + +# Book sources should be able to build with the latest version +book/sources/Cargo.lock diff --git a/Cargo.lock b/Cargo.lock index d4fa4c6a6a261..863f0f35c18d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -282,9 +282,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.8.5" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "260d3ff3bff0bb84599f032a2f2c6828180b0ea0cd41fdaf44f39cef3ba41861" +checksum = "8ecb848c43f6b06ae3de2e4a67496cbbabd78ae87db0f1248934f15d76192c6a" dependencies = [ "alloy-rlp", "arbitrary", @@ -293,8 +293,9 @@ dependencies = [ "const-hex", "derive_arbitrary", "derive_more 1.0.0", + "foldhash", "getrandom 0.2.15", - "hashbrown 0.14.5", + "hashbrown 0.15.0", "hex-literal", "indexmap 2.5.0", "itoa", @@ -2563,7 +2564,7 @@ dependencies = [ [[package]] name = "ef-tests" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -3101,6 +3102,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" 
+version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -3433,6 +3440,15 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", +] + +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +dependencies = [ + "foldhash", "serde", ] @@ -5252,13 +5268,19 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.0.7" +version = "1.0.8" dependencies = [ "clap", "reth-cli-util", "reth-node-builder", + "reth-optimism-chainspec", "reth-optimism-cli", + "reth-optimism-consensus", + "reth-optimism-evm", + "reth-optimism-forks", "reth-optimism-node", + "reth-optimism-payload-builder", + "reth-optimism-primitives", "reth-optimism-rpc", "reth-provider", "tracing", @@ -6208,7 +6230,7 @@ dependencies = [ [[package]] name = "reth" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6237,6 +6259,7 @@ dependencies = [ "reth-downloaders", "reth-engine-util", "reth-errors", + "reth-ethereum-cli", "reth-ethereum-payload-builder", "reth-evm", "reth-execution-types", @@ -6280,7 +6303,7 @@ dependencies = [ [[package]] name = "reth-auto-seal-consensus" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -6310,7 +6333,7 @@ dependencies = [ [[package]] name = "reth-basic-payload-builder" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6333,7 +6356,7 @@ dependencies = [ [[package]] name = "reth-beacon-consensus" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -6384,7 +6407,7 @@ dependencies = [ [[package]] name = "reth-bench" -version 
= "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -6419,7 +6442,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6457,7 +6480,7 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-consensus", @@ -6469,7 +6492,7 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6497,7 +6520,7 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-chains", "alloy-eips", @@ -6508,28 +6531,29 @@ dependencies = [ "auto_impl", "derive_more 1.0.0", "once_cell", - "op-alloy-rpc-types", "reth-ethereum-forks", "reth-network-peers", "reth-optimism-forks", "reth-primitives-traits", "reth-trie-common", - "serde", "serde_json", ] [[package]] name = "reth-cli" -version = "1.0.7" +version = "1.0.8" dependencies = [ + "alloy-genesis", "clap", "eyre", "reth-cli-runner", + "serde_json", + "shellexpand", ] [[package]] name = "reth-cli-commands" -version = "1.0.7" +version = "1.0.8" dependencies = [ "ahash", "alloy-eips", @@ -6561,6 +6585,7 @@ dependencies = [ "reth-downloaders", "reth-ecies", "reth-eth-wire", + "reth-ethereum-cli", "reth-evm", "reth-exex", "reth-fs-util", @@ -6589,7 +6614,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.0.7" +version = "1.0.8" dependencies = [ "reth-tasks", "tokio", @@ -6598,7 +6623,7 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6615,7 +6640,7 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6638,7 +6663,7 @@ dependencies = [ [[package]] name = 
"reth-codecs-derive" -version = "1.0.7" +version = "1.0.8" dependencies = [ "convert_case", "proc-macro2", @@ -6649,7 +6674,7 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "eyre", @@ -6665,7 +6690,7 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "auto_impl", @@ -6675,7 +6700,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -6690,7 +6715,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6713,7 +6738,7 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "arbitrary", @@ -6725,6 +6750,7 @@ dependencies = [ "iai-callgrind", "metrics", "page_size", + "parking_lot 0.12.3", "paste", "pprof", "proptest", @@ -6753,7 +6779,7 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -6780,7 +6806,7 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -6808,7 +6834,7 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "arbitrary", @@ -6824,7 +6850,7 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6850,7 +6876,7 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -6874,7 +6900,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.0.7" +version = "1.0.8" 
dependencies = [ "alloy-chains", "alloy-primitives", @@ -6902,7 +6928,7 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -6939,7 +6965,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -6977,7 +7003,7 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.0.7" +version = "1.0.8" dependencies = [ "aes", "alloy-primitives", @@ -7007,7 +7033,7 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7037,7 +7063,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-execution-types", @@ -7049,7 +7075,7 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.0.7" +version = "1.0.8" dependencies = [ "futures", "pin-project", @@ -7077,7 +7103,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7124,7 +7150,7 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7154,7 +7180,7 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.0.7" +version = "1.0.8" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -7166,8 +7192,9 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.0.7" +version = "1.0.8" dependencies = [ + "alloy-eips", "alloy-primitives", "alloy-rlp", "arbitrary", @@ -7200,7 +7227,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-chains", "alloy-consensus", @@ -7223,21 +7250,18 @@ dependencies = [ [[package]] name = 
"reth-ethereum-cli" -version = "1.0.7" +version = "1.0.8" dependencies = [ - "alloy-genesis", "clap", "eyre", "reth-chainspec", "reth-cli", "reth-cli-commands", - "serde_json", - "shellexpand", ] [[package]] name = "reth-ethereum-consensus" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -7249,7 +7273,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7268,7 +7292,7 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-chains", "alloy-primitives", @@ -7287,7 +7311,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-basic-payload-builder", @@ -7311,7 +7335,7 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "rayon", @@ -7321,7 +7345,7 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7343,7 +7367,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7366,7 +7390,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7381,7 +7405,7 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7398,7 +7422,7 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7443,7 +7467,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.0.7" +version = "1.0.8" dependencies = [ 
"eyre", "futures-util", @@ -7475,7 +7499,7 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7491,7 +7515,7 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.0.7" +version = "1.0.8" dependencies = [ "serde", "serde_json", @@ -7500,7 +7524,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7524,7 +7548,7 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.0.7" +version = "1.0.8" dependencies = [ "async-trait", "bytes", @@ -7546,7 +7570,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.0.7" +version = "1.0.8" dependencies = [ "bitflags 2.6.0", "byteorder", @@ -7567,7 +7591,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.0.7" +version = "1.0.8" dependencies = [ "bindgen 0.70.1", "cc", @@ -7575,7 +7599,7 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.0.7" +version = "1.0.8" dependencies = [ "futures", "metrics", @@ -7586,14 +7610,14 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", ] [[package]] name = "reth-net-nat" -version = "1.0.7" +version = "1.0.8" dependencies = [ "futures-util", "if-addrs", @@ -7607,7 +7631,7 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -7667,7 +7691,7 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-admin", @@ -7689,7 +7713,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -7709,7 +7733,7 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.0.7" 
+version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -7725,7 +7749,7 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "humantime-serde", "reth-ethereum-forks", @@ -7738,7 +7762,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.0.7" +version = "1.0.8" dependencies = [ "anyhow", "bincode", @@ -7756,7 +7780,7 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "reth-engine-primitives", "reth-evm", @@ -7773,7 +7797,7 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -7837,9 +7861,8 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.0.7" +version = "1.0.8" dependencies = [ - "alloy-genesis", "alloy-primitives", "alloy-rpc-types-engine", "clap", @@ -7852,19 +7875,16 @@ dependencies = [ "proptest", "rand 0.8.5", "reth-chainspec", - "reth-cli", "reth-cli-util", "reth-config", "reth-consensus-common", "reth-db", "reth-discv4", "reth-discv5", - "reth-fs-util", "reth-net-nat", "reth-network", "reth-network-p2p", "reth-network-peers", - "reth-optimism-chainspec", "reth-primitives", "reth-prune-types", "reth-rpc-api", @@ -7879,7 +7899,6 @@ dependencies = [ "reth-transaction-pool", "secp256k1", "serde", - "serde_json", "shellexpand", "strum", "tempfile", @@ -7892,7 +7911,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -7928,7 +7947,7 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -7950,7 +7969,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.0.7" +version = "1.0.8" dependencies = [ "eyre", "http", @@ -7976,7 +7995,7 @@ dependencies = [ [[package]] 
name = "reth-node-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "reth-chainspec", "reth-db-api", @@ -7985,7 +8004,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-chains", "alloy-genesis", @@ -8003,7 +8022,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8026,6 +8045,7 @@ dependencies = [ "reth-node-builder", "reth-node-core", "reth-node-events", + "reth-node-metrics", "reth-optimism-chainspec", "reth-optimism-evm", "reth-optimism-node", @@ -8046,7 +8066,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -8061,7 +8081,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8087,7 +8107,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-chains", "alloy-primitives", @@ -8098,7 +8118,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-genesis", @@ -8151,7 +8171,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8184,7 +8204,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-primitives", @@ -8193,7 +8213,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8232,7 +8252,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.0.7" +version = "1.0.8" dependencies = [ "reth-codecs", 
"reth-db-api", @@ -8243,7 +8263,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8264,7 +8284,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -8285,7 +8305,7 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-rpc-types", "reth-chainspec", @@ -8295,7 +8315,7 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8340,7 +8360,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8368,7 +8388,7 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8418,7 +8438,7 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "assert_matches", @@ -8447,7 +8467,7 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "arbitrary", @@ -8467,7 +8487,7 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -8484,7 +8504,7 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -8552,7 +8572,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -8578,7 +8598,7 @@ dependencies = [ [[package]] name = "reth-rpc-api-testing-util" -version = "1.0.7" +version = "1.0.8" dependencies = [ 
"alloy-primitives", "alloy-rpc-types", @@ -8597,7 +8617,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-network", "alloy-primitives", @@ -8649,7 +8669,7 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8685,7 +8705,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-dyn-abi", "alloy-eips", @@ -8725,7 +8745,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -8768,7 +8788,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-rpc-types-engine", "http", @@ -8783,7 +8803,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rpc-types-engine", @@ -8798,7 +8818,7 @@ dependencies = [ [[package]] name = "reth-rpc-types-compat" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8814,7 +8834,7 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -8864,7 +8884,7 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "aquamarine", @@ -8892,7 +8912,7 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "arbitrary", @@ -8909,7 +8929,7 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "assert_matches", @@ -8934,7 +8954,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.0.7" +version = 
"1.0.8" dependencies = [ "alloy-primitives", "clap", @@ -8945,7 +8965,7 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8963,7 +8983,7 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -8975,7 +8995,7 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.0.7" +version = "1.0.8" dependencies = [ "auto_impl", "dyn-clone", @@ -8992,7 +9012,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9005,7 +9025,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.0.7" +version = "1.0.8" dependencies = [ "tokio", "tokio-stream", @@ -9014,7 +9034,7 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.0.7" +version = "1.0.8" dependencies = [ "clap", "eyre", @@ -9028,7 +9048,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9073,7 +9093,7 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9104,7 +9124,7 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9128,7 +9148,7 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9163,7 +9183,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.0.7" +version = "1.0.8" dependencies = [ "alloy-primitives", "alloy-rlp", diff --git a/Cargo.toml b/Cargo.toml index 6a268b0684d41..5c772a844429e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "1.0.7" 
+version = "1.0.8" edition = "2021" rust-version = "1.81" license = "MIT OR Apache-2.0" @@ -148,6 +148,7 @@ members = [ "testing/testing-utils", ] default-members = ["bin/reth"] +exclude = ["book/sources"] # Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 # https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html @@ -419,7 +420,7 @@ revm-primitives = { version = "10.0.0", features = [ # eth alloy-chains = "0.1.32" alloy-dyn-abi = "0.8.0" -alloy-primitives = { version = "0.8.4", default-features = false } +alloy-primitives = { version = "0.8.7", default-features = false } alloy-rlp = "0.3.4" alloy-sol-types = "0.8.0" alloy-trie = { version = "0.6", default-features = false } diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index 3d3cbd06f4d79..476f9cd5cec73 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -15,6 +15,7 @@ workspace = true [dependencies] # reth reth-cli.workspace = true +reth-ethereum-cli.workspace = true reth-chainspec.workspace = true reth-config.workspace = true reth-primitives.workspace = true diff --git a/bin/reth/src/cli/mod.rs b/bin/reth/src/cli/mod.rs index 8bb0971ec0817..cca801da36b27 100644 --- a/bin/reth/src/cli/mod.rs +++ b/bin/reth/src/cli/mod.rs @@ -15,8 +15,8 @@ use reth_cli_commands::{ }; use reth_cli_runner::CliRunner; use reth_db::DatabaseEnv; +use reth_ethereum_cli::chainspec::EthereumChainSpecParser; use reth_node_builder::{NodeBuilder, WithLaunchContext}; -use reth_node_core::args::utils::EthereumChainSpecParser; use reth_node_ethereum::{EthExecutorProvider, EthereumNode}; use reth_tracing::FileWorkerGuard; use std::{ffi::OsString, fmt, future::Future, sync::Arc}; @@ -117,7 +117,8 @@ impl, Ext: clap::Args + fmt::Debug> Cl /// /// ```no_run /// use clap::Parser; - /// use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; + /// use reth::cli::Cli; + /// use reth_ethereum_cli::chainspec::EthereumChainSpecParser; /// /// #[derive(Debug, 
Parser)] /// pub struct MyArgs { @@ -238,7 +239,7 @@ mod tests { use super::*; use crate::args::ColorMode; use clap::CommandFactory; - use reth_node_core::args::utils::SUPPORTED_CHAINS; + use reth_ethereum_cli::chainspec::SUPPORTED_CHAINS; #[test] fn parse_color_mode() { diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index a7f75c02a8f82..455d8356aff94 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -194,7 +194,7 @@ impl> Command { ) .expect("should not fail to convert blob tx if it is already eip4844"); let pooled = PooledTransactionsElement::BlobTransaction(tx); - let encoded_length = pooled.length_without_header(); + let encoded_length = pooled.encode_2718_len(); // insert the blob into the store blob_store.insert(transaction.hash, sidecar)?; diff --git a/bin/reth/src/lib.rs b/bin/reth/src/lib.rs index 4ff2faa1d65f7..6b71f48de123e 100644 --- a/bin/reth/src/lib.rs +++ b/bin/reth/src/lib.rs @@ -90,6 +90,7 @@ pub mod dirs { /// Re-exported from `reth_chainspec` pub mod chainspec { pub use reth_chainspec::*; + pub use reth_ethereum_cli::chainspec::*; } /// Re-exported from `reth_provider`. 
diff --git a/bin/reth/src/main.rs b/bin/reth/src/main.rs index 578f2987d73fc..7153cdcc6c885 100644 --- a/bin/reth/src/main.rs +++ b/bin/reth/src/main.rs @@ -4,7 +4,8 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::new_allocator(); use clap::{Args, Parser}; -use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; +use reth::cli::Cli; +use reth_ethereum_cli::chainspec::EthereumChainSpecParser; use reth_node_builder::{ engine_tree_config::{ TreeConfig, DEFAULT_MEMORY_BLOCK_BUFFER_TARGET, DEFAULT_PERSISTENCE_THRESHOLD, @@ -60,7 +61,7 @@ fn main() { let handle = builder .with_types_and_provider::>() .with_components(EthereumNode::components()) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( builder.task_executor().clone(), diff --git a/book/developers/exex/hello-world.md b/book/developers/exex/hello-world.md index facb07e5307f2..c1f3e5af94430 100644 --- a/book/developers/exex/hello-world.md +++ b/book/developers/exex/hello-world.md @@ -14,19 +14,7 @@ cd my-exex And add Reth as a dependency in `Cargo.toml` ```toml -[package] -name = "my-exex" -version = "0.1.0" -edition = "2021" - -[dependencies] -reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth -reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation -reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging - -eyre = "0.6" # Easy error handling -futures-util = "0.3" # Stream utilities for consuming notifications +{{#include ../../sources/exex/hello-world/Cargo.toml}} ``` ### Default Reth node @@ -34,15 +22,7 @@ futures-util = "0.3" # Stream utilities for consuming notifications Now, let's jump to our `main.rs` and start by initializing and launching a default Reth node ```rust,norun,noplayground,ignore -use 
reth_node_ethereum::EthereumNode; - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let handle = builder.node(EthereumNode::default()).launch().await?; - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/hello-world/src/bin/1.rs}} ``` You can already test that it works by running the binary and initializing the Holesky node in a custom datadir @@ -63,26 +43,7 @@ $ cargo run -- init --chain holesky --datadir data The simplest ExEx is just an async function that never returns. We need to install it into our node ```rust,norun,noplayground,ignore -use reth::api::FullNodeComponents; -use reth_exex::{ExExContext, ExExEvent, ExExNotification}; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; - -async fn my_exex(mut _ctx: ExExContext) -> eyre::Result<()> { - loop {} -} - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let handle = builder - .node(EthereumNode::default()) - .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) }) - .launch() - .await?; - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/hello-world/src/bin/2.rs}} ``` See that unused `_ctx`? That's the context that we'll use to listen to new notifications coming from the main node, @@ -103,46 +64,7 @@ If you try running a node with an ExEx that exits, the node will exit as well. 
Now, let's extend our simplest ExEx and start actually listening to new notifications, log them, and send events back to the main node ```rust,norun,noplayground,ignore -use futures_util::StreamExt; -use reth::api::FullNodeComponents; -use reth_exex::{ExExContext, ExExEvent, ExExNotification}; -use reth_node_ethereum::EthereumNode; -use reth_tracing::tracing::info; - -async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { - while let Some(notification) = ctx.notifications.next().await { - match ¬ification { - ExExNotification::ChainCommitted { new } => { - info!(committed_chain = ?new.range(), "Received commit"); - } - ExExNotification::ChainReorged { old, new } => { - info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); - } - ExExNotification::ChainReverted { old } => { - info!(reverted_chain = ?old.range(), "Received revert"); - } - }; - - if let Some(committed_chain) = notification.committed_chain() { - ctx.events - .send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; - } - } - - Ok(()) -} - -fn main() -> eyre::Result<()> { - reth::cli::Cli::parse_args().run(|builder, _| async move { - let handle = builder - .node(EthereumNode::default()) - .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) }) - .launch() - .await?; - - handle.wait_for_node_exit().await - }) -} +{{#include ../../sources/exex/hello-world/src/bin/3.rs}} ``` Woah, there's a lot of new stuff here! 
Let's go through it step by step: diff --git a/book/sources/Cargo.toml b/book/sources/Cargo.toml new file mode 100644 index 0000000000000..c04c8567f94d0 --- /dev/null +++ b/book/sources/Cargo.toml @@ -0,0 +1,9 @@ +[workspace] +members = [ + "exex/hello-world", +] + +# Explicitly set the resolver to version 2, which is the default for packages with edition >= 2021 +# https://doc.rust-lang.org/edition-guide/rust-2021/default-cargo-resolver.html +resolver = "2" + diff --git a/book/sources/exex/hello-world/Cargo.toml b/book/sources/exex/hello-world/Cargo.toml new file mode 100644 index 0000000000000..e5d32a1405499 --- /dev/null +++ b/book/sources/exex/hello-world/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "my-exex" +version = "0.1.0" +edition = "2021" + +[dependencies] +reth = { git = "https://github.com/paradigmxyz/reth.git" } # Reth +reth-exex = { git = "https://github.com/paradigmxyz/reth.git" } # Execution Extensions +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth.git" } # Ethereum Node implementation +reth-tracing = { git = "https://github.com/paradigmxyz/reth.git" } # Logging + +eyre = "0.6" # Easy error handling +futures-util = "0.3" # Stream utilities for consuming notifications diff --git a/book/sources/exex/hello-world/src/bin/1.rs b/book/sources/exex/hello-world/src/bin/1.rs new file mode 100644 index 0000000000000..794609bfd7d11 --- /dev/null +++ b/book/sources/exex/hello-world/src/bin/1.rs @@ -0,0 +1,9 @@ +use reth_node_ethereum::EthereumNode; + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder.node(EthereumNode::default()).launch().await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/hello-world/src/bin/2.rs b/book/sources/exex/hello-world/src/bin/2.rs new file mode 100644 index 0000000000000..6ab5fc49650ee --- /dev/null +++ b/book/sources/exex/hello-world/src/bin/2.rs @@ -0,0 +1,20 @@ +use reth::api::FullNodeComponents; +use 
reth_exex::ExExContext; +use reth_node_ethereum::EthereumNode; + +async fn my_exex(mut _ctx: ExExContext) -> eyre::Result<()> { + #[allow(clippy::empty_loop)] + loop {} +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/sources/exex/hello-world/src/bin/3.rs new file mode 100644 index 0000000000000..21bd25a56dbf3 --- /dev/null +++ b/book/sources/exex/hello-world/src/bin/3.rs @@ -0,0 +1,39 @@ +use futures_util::TryStreamExt; +use reth::api::FullNodeComponents; +use reth_exex::{ExExContext, ExExEvent, ExExNotification}; +use reth_node_ethereum::EthereumNode; +use reth_tracing::tracing::info; + +async fn my_exex(mut ctx: ExExContext) -> eyre::Result<()> { + while let Some(notification) = ctx.notifications.try_next().await? 
{ + match ¬ification { + ExExNotification::ChainCommitted { new } => { + info!(committed_chain = ?new.range(), "Received commit"); + } + ExExNotification::ChainReorged { old, new } => { + info!(from_chain = ?old.range(), to_chain = ?new.range(), "Received reorg"); + } + ExExNotification::ChainReverted { old } => { + info!(reverted_chain = ?old.range(), "Received revert"); + } + }; + + if let Some(committed_chain) = notification.committed_chain() { + ctx.events.send(ExExEvent::FinishedHeight(committed_chain.tip().num_hash()))?; + } + } + + Ok(()) +} + +fn main() -> eyre::Result<()> { + reth::cli::Cli::parse_args().run(|builder, _| async move { + let handle = builder + .node(EthereumNode::default()) + .install_exex("my-exex", |ctx| async move { Ok(my_exex(ctx)) }) + .launch() + .await?; + + handle.wait_for_node_exit().await + }) +} diff --git a/crates/blockchain-tree/src/bundle.rs b/crates/blockchain-tree/src/bundle.rs index 6f62d4136bb76..3745753d3f473 100644 --- a/crates/blockchain-tree/src/bundle.rs +++ b/crates/blockchain-tree/src/bundle.rs @@ -18,7 +18,7 @@ pub struct BundleStateDataRef<'a> { pub canonical_fork: ForkBlock, } -impl<'a> ExecutionDataProvider for BundleStateDataRef<'a> { +impl ExecutionDataProvider for BundleStateDataRef<'_> { fn execution_outcome(&self) -> &ExecutionOutcome { self.execution_outcome } @@ -33,7 +33,7 @@ impl<'a> ExecutionDataProvider for BundleStateDataRef<'a> { } } -impl<'a> BlockExecutionForkProvider for BundleStateDataRef<'a> { +impl BlockExecutionForkProvider for BundleStateDataRef<'_> { fn canonical_fork(&self) -> ForkBlock { self.canonical_fork } diff --git a/crates/blockchain-tree/src/canonical_chain.rs b/crates/blockchain-tree/src/canonical_chain.rs index 7dcd466f7d64a..253f799fe0f87 100644 --- a/crates/blockchain-tree/src/canonical_chain.rs +++ b/crates/blockchain-tree/src/canonical_chain.rs @@ -32,15 +32,7 @@ impl CanonicalChain { /// Returns the block number of the (non-finalized) canonical block with the given hash. 
#[inline] pub(crate) fn canonical_number(&self, block_hash: &BlockHash) -> Option { - self.chain.iter().find_map( - |(number, hash)| { - if hash == block_hash { - Some(*number) - } else { - None - } - }, - ) + self.chain.iter().find_map(|(number, hash)| (hash == block_hash).then_some(*number)) } /// Extends all items from the given iterator to the chain. @@ -81,3 +73,169 @@ impl CanonicalChain { self.chain.into_iter() } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_replace_canonical_chain() { + // Initialize a chain with some blocks + let mut initial_chain = BTreeMap::new(); + initial_chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); + initial_chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); + + let mut canonical_chain = CanonicalChain::new(initial_chain.clone()); + + // Verify initial chain state + assert_eq!(canonical_chain.chain.len(), 2); + assert_eq!( + canonical_chain.chain.get(&BlockNumber::from(1u64)), + Some(&BlockHash::from([0x01; 32])) + ); + + // Replace with a new chain + let mut new_chain = BTreeMap::new(); + new_chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); + new_chain.insert(BlockNumber::from(4u64), BlockHash::from([0x04; 32])); + new_chain.insert(BlockNumber::from(5u64), BlockHash::from([0x05; 32])); + + canonical_chain.replace(new_chain.clone()); + + // Verify replaced chain state + assert_eq!(canonical_chain.chain.len(), 3); + assert!(!canonical_chain.chain.contains_key(&BlockNumber::from(1u64))); + assert_eq!( + canonical_chain.chain.get(&BlockNumber::from(3u64)), + Some(&BlockHash::from([0x03; 32])) + ); + } + + #[test] + fn test_canonical_hash_canonical_chain() { + // Initialize a chain with some blocks + let mut chain = BTreeMap::new(); + chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); + chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); + chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); + + // Create an 
instance of a canonical chain + let canonical_chain = CanonicalChain::new(chain.clone()); + + // Check that the function returns the correct hash for a given block number + let block_number = BlockNumber::from(2u64); + let expected_hash = BlockHash::from([0x02; 32]); + assert_eq!(canonical_chain.canonical_hash(&block_number), Some(expected_hash)); + + // Check that a non-existent block returns None + let non_existent_block = BlockNumber::from(5u64); + assert_eq!(canonical_chain.canonical_hash(&non_existent_block), None); + } + + #[test] + fn test_canonical_number_canonical_chain() { + // Initialize a chain with some blocks + let mut chain = BTreeMap::new(); + chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); + chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); + chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); + + // Create an instance of a canonical chain + let canonical_chain = CanonicalChain::new(chain.clone()); + + // Check that the function returns the correct block number for a given block hash + let block_hash = BlockHash::from([0x02; 32]); + let expected_number = BlockNumber::from(2u64); + assert_eq!(canonical_chain.canonical_number(&block_hash), Some(expected_number)); + + // Check that a non-existent block hash returns None + let non_existent_hash = BlockHash::from([0x05; 32]); + assert_eq!(canonical_chain.canonical_number(&non_existent_hash), None); + } + + #[test] + fn test_extend_canonical_chain() { + // Initialize an empty chain + let mut canonical_chain = CanonicalChain::new(BTreeMap::new()); + + // Create an iterator with some blocks + let blocks = vec![ + (BlockNumber::from(1u64), BlockHash::from([0x01; 32])), + (BlockNumber::from(2u64), BlockHash::from([0x02; 32])), + ] + .into_iter(); + + // Extend the chain with the created blocks + canonical_chain.extend(blocks); + + // Check if the blocks were added correctly + assert_eq!(canonical_chain.chain.len(), 2); + assert_eq!( + 
canonical_chain.chain.get(&BlockNumber::from(1u64)), + Some(&BlockHash::from([0x01; 32])) + ); + assert_eq!( + canonical_chain.chain.get(&BlockNumber::from(2u64)), + Some(&BlockHash::from([0x02; 32])) + ); + + // Test extending with additional blocks again + let more_blocks = vec![(BlockNumber::from(3u64), BlockHash::from([0x03; 32]))].into_iter(); + canonical_chain.extend(more_blocks); + + assert_eq!(canonical_chain.chain.len(), 3); + assert_eq!( + canonical_chain.chain.get(&BlockNumber::from(3u64)), + Some(&BlockHash::from([0x03; 32])) + ); + } + + #[test] + fn test_retain_canonical_chain() { + // Initialize a chain with some blocks + let mut chain = BTreeMap::new(); + chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); + chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); + chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); + + // Create an instance of CanonicalChain + let mut canonical_chain = CanonicalChain::new(chain); + + // Retain only blocks with even block numbers + canonical_chain.retain(|number, _| number % 2 == 0); + + // Check if the chain only contains the block with number 2 + assert_eq!(canonical_chain.chain.len(), 1); + assert_eq!( + canonical_chain.chain.get(&BlockNumber::from(2u64)), + Some(&BlockHash::from([0x02; 32])) + ); + + // Ensure that the blocks with odd numbers were removed + assert_eq!(canonical_chain.chain.get(&BlockNumber::from(1u64)), None); + assert_eq!(canonical_chain.chain.get(&BlockNumber::from(3u64)), None); + } + + #[test] + fn test_tip_canonical_chain() { + // Initialize a chain with some blocks + let mut chain = BTreeMap::new(); + chain.insert(BlockNumber::from(1u64), BlockHash::from([0x01; 32])); + chain.insert(BlockNumber::from(2u64), BlockHash::from([0x02; 32])); + chain.insert(BlockNumber::from(3u64), BlockHash::from([0x03; 32])); + + // Create an instance of a canonical chain + let canonical_chain = CanonicalChain::new(chain); + + // Call the tip method and verify the 
returned value + let tip = canonical_chain.tip(); + assert_eq!(tip.number, BlockNumber::from(3u64)); + assert_eq!(tip.hash, BlockHash::from([0x03; 32])); + + // Test with an empty chain + let empty_chain = CanonicalChain::new(BTreeMap::new()); + let empty_tip = empty_chain.tip(); + assert_eq!(empty_tip.number, BlockNumber::default()); + assert_eq!(empty_tip.hash, BlockHash::default()); + } +} diff --git a/crates/chain-state/src/in_memory.rs b/crates/chain-state/src/in_memory.rs index f2a73d27fa210..07120cf8ee310 100644 --- a/crates/chain-state/src/in_memory.rs +++ b/crates/chain-state/src/in_memory.rs @@ -177,7 +177,7 @@ impl CanonicalInMemoryState { let in_memory_state = InMemoryState::new(blocks, numbers, pending); let header = in_memory_state .head_state() - .map_or_else(SealedHeader::default, |state| state.block().block().header.clone()); + .map_or_else(SealedHeader::default, |state| state.block_ref().block().header.clone()); let chain_info_tracker = ChainInfoTracker::new(header, finalized); let (canon_state_notification_sender, _) = broadcast::channel(CANON_STATE_NOTIFICATION_CHANNEL_SIZE); @@ -219,7 +219,7 @@ impl CanonicalInMemoryState { /// Returns the header corresponding to the given hash. pub fn header_by_hash(&self, hash: B256) -> Option { - self.state_by_hash(hash).map(|block| block.block().block.header.clone()) + self.state_by_hash(hash).map(|block| block.block_ref().block.header.clone()) } /// Clears all entries in the in memory state. 
@@ -323,7 +323,7 @@ impl CanonicalInMemoryState { // height) let mut old_blocks = blocks .drain() - .filter(|(_, b)| b.block().block().number > persisted_height) + .filter(|(_, b)| b.block_ref().block().number > persisted_height) .map(|(_, b)| b.block.clone()) .collect::>(); @@ -345,7 +345,7 @@ impl CanonicalInMemoryState { // also shift the pending state if it exists self.inner.in_memory_state.pending.send_modify(|p| { if let Some(p) = p.as_mut() { - p.parent = blocks.get(&p.block().block.parent_hash).cloned(); + p.parent = blocks.get(&p.block_ref().block.parent_hash).cloned(); } }); } @@ -452,7 +452,7 @@ impl CanonicalInMemoryState { /// Returns the `SealedHeader` corresponding to the pending state. pub fn pending_sealed_header(&self) -> Option { - self.pending_state().map(|h| h.block().block().header.clone()) + self.pending_state().map(|h| h.block_ref().block().header.clone()) } /// Returns the `Header` corresponding to the pending state. @@ -462,20 +462,20 @@ impl CanonicalInMemoryState { /// Returns the `SealedBlock` corresponding to the pending state. pub fn pending_block(&self) -> Option { - self.pending_state().map(|block_state| block_state.block().block().clone()) + self.pending_state().map(|block_state| block_state.block_ref().block().clone()) } /// Returns the `SealedBlockWithSenders` corresponding to the pending state. pub fn pending_block_with_senders(&self) -> Option { self.pending_state() - .and_then(|block_state| block_state.block().block().clone().seal_with_senders()) + .and_then(|block_state| block_state.block_ref().block().clone().seal_with_senders()) } /// Returns a tuple with the `SealedBlock` corresponding to the pending /// state and a vector of its `Receipt`s. 
pub fn pending_block_and_receipts(&self) -> Option<(SealedBlock, Vec)> { self.pending_state().map(|block_state| { - (block_state.block().block().clone(), block_state.executed_block_receipts()) + (block_state.block_ref().block().clone(), block_state.executed_block_receipts()) }) } @@ -543,7 +543,7 @@ impl CanonicalInMemoryState { pub fn transaction_by_hash(&self, hash: TxHash) -> Option { for block_state in self.canonical_chain() { if let Some(tx) = - block_state.block().block().body.transactions().find(|tx| tx.hash() == hash) + block_state.block_ref().block().body.transactions().find(|tx| tx.hash() == hash) { return Some(tx.clone()) } @@ -559,7 +559,7 @@ impl CanonicalInMemoryState { ) -> Option<(TransactionSigned, TransactionMeta)> { for block_state in self.canonical_chain() { if let Some((index, tx)) = block_state - .block() + .block_ref() .block() .body .transactions() @@ -570,10 +570,10 @@ impl CanonicalInMemoryState { tx_hash, index: index as u64, block_hash: block_state.hash(), - block_number: block_state.block().block.number, - base_fee: block_state.block().block().header.base_fee_per_gas, - timestamp: block_state.block().block.timestamp, - excess_blob_gas: block_state.block().block.excess_blob_gas, + block_number: block_state.block_ref().block.number, + base_fee: block_state.block_ref().block.header.base_fee_per_gas, + timestamp: block_state.block_ref().block.timestamp, + excess_blob_gas: block_state.block_ref().block.excess_blob_gas, }; return Some((tx.clone(), meta)) } @@ -872,7 +872,7 @@ mod tests { AccountReader, BlockHashReader, StateProofProvider, StateProvider, StateRootProvider, StorageRootProvider, }; - use reth_trie::{AccountProof, HashedStorage, MultiProof, TrieInput}; + use reth_trie::{AccountProof, HashedStorage, MultiProof, StorageProof, TrieInput}; fn create_mock_state( test_block_builder: &mut TestBlockBuilder, @@ -973,6 +973,15 @@ mod tests { ) -> ProviderResult { Ok(B256::random()) } + + fn storage_proof( + &self, + _address: Address, + 
slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageProof::new(slot)) + } } impl StateProofProvider for MockStateProvider { @@ -1156,13 +1165,19 @@ mod tests { let block2 = test_block_builder.get_executed_block_with_number(0, B256::random()); let chain = NewCanonicalChain::Commit { new: vec![block1.clone()] }; state.update_chain(chain); - assert_eq!(state.head_state().unwrap().block().block().hash(), block1.block().hash()); - assert_eq!(state.state_by_number(0).unwrap().block().block().hash(), block1.block().hash()); + assert_eq!(state.head_state().unwrap().block_ref().block().hash(), block1.block().hash()); + assert_eq!( + state.state_by_number(0).unwrap().block_ref().block().hash(), + block1.block().hash() + ); let chain = NewCanonicalChain::Reorg { new: vec![block2.clone()], old: vec![block1] }; state.update_chain(chain); - assert_eq!(state.head_state().unwrap().block().block().hash(), block2.block().hash()); - assert_eq!(state.state_by_number(0).unwrap().block().block().hash(), block2.block().hash()); + assert_eq!(state.head_state().unwrap().block_ref().block().hash(), block2.block().hash()); + assert_eq!( + state.state_by_number(0).unwrap().block_ref().block().hash(), + block2.block().hash() + ); assert_eq!(state.inner.in_memory_state.block_count(), 1); } diff --git a/crates/chain-state/src/memory_overlay.rs b/crates/chain-state/src/memory_overlay.rs index 2712d1259e85b..eb125dad115ee 100644 --- a/crates/chain-state/src/memory_overlay.rs +++ b/crates/chain-state/src/memory_overlay.rs @@ -133,11 +133,26 @@ impl StateRootProvider for MemoryOverlayStateProvider { impl StorageRootProvider for MemoryOverlayStateProvider { // TODO: Currently this does not reuse available in-memory trie nodes. 
fn storage_root(&self, address: Address, storage: HashedStorage) -> ProviderResult { + let state = &self.trie_state().state; let mut hashed_storage = - self.trie_state().state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); hashed_storage.extend(&storage); self.historical.storage_root(address, hashed_storage) } + + // TODO: Currently this does not reuse available in-memory trie nodes. + fn storage_proof( + &self, + address: Address, + slot: B256, + storage: HashedStorage, + ) -> ProviderResult { + let state = &self.trie_state().state; + let mut hashed_storage = + state.storages.get(&keccak256(address)).cloned().unwrap_or_default(); + hashed_storage.extend(&storage); + self.historical.storage_proof(address, slot, hashed_storage) + } } impl StateProofProvider for MemoryOverlayStateProvider { diff --git a/crates/chainspec/Cargo.toml b/crates/chainspec/Cargo.toml index de04ffd1e6aab..80636d139a1b5 100644 --- a/crates/chainspec/Cargo.toml +++ b/crates/chainspec/Cargo.toml @@ -27,13 +27,9 @@ alloy-genesis.workspace = true alloy-primitives = { workspace = true, features = ["rand", "rlp"] } alloy-trie.workspace = true -# op -op-alloy-rpc-types = { workspace = true, optional = true } - # misc auto_impl.workspace = true once_cell.workspace = true -serde = { workspace = true, optional = true } serde_json.workspace = true derive_more.workspace = true @@ -44,12 +40,9 @@ alloy-eips = { workspace = true, features = ["arbitrary"] } alloy-rlp = { workspace = true, features = ["arrayvec"] } alloy-genesis.workspace = true -# op -op-alloy-rpc-types.workspace = true - [features] default = ["std"] -optimism = ["serde", "dep:op-alloy-rpc-types", "reth-optimism-forks"] +optimism = ["reth-optimism-forks"] std = [ "alloy-chains/std", "alloy-eips/std", diff --git a/crates/chainspec/src/spec.rs b/crates/chainspec/src/spec.rs index 45070db197d44..d6ca92aa24ebe 100644 --- a/crates/chainspec/src/spec.rs +++ 
b/crates/chainspec/src/spec.rs @@ -590,14 +590,89 @@ impl ChainSpec { impl From for ChainSpec { fn from(genesis: Genesis) -> Self { - #[cfg(not(feature = "optimism"))] - { - into_ethereum_chain_spec(genesis) + // Block-based hardforks + let hardfork_opts = [ + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), + (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), + ]; + let mut hardforks = hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::>(); + + // Paris + let paris_block_and_final_difficulty = + if let Some(ttd) = genesis.config.terminal_total_difficulty { + hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); + + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; + + // Time-based hardforks + let time_hardfork_opts = [ + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), + (EthereumHardfork::Prague.boxed(), 
genesis.config.prague_time), + ]; + + let mut time_hardforks = time_hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| { + opt.map(|time| (hardfork, ForkCondition::Timestamp(time))) + }) + .collect::>(); + + hardforks.append(&mut time_hardforks); + + // Ordered Hardforks + let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(hardforks.remove(pos)); + } } - #[cfg(feature = "optimism")] - { - into_optimism_chain_spec(genesis) + // append the remaining unknown hardforks to ensure we don't filter any out + ordered_hardforks.append(&mut hardforks); + + // NOTE: in full node, we prune all receipts except the deposit contract's. We do not + // have the deployment block in the genesis file, so we use block zero. We use the same + // deposit topic as the mainnet contract if we have the deposit contract address in the + // genesis json. + let deposit_contract = genesis.config.deposit_contract_address.map(|address| { + DepositContract { address, block: 0, topic: MAINNET_DEPOSIT_CONTRACT.topic } + }); + + Self { + chain: genesis.config.chain_id.into(), + genesis, + genesis_hash: OnceCell::new(), + hardforks: ChainHardforks::new(ordered_hardforks), + paris_block_and_final_difficulty, + deposit_contract, + ..Default::default() } } } @@ -637,194 +712,6 @@ impl EthereumHardforks for ChainSpec { #[cfg(feature = "optimism")] impl reth_optimism_forks::OptimismHardforks for ChainSpec {} -/// Convert the given [`Genesis`] into an Ethereum [`ChainSpec`]. 
-#[cfg(not(feature = "optimism"))] -fn into_ethereum_chain_spec(genesis: Genesis) -> ChainSpec { - // Block-based hardforks - let hardfork_opts = [ - (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), - (EthereumHardfork::Dao.boxed(), genesis.config.dao_fork_block), - (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), - (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), - (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), - (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), - (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), - (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), - (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), - (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), - (EthereumHardfork::London.boxed(), genesis.config.london_block), - (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), - (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), - ]; - let mut hardforks = hardfork_opts - .into_iter() - .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) - .collect::>(); - - // Paris - let paris_block_and_final_difficulty = - if let Some(ttd) = genesis.config.terminal_total_difficulty { - hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); - - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; - - // Time-based hardforks - let time_hardfork_opts = [ - (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), - (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), - (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), - ]; - - let mut time_hardforks = time_hardfork_opts - .into_iter() - .filter_map(|(hardfork, opt)| 
opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))) - .collect::>(); - - hardforks.append(&mut time_hardforks); - - // Ordered Hardforks - let mainnet_hardforks: ChainHardforks = EthereumHardfork::mainnet().into(); - let mainnet_order = mainnet_hardforks.forks_iter(); - - let mut ordered_hardforks = Vec::with_capacity(hardforks.len()); - for (hardfork, _) in mainnet_order { - if let Some(pos) = hardforks.iter().position(|(e, _)| **e == *hardfork) { - ordered_hardforks.push(hardforks.remove(pos)); - } - } - - // append the remaining unknown hardforks to ensure we don't filter any out - ordered_hardforks.append(&mut hardforks); - - // NOTE: in full node, we prune all receipts except the deposit contract's. We do not - // have the deployment block in the genesis file, so we use block zero. We use the same - // deposit topic as the mainnet contract if we have the deposit contract address in the - // genesis json. - let deposit_contract = genesis.config.deposit_contract_address.map(|address| DepositContract { - address, - block: 0, - topic: MAINNET_DEPOSIT_CONTRACT.topic, - }); - - ChainSpec { - chain: genesis.config.chain_id.into(), - genesis, - genesis_hash: OnceCell::new(), - hardforks: ChainHardforks::new(ordered_hardforks), - paris_block_and_final_difficulty, - deposit_contract, - ..Default::default() - } -} - -#[cfg(feature = "optimism")] -/// Convert the given [`Genesis`] into an Optimism [`ChainSpec`]. 
-fn into_optimism_chain_spec(genesis: Genesis) -> ChainSpec { - use reth_optimism_forks::OptimismHardfork; - let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); - let genesis_info = optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); - - // Block-based hardforks - let hardfork_opts = [ - (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), - (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), - (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), - (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), - (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), - (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), - (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), - (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), - (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), - (EthereumHardfork::London.boxed(), genesis.config.london_block), - (EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), - (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), - (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), - ]; - let mut block_hardforks = hardfork_opts - .into_iter() - .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) - .collect::>(); - - // Paris - let paris_block_and_final_difficulty = - if let Some(ttd) = genesis.config.terminal_total_difficulty { - block_hardforks.push(( - EthereumHardfork::Paris.boxed(), - ForkCondition::TTD { - total_difficulty: ttd, - fork_block: genesis.config.merge_netsplit_block, - }, - )); - - genesis.config.merge_netsplit_block.map(|block| (block, ttd)) - } else { - None - }; - - // Time-based hardforks - let time_hardfork_opts = [ - (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), - (EthereumHardfork::Cancun.boxed(), 
genesis.config.cancun_time), - (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), - (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), - (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), - (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), - (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), - (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), - ]; - - let mut time_hardforks = time_hardfork_opts - .into_iter() - .filter_map(|(hardfork, opt)| opt.map(|time| (hardfork, ForkCondition::Timestamp(time)))) - .collect::>(); - - block_hardforks.append(&mut time_hardforks); - - // Ordered Hardforks - let mainnet_hardforks = OptimismHardfork::op_mainnet(); - let mainnet_order = mainnet_hardforks.forks_iter(); - - let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); - for (hardfork, _) in mainnet_order { - if let Some(pos) = block_hardforks.iter().position(|(e, _)| **e == *hardfork) { - ordered_hardforks.push(block_hardforks.remove(pos)); - } - } - - // append the remaining unknown hardforks to ensure we don't filter any out - ordered_hardforks.append(&mut block_hardforks); - - // NOTE: in full node, we prune all receipts except the deposit contract's. We do not - // have the deployment block in the genesis file, so we use block zero. We use the same - // deposit topic as the mainnet contract if we have the deposit contract address in the - // genesis json. - let deposit_contract = genesis.config.deposit_contract_address.map(|address| DepositContract { - address, - block: 0, - topic: MAINNET_DEPOSIT_CONTRACT.topic, - }); - - ChainSpec { - chain: genesis.config.chain_id.into(), - genesis, - genesis_hash: OnceCell::new(), - hardforks: ChainHardforks::new(ordered_hardforks), - paris_block_and_final_difficulty, - deposit_contract, - base_fee_params: optimism_genesis_info.base_fee_params, - ..Default::default() - } -} - /// A trait for reading the current chainspec. 
#[auto_impl::auto_impl(&, Arc)] pub trait ChainSpecProvider: Send + Sync { @@ -1102,59 +989,6 @@ impl DepositContract { } } -/// Genesis info for Optimism. -#[cfg(feature = "optimism")] -#[derive(Default, Debug, serde::Deserialize)] -#[serde(rename_all = "camelCase")] -struct OptimismGenesisInfo { - optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo, - #[serde(skip)] - base_fee_params: BaseFeeParamsKind, -} - -#[cfg(feature = "optimism")] -impl OptimismGenesisInfo { - fn extract_from(genesis: &Genesis) -> Self { - let mut info = Self { - optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo::extract_from( - &genesis.config.extra_fields, - ) - .unwrap_or_default(), - ..Default::default() - }; - if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info { - if let (Some(elasticity), Some(denominator)) = ( - optimism_base_fee_info.eip1559_elasticity, - optimism_base_fee_info.eip1559_denominator, - ) { - let base_fee_params = if let Some(canyon_denominator) = - optimism_base_fee_info.eip1559_denominator_canyon - { - BaseFeeParamsKind::Variable( - vec![ - ( - EthereumHardfork::London.boxed(), - BaseFeeParams::new(denominator as u128, elasticity as u128), - ), - ( - reth_optimism_forks::OptimismHardfork::Canyon.boxed(), - BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), - ), - ] - .into(), - ) - } else { - BaseFeeParams::new(denominator as u128, elasticity as u128).into() - }; - - info.base_fee_params = base_fee_params; - } - } - - info - } -} - /// Verifies [`ChainSpec`] configuration against expected data in given cases. 
#[cfg(any(test, feature = "test-utils"))] pub fn test_fork_ids(spec: &ChainSpec, cases: &[(Head, ForkId)]) { @@ -2477,7 +2311,6 @@ Post-merge hard forks (timestamp based): } #[test] - #[cfg(not(feature = "optimism"))] fn test_fork_order_ethereum_mainnet() { let genesis = Genesis { config: ChainConfig { @@ -2506,7 +2339,7 @@ Post-merge hard forks (timestamp based): ..Default::default() }; - let chain_spec = into_ethereum_chain_spec(genesis); + let chain_spec: ChainSpec = genesis.into(); let hardforks: Vec<_> = chain_spec.hardforks.forks_iter().map(|(h, _)| h).collect(); let expected_hardforks = vec![ @@ -2534,80 +2367,4 @@ Post-merge hard forks (timestamp based): .all(|(expected, actual)| &**expected == *actual)); assert_eq!(expected_hardforks.len(), hardforks.len()); } - - #[test] - #[cfg(feature = "optimism")] - fn test_fork_order_optimism_mainnet() { - use reth_optimism_forks::OptimismHardfork; - - let genesis = Genesis { - config: ChainConfig { - chain_id: 0, - homestead_block: Some(0), - dao_fork_block: Some(0), - dao_fork_support: false, - eip150_block: Some(0), - eip155_block: Some(0), - eip158_block: Some(0), - byzantium_block: Some(0), - constantinople_block: Some(0), - petersburg_block: Some(0), - istanbul_block: Some(0), - muir_glacier_block: Some(0), - berlin_block: Some(0), - london_block: Some(0), - arrow_glacier_block: Some(0), - gray_glacier_block: Some(0), - merge_netsplit_block: Some(0), - shanghai_time: Some(0), - cancun_time: Some(0), - terminal_total_difficulty: Some(U256::ZERO), - extra_fields: [ - (String::from("bedrockBlock"), 0.into()), - (String::from("regolithTime"), 0.into()), - (String::from("canyonTime"), 0.into()), - (String::from("ecotoneTime"), 0.into()), - (String::from("fjordTime"), 0.into()), - (String::from("graniteTime"), 0.into()), - ] - .into_iter() - .collect(), - ..Default::default() - }, - ..Default::default() - }; - - let chain_spec: ChainSpec = into_optimism_chain_spec(genesis); - - let hardforks: Vec<_> = 
chain_spec.hardforks.forks_iter().map(|(h, _)| h).collect(); - let expected_hardforks = vec![ - EthereumHardfork::Homestead.boxed(), - EthereumHardfork::Tangerine.boxed(), - EthereumHardfork::SpuriousDragon.boxed(), - EthereumHardfork::Byzantium.boxed(), - EthereumHardfork::Constantinople.boxed(), - EthereumHardfork::Petersburg.boxed(), - EthereumHardfork::Istanbul.boxed(), - EthereumHardfork::MuirGlacier.boxed(), - EthereumHardfork::Berlin.boxed(), - EthereumHardfork::London.boxed(), - EthereumHardfork::ArrowGlacier.boxed(), - EthereumHardfork::GrayGlacier.boxed(), - EthereumHardfork::Paris.boxed(), - OptimismHardfork::Bedrock.boxed(), - OptimismHardfork::Regolith.boxed(), - EthereumHardfork::Shanghai.boxed(), - OptimismHardfork::Canyon.boxed(), - EthereumHardfork::Cancun.boxed(), - OptimismHardfork::Ecotone.boxed(), - OptimismHardfork::Fjord.boxed(), - OptimismHardfork::Granite.boxed(), - ]; - - assert!(expected_hardforks - .iter() - .zip(hardforks.iter()) - .all(|(expected, actual)| &**expected == *actual)); - assert_eq!(expected_hardforks.len(), hardforks.len()); - } } diff --git a/crates/cli/cli/Cargo.toml b/crates/cli/cli/Cargo.toml index e8f7a1dcbe15a..7eb1f43b1e58a 100644 --- a/crates/cli/cli/Cargo.toml +++ b/crates/cli/cli/Cargo.toml @@ -15,9 +15,13 @@ workspace = true # reth reth-cli-runner.workspace = true +alloy-genesis.workspace = true + # misc clap.workspace = true +shellexpand.workspace = true eyre.workspace = true +serde_json.workspace = true diff --git a/crates/cli/cli/src/chainspec.rs b/crates/cli/cli/src/chainspec.rs index 63705bd28f4a6..8432009409f70 100644 --- a/crates/cli/cli/src/chainspec.rs +++ b/crates/cli/cli/src/chainspec.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; +use std::{fs, path::PathBuf, sync::Arc}; use clap::builder::TypedValueParser; @@ -61,3 +61,21 @@ pub trait ChainSpecParser: Clone + Send + Sync + 'static { format!("The chain this node is running.\nPossible values are either a built-in chain or the path to a chain specification 
file.\n\nBuilt-in chains:\n {}", Self::SUPPORTED_CHAINS.join(", ")) } } + +/// A helper to parse a [`Genesis`](alloy_genesis::Genesis) as argument or from disk. +pub fn parse_genesis(s: &str) -> eyre::Result { + // try to read json from path first + let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) { + Ok(raw) => raw, + Err(io_err) => { + // valid json may start with "\n", but must contain "{" + if s.contains('{') { + s.to_string() + } else { + return Err(io_err.into()) // assume invalid path + } + } + }; + + Ok(serde_json::from_str(&raw)?) +} diff --git a/crates/cli/commands/Cargo.toml b/crates/cli/commands/Cargo.toml index 4835c3d0fa29c..e307859dfd86f 100644 --- a/crates/cli/commands/Cargo.toml +++ b/crates/cli/commands/Cargo.toml @@ -14,6 +14,7 @@ repository.workspace = true reth-beacon-consensus.workspace = true reth-chainspec.workspace = true reth-cli.workspace = true +reth-ethereum-cli.workspace = true reth-cli-runner.workspace = true reth-cli-util.workspace = true reth-config.workspace = true diff --git a/crates/cli/commands/src/db/mod.rs b/crates/cli/commands/src/db/mod.rs index 1c000f56bc250..e1a9a90bacc3f 100644 --- a/crates/cli/commands/src/db/mod.rs +++ b/crates/cli/commands/src/db/mod.rs @@ -160,7 +160,7 @@ impl> Command #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::{EthereumChainSpecParser, SUPPORTED_CHAINS}; + use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; use std::path::Path; #[test] diff --git a/crates/cli/commands/src/dump_genesis.rs b/crates/cli/commands/src/dump_genesis.rs index a5c0675cc7e81..c3e7bb217b57d 100644 --- a/crates/cli/commands/src/dump_genesis.rs +++ b/crates/cli/commands/src/dump_genesis.rs @@ -32,7 +32,7 @@ impl> DumpGenesisCommand { #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::{EthereumChainSpecParser, SUPPORTED_CHAINS}; + use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; 
#[test] fn parse_dump_genesis_command_chain_args() { diff --git a/crates/cli/commands/src/import.rs b/crates/cli/commands/src/import.rs index 31c6cdc691570..6b750d32a3df6 100644 --- a/crates/cli/commands/src/import.rs +++ b/crates/cli/commands/src/import.rs @@ -231,7 +231,7 @@ where #[cfg(test)] mod tests { use super::*; - use reth_node_core::args::utils::{EthereumChainSpecParser, SUPPORTED_CHAINS}; + use reth_ethereum_cli::chainspec::{EthereumChainSpecParser, SUPPORTED_CHAINS}; #[test] fn parse_common_import_command_chain_args() { diff --git a/crates/cli/commands/src/node.rs b/crates/cli/commands/src/node.rs index fe49b769a3d31..5b1a87e068b35 100644 --- a/crates/cli/commands/src/node.rs +++ b/crates/cli/commands/src/node.rs @@ -6,11 +6,12 @@ use reth_cli::chainspec::ChainSpecParser; use reth_cli_runner::CliContext; use reth_cli_util::parse_socket_address; use reth_db::{init_db, DatabaseEnv}; +use reth_ethereum_cli::chainspec::EthereumChainSpecParser; use reth_node_builder::{NodeBuilder, WithLaunchContext}; use reth_node_core::{ args::{ - utils::EthereumChainSpecParser, DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, - PayloadBuilderArgs, PruningArgs, RpcServerArgs, TxPoolArgs, + DatabaseArgs, DatadirArgs, DebugArgs, DevArgs, NetworkArgs, PayloadBuilderArgs, + PruningArgs, RpcServerArgs, TxPoolArgs, }, node_config::NodeConfig, version, @@ -210,7 +211,7 @@ pub struct NoArgs; mod tests { use super::*; use reth_discv4::DEFAULT_DISCOVERY_PORT; - use reth_node_core::args::utils::SUPPORTED_CHAINS; + use reth_ethereum_cli::chainspec::SUPPORTED_CHAINS; use std::{ net::{IpAddr, Ipv4Addr}, path::Path, diff --git a/crates/cli/commands/src/stage/unwind.rs b/crates/cli/commands/src/stage/unwind.rs index ae3ae25008746..19305554eaa3f 100644 --- a/crates/cli/commands/src/stage/unwind.rs +++ b/crates/cli/commands/src/stage/unwind.rs @@ -213,7 +213,7 @@ impl Subcommands { #[cfg(test)] mod tests { - use reth_node_core::args::utils::EthereumChainSpecParser; + use 
reth_ethereum_cli::chainspec::EthereumChainSpecParser; use super::*; diff --git a/crates/engine/invalid-block-hooks/src/witness.rs b/crates/engine/invalid-block-hooks/src/witness.rs index 37d5bb08293d5..51978311faade 100644 --- a/crates/engine/invalid-block-hooks/src/witness.rs +++ b/crates/engine/invalid-block-hooks/src/witness.rs @@ -170,12 +170,8 @@ where if let Some(healthy_node_client) = &self.healthy_node_client { // Compare the witness against the healthy node. let healthy_node_witness = futures::executor::block_on(async move { - DebugApiClient::debug_execution_witness( - healthy_node_client, - block.number.into(), - true, - ) - .await + DebugApiClient::debug_execution_witness(healthy_node_client, block.number.into()) + .await })?; let healthy_path = self.save_file( diff --git a/crates/engine/tree/src/tree/metrics.rs b/crates/engine/tree/src/tree/metrics.rs index 52dbf34173df3..d46c2f05a028d 100644 --- a/crates/engine/tree/src/tree/metrics.rs +++ b/crates/engine/tree/src/tree/metrics.rs @@ -34,6 +34,14 @@ pub(crate) struct EngineMetrics { pub(crate) new_payload_messages: Counter, /// Histogram of persistence operation durations (in seconds) pub(crate) persistence_duration: Histogram, + /// Tracks the how often we failed to deliver a newPayload response. + /// + /// This effectively tracks how often the message sender dropped the channel and indicates a CL + /// request timeout (e.g. it took more than 8s to send the response and the CL terminated the + /// request which resulted in a closed channel). + pub(crate) failed_new_payload_response_deliveries: Counter, + /// Tracks the how often we failed to deliver a forkchoice update response. 
+ pub(crate) failed_forkchoice_updated_response_deliveries: Counter, // TODO add latency metrics } diff --git a/crates/engine/tree/src/tree/mod.rs b/crates/engine/tree/src/tree/mod.rs index e01b75288220c..0478c73c90d28 100644 --- a/crates/engine/tree/src/tree/mod.rs +++ b/crates/engine/tree/src/tree/mod.rs @@ -282,7 +282,7 @@ impl TreeState { } // remove trie updates that are below the finalized block - self.persisted_trie_updates.retain(|_, (block_num, _)| *block_num < finalized_num); + self.persisted_trie_updates.retain(|_, (block_num, _)| *block_num > finalized_num); // The only block that should remain at the `finalized` number now, is the finalized // block, if it exists. @@ -899,16 +899,9 @@ where return Ok(None); } - if old_hash == current_hash { - // We've found the fork point - break; - } - if let Some(block) = self.executed_block_by_hash(current_hash)? { - if self.is_fork(block.block.hash())? { - current_hash = block.block.parent_hash; - new_chain.push(block); - } + current_hash = block.block.parent_hash; + new_chain.push(block); } else { // This shouldn't happen as we've already walked this path warn!(target: "engine::tree", invalid_hash=?current_hash, "New chain block not found in TreeState"); @@ -1219,6 +1212,10 @@ where if let Err(err) = tx.send(output.map(|o| o.outcome).map_err(Into::into)) { + self.metrics + .engine + .failed_forkchoice_updated_response_deliveries + .increment(1); error!(target: "engine::tree", "Failed to send event: {err:?}"); } } @@ -1230,6 +1227,10 @@ where ) })) { error!(target: "engine::tree", "Failed to send event: {err:?}"); + self.metrics + .engine + .failed_new_payload_response_deliveries + .increment(1); } } BeaconEngineMessage::TransitionConfigurationExchanged => { diff --git a/crates/ethereum/cli/Cargo.toml b/crates/ethereum/cli/Cargo.toml index b5fadc684b84d..108f42f539b90 100644 --- a/crates/ethereum/cli/Cargo.toml +++ b/crates/ethereum/cli/Cargo.toml @@ -15,13 +15,6 @@ workspace = true reth-cli.workspace = true 
reth-chainspec.workspace = true -# ethereum -alloy-genesis.workspace = true - -# io -shellexpand.workspace = true -serde_json.workspace = true - # misc eyre.workspace = true diff --git a/crates/ethereum/cli/src/chainspec.rs b/crates/ethereum/cli/src/chainspec.rs index 05db177df8320..cbcce9f69f61a 100644 --- a/crates/ethereum/cli/src/chainspec.rs +++ b/crates/ethereum/cli/src/chainspec.rs @@ -1,48 +1,33 @@ -use alloy_genesis::Genesis; use reth_chainspec::{ChainSpec, DEV, HOLESKY, MAINNET, SEPOLIA}; -use reth_cli::chainspec::ChainSpecParser; -use std::{fs, path::PathBuf, sync::Arc}; +use reth_cli::chainspec::{parse_genesis, ChainSpecParser}; +use std::sync::Arc; + +/// Chains supported by reth. First value should be used as the default. +pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "holesky", "dev"]; /// Clap value parser for [`ChainSpec`]s. /// /// The value parser matches either a known chain, the path /// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. -fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { +pub fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { Ok(match s { "mainnet" => MAINNET.clone(), "sepolia" => SEPOLIA.clone(), "holesky" => HOLESKY.clone(), "dev" => DEV.clone(), - _ => { - // try to read json from path first - let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) { - Ok(raw) => raw, - Err(io_err) => { - // valid json may start with "\n", but must contain "{" - if s.contains('{') { - s.to_string() - } else { - return Err(io_err.into()) // assume invalid path - } - } - }; - - // both serialized Genesis and ChainSpec structs supported - let genesis: Genesis = serde_json::from_str(&raw)?; - - Arc::new(genesis.into()) - } + _ => Arc::new(parse_genesis(s)?.into()), }) } /// Ethereum chain specification parser. 
#[derive(Debug, Clone, Default)] -pub struct EthChainSpecParser; +#[non_exhaustive] +pub struct EthereumChainSpecParser; -impl ChainSpecParser for EthChainSpecParser { +impl ChainSpecParser for EthereumChainSpecParser { type ChainSpec = ChainSpec; - const SUPPORTED_CHAINS: &'static [&'static str] = &["mainnet", "sepolia", "holesky", "dev"]; + const SUPPORTED_CHAINS: &'static [&'static str] = SUPPORTED_CHAINS; fn parse(s: &str) -> eyre::Result> { chain_value_parser(s) @@ -56,8 +41,8 @@ mod tests { #[test] fn parse_known_chain_spec() { - for &chain in EthChainSpecParser::SUPPORTED_CHAINS { - assert!(::parse(chain).is_ok()); + for &chain in EthereumChainSpecParser::SUPPORTED_CHAINS { + assert!(::parse(chain).is_ok()); } } @@ -108,7 +93,7 @@ mod tests { } }"#; - let spec = ::parse(s).unwrap(); + let spec = ::parse(s).unwrap(); assert!(spec.is_shanghai_active_at_timestamp(0)); assert!(spec.is_cancun_active_at_timestamp(0)); assert!(spec.is_prague_active_at_timestamp(0)); diff --git a/crates/ethereum/engine-primitives/src/lib.rs b/crates/ethereum/engine-primitives/src/lib.rs index 69d73a021747b..034a8c6bffbb4 100644 --- a/crates/ethereum/engine-primitives/src/lib.rs +++ b/crates/ethereum/engine-primitives/src/lib.rs @@ -26,21 +26,40 @@ use reth_payload_primitives::{ /// The types used in the default mainnet ethereum beacon consensus engine. 
#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct EthEngineTypes; +pub struct EthEngineTypes { + _marker: std::marker::PhantomData, +} -impl PayloadTypes for EthEngineTypes { - type BuiltPayload = EthBuiltPayload; - type PayloadAttributes = EthPayloadAttributes; - type PayloadBuilderAttributes = EthPayloadBuilderAttributes; +impl PayloadTypes for EthEngineTypes { + type BuiltPayload = T::BuiltPayload; + type PayloadAttributes = T::PayloadAttributes; + type PayloadBuilderAttributes = T::PayloadBuilderAttributes; } -impl EngineTypes for EthEngineTypes { +impl EngineTypes for EthEngineTypes +where + T::BuiltPayload: TryInto + + TryInto + + TryInto + + TryInto, +{ type ExecutionPayloadV1 = ExecutionPayloadV1; type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; type ExecutionPayloadV3 = ExecutionPayloadEnvelopeV3; type ExecutionPayloadV4 = ExecutionPayloadEnvelopeV4; } +/// A default payload type for [`EthEngineTypes`] +#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] +#[non_exhaustive] +pub struct EthPayloadTypes; + +impl PayloadTypes for EthPayloadTypes { + type BuiltPayload = EthBuiltPayload; + type PayloadAttributes = EthPayloadAttributes; + type PayloadBuilderAttributes = EthPayloadBuilderAttributes; +} + /// Validator for the ethereum engine API. 
#[derive(Debug, Clone)] pub struct EthereumEngineValidator { diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index dd0b7b405e9fc..ae370fdb9d7bb 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -89,7 +89,7 @@ impl BuiltPayload for EthBuiltPayload { } } -impl<'a> BuiltPayload for &'a EthBuiltPayload { +impl BuiltPayload for &EthBuiltPayload { fn block(&self) -> &SealedBlock { (**self).block() } diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index f1d7de115d575..8c84fafc25fba 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -14,7 +14,7 @@ use reth_evm::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, - system_calls::SystemCaller, + system_calls::{NoopHook, OnStateHook, SystemCaller}, ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; @@ -126,20 +126,25 @@ where /// This applies the pre-execution and post-execution changes that require an [EVM](Evm), and /// executes the transactions. /// + /// The optional `state_hook` will be executed with the state changes if present. + /// /// # Note /// /// It does __not__ apply post-execution changes that do not require an [EVM](Evm), for that see /// [`EthBlockExecutor::post_execution`]. 
- fn execute_state_transitions( + fn execute_state_transitions( &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, + state_hook: Option, ) -> Result where DB: Database, DB::Error: Into + Display, + F: OnStateHook, { - let mut system_caller = SystemCaller::new(&self.evm_config, &self.chain_spec); + let mut system_caller = + SystemCaller::new(&self.evm_config, &self.chain_spec).with_state_hook(state_hook); system_caller.apply_pre_execution_changes(block, &mut evm)?; @@ -161,7 +166,7 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. - let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { @@ -169,6 +174,8 @@ where error: Box::new(new_err), } })?; + system_caller.on_state(&result_and_state); + let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); // append gas used @@ -260,17 +267,31 @@ where EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } + /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the + /// state hook as `None`. + fn execute_without_verification( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result { + self.execute_without_verification_with_state_hook(block, total_difficulty, None::) + } + /// Execute a single block and apply the state changes to the internal state. /// /// Returns the receipts of the transactions in the block, the total gas used and the list of /// EIP-7685 [requests](Request). /// /// Returns an error if execution fails. 
- fn execute_without_verification( + fn execute_without_verification_with_state_hook( &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result { + state_hook: Option, + ) -> Result + where + F: OnStateHook, + { // 1. prepare state on new block self.on_new_block(&block.header); @@ -278,7 +299,7 @@ where let env = self.evm_env_for_block(&block.header, total_difficulty); let output = { let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_state_transitions(block, evm) + self.executor.execute_state_transitions(block, evm, state_hook) }?; // 3. apply post execution changes @@ -368,6 +389,27 @@ where witness(&self.state); Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) } + + fn execute_with_state_hook( + mut self, + input: Self::Input<'_>, + state_hook: F, + ) -> Result + where + F: OnStateHook, + { + let BlockExecutionInput { block, total_difficulty } = input; + let EthExecuteOutput { receipts, requests, gas_used } = self + .execute_without_verification_with_state_hook( + block, + total_difficulty, + Some(state_hook), + )?; + + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); + Ok(BlockExecutionOutput { state: self.state.take_bundle(), receipts, requests, gas_used }) + } } /// An executor for a batch of blocks. /// diff --git a/crates/ethereum/node/src/node.rs b/crates/ethereum/node/src/node.rs index c1c4653ae6aa6..f17658bb32da7 100644 --- a/crates/ethereum/node/src/node.rs +++ b/crates/ethereum/node/src/node.rs @@ -77,7 +77,8 @@ impl NodeTypesWithEngine for EthereumNode { } /// Add-ons w.r.t. l1 ethereum. 
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] +#[non_exhaustive] pub struct EthereumAddOns; impl NodeAddOns for EthereumAddOns { @@ -104,6 +105,10 @@ where fn components_builder(&self) -> Self::ComponentsBuilder { Self::components() } + + fn add_ons(&self) -> Self::AddOns { + EthereumAddOns::default() + } } /// A regular ethereum evm and executor builder. diff --git a/crates/ethereum/node/tests/it/builder.rs b/crates/ethereum/node/tests/it/builder.rs index 379f66e814b64..218839fbe0190 100644 --- a/crates/ethereum/node/tests/it/builder.rs +++ b/crates/ethereum/node/tests/it/builder.rs @@ -22,7 +22,7 @@ fn test_basic_setup() { .with_database(db) .with_types::() .with_components(EthereumNode::components()) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); println!("{msg}"); @@ -54,7 +54,7 @@ async fn test_eth_launcher() { NodeTypesWithDBAdapter>>, >>() .with_components(EthereumNode::components()) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .launch_with_fn(|builder| { let launcher = EngineNodeLauncher::new( tasks.executor(), diff --git a/crates/ethereum/node/tests/it/exex.rs b/crates/ethereum/node/tests/it/exex.rs index db19aaaf36129..856220300c2bb 100644 --- a/crates/ethereum/node/tests/it/exex.rs +++ b/crates/ethereum/node/tests/it/exex.rs @@ -33,7 +33,7 @@ fn basic_exex() { .with_database(db) .with_types::() .with_components(EthereumNode::components()) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .install_exex("dummy", move |ctx| future::ok(DummyExEx { _ctx: ctx })) .check_launch(); } diff --git a/crates/etl/src/lib.rs b/crates/etl/src/lib.rs index 3f978fabeee24..d30f432f9c190 100644 --- a/crates/etl/src/lib.rs +++ b/crates/etl/src/lib.rs @@ -190,14 +190,14 @@ pub struct EtlIter<'a> { files: &'a mut Vec, } -impl<'a> EtlIter<'a> { +impl EtlIter<'_> { /// Peeks into the next element pub fn peek(&self) -> Option<&(Vec, Vec)> { 
self.heap.peek().map(|(Reverse(entry), _)| entry) } } -impl<'a> Iterator for EtlIter<'a> { +impl Iterator for EtlIter<'_> { type Item = std::io::Result<(Vec, Vec)>; fn next(&mut self) -> Option { diff --git a/crates/evm/execution-types/src/chain.rs b/crates/evm/execution-types/src/chain.rs index 5db5495de59ff..25bc39ea32570 100644 --- a/crates/evm/execution-types/src/chain.rs +++ b/crates/evm/execution-types/src/chain.rs @@ -355,7 +355,7 @@ impl Chain { #[derive(Debug)] pub struct DisplayBlocksChain<'a>(pub &'a BTreeMap); -impl<'a> fmt::Display for DisplayBlocksChain<'a> { +impl fmt::Display for DisplayBlocksChain<'_> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut list = f.debug_list(); let mut values = self.0.values().map(|block| block.num_hash()); @@ -376,7 +376,7 @@ pub struct ChainBlocks<'a> { blocks: Cow<'a, BTreeMap>, } -impl<'a> ChainBlocks<'a> { +impl ChainBlocks<'_> { /// Creates a consuming iterator over all blocks in the chain with increasing block number. /// /// Note: this always yields at least one block. 
@@ -442,7 +442,7 @@ impl<'a> ChainBlocks<'a> { } } -impl<'a> IntoIterator for ChainBlocks<'a> { +impl IntoIterator for ChainBlocks<'_> { type Item = (BlockNumber, SealedBlockWithSenders); type IntoIter = std::collections::btree_map::IntoIter; @@ -516,7 +516,7 @@ pub(super) mod serde_bincode_compat { use alloy_primitives::BlockNumber; use reth_primitives::serde_bincode_compat::SealedBlockWithSenders; use reth_trie::serde_bincode_compat::updates::TrieUpdates; - use serde::{Deserialize, Deserializer, Serialize, Serializer}; + use serde::{ser::SerializeMap, Deserialize, Deserializer, Serialize, Serializer}; use serde_with::{DeserializeAs, SerializeAs}; use crate::ExecutionOutcome; @@ -538,19 +538,47 @@ pub(super) mod serde_bincode_compat { /// ``` #[derive(Debug, Serialize, Deserialize)] pub struct Chain<'a> { - blocks: BTreeMap>, + blocks: SealedBlocksWithSenders<'a>, execution_outcome: Cow<'a, ExecutionOutcome>, trie_updates: Option>, } + #[derive(Debug)] + struct SealedBlocksWithSenders<'a>( + Cow<'a, BTreeMap>, + ); + + impl Serialize for SealedBlocksWithSenders<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_map(Some(self.0.len()))?; + + for (block_number, block) in self.0.iter() { + state.serialize_entry(block_number, &SealedBlockWithSenders::<'_>::from(block))?; + } + + state.end() + } + } + + impl<'de> Deserialize<'de> for SealedBlocksWithSenders<'_> { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Ok(Self(Cow::Owned( + BTreeMap::>::deserialize(deserializer) + .map(|blocks| blocks.into_iter().map(|(n, b)| (n, b.into())).collect())?, + ))) + } + } + impl<'a> From<&'a super::Chain> for Chain<'a> { fn from(value: &'a super::Chain) -> Self { Self { - blocks: value - .blocks - .iter() - .map(|(block_number, block)| (*block_number, block.into())) - .collect(), + blocks: SealedBlocksWithSenders(Cow::Borrowed(&value.blocks)), execution_outcome: 
Cow::Borrowed(&value.execution_outcome), trie_updates: value.trie_updates.as_ref().map(Into::into), } @@ -560,18 +588,14 @@ pub(super) mod serde_bincode_compat { impl<'a> From> for super::Chain { fn from(value: Chain<'a>) -> Self { Self { - blocks: value - .blocks - .into_iter() - .map(|(block_number, block)| (block_number, block.into())) - .collect(), + blocks: value.blocks.0.into_owned(), execution_outcome: value.execution_outcome.into_owned(), trie_updates: value.trie_updates.map(Into::into), } } } - impl<'a> SerializeAs for Chain<'a> { + impl SerializeAs for Chain<'_> { fn serialize_as(source: &super::Chain, serializer: S) -> Result where S: Serializer, diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index a3fca50ec7eee..e28ec3887f806 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -2,7 +2,10 @@ use core::fmt::Display; -use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; +use crate::{ + execute::{BatchExecutor, BlockExecutorProvider, Executor}, + system_calls::OnStateHook, +}; use alloy_primitives::BlockNumber; use reth_execution_errors::BlockExecutionError; use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; @@ -87,6 +90,20 @@ where Self::Right(b) => b.execute_with_state_witness(input, witness), } } + + fn execute_with_state_hook( + self, + input: Self::Input<'_>, + state_hook: F, + ) -> Result + where + F: OnStateHook, + { + match self { + Self::Left(a) => a.execute_with_state_hook(input, state_hook), + Self::Right(b) => b.execute_with_state_hook(input, state_hook), + } + } } impl BatchExecutor for Either diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index ffc08469dc8d6..1bd79378127e2 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,7 +1,9 @@ //! Traits for execution. 
// Re-export execution types -pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +pub use reth_execution_errors::{ + BlockExecutionError, BlockValidationError, InternalBlockExecutionError, +}; pub use reth_execution_types::{BlockExecutionInput, BlockExecutionOutput, ExecutionOutcome}; pub use reth_storage_errors::provider::ProviderError; @@ -12,6 +14,8 @@ use reth_prune_types::PruneModes; use revm::State; use revm_primitives::db::Database; +use crate::system_calls::OnStateHook; + /// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). /// @@ -43,6 +47,16 @@ pub trait Executor { ) -> Result where F: FnMut(&State); + + /// Executes the EVM with the given input and accepts a state hook closure that is invoked with + /// the EVM state after execution. + fn execute_with_state_hook( + self, + input: Self::Input<'_>, + state_hook: F, + ) -> Result + where + F: OnStateHook; } /// A general purpose executor that can execute multiple inputs in sequence, validate the outputs, @@ -199,6 +213,17 @@ mod tests { { Err(BlockExecutionError::msg("execution unavailable for tests")) } + + fn execute_with_state_hook( + self, + _: Self::Input<'_>, + _: F, + ) -> Result + where + F: OnStateHook, + { + Err(BlockExecutionError::msg("execution unavailable for tests")) + } } impl BatchExecutor for TestExecutor { diff --git a/crates/evm/src/noop.rs b/crates/evm/src/noop.rs index 392bfd0bd722d..3e01bfc4cc467 100644 --- a/crates/evm/src/noop.rs +++ b/crates/evm/src/noop.rs @@ -10,7 +10,10 @@ use reth_storage_errors::provider::ProviderError; use revm::State; use revm_primitives::db::Database; -use crate::execute::{BatchExecutor, BlockExecutorProvider, Executor}; +use crate::{ + execute::{BatchExecutor, BlockExecutorProvider, Executor}, + system_calls::OnStateHook, +}; const UNAVAILABLE_FOR_NOOP: &str = "execution unavailable for noop"; @@ -58,6 +61,17 @@ impl Executor for 
NoopBlockExecutorProvider { { Err(BlockExecutionError::msg(UNAVAILABLE_FOR_NOOP)) } + + fn execute_with_state_hook( + self, + _: Self::Input<'_>, + _: F, + ) -> Result + where + F: OnStateHook, + { + Err(BlockExecutionError::msg(UNAVAILABLE_FOR_NOOP)) + } } impl BatchExecutor for NoopBlockExecutorProvider { diff --git a/crates/evm/src/system_calls/mod.rs b/crates/evm/src/system_calls/mod.rs index ce5fec42184c1..43baa1c766c29 100644 --- a/crates/evm/src/system_calls/mod.rs +++ b/crates/evm/src/system_calls/mod.rs @@ -49,22 +49,19 @@ pub struct SystemCaller<'a, EvmConfig, Chainspec, Hook = NoopHook> { hook: Option, } -impl<'a, EvmConfig, Chainspec> SystemCaller<'a, EvmConfig, Chainspec> { +impl<'a, EvmConfig, Chainspec> SystemCaller<'a, EvmConfig, Chainspec, NoopHook> { /// Create a new system caller with the given EVM config, database, and chain spec, and creates /// the EVM with the given initialized config and block environment. pub const fn new(evm_config: &'a EvmConfig, chain_spec: Chainspec) -> Self { Self { evm_config, chain_spec, hook: None } } -} - -impl<'a, EvmConfig, Chainspec, Hook> SystemCaller<'a, EvmConfig, Chainspec, Hook> { /// Installs a custom hook to be called after each state change. pub fn with_state_hook( self, - hook: H, + hook: Option, ) -> SystemCaller<'a, EvmConfig, Chainspec, H> { let Self { evm_config, chain_spec, .. } = self; - SystemCaller { evm_config, chain_spec, hook: Some(hook) } + SystemCaller { evm_config, chain_spec, hook } } /// Convenience method to consume the type and drop borrowed fields pub fn finish(self) {} @@ -88,7 +85,7 @@ where .build() } -impl<'a, EvmConfig, Chainspec, Hook> SystemCaller<'a, EvmConfig, Chainspec, Hook> +impl SystemCaller<'_, EvmConfig, Chainspec, Hook> where EvmConfig: ConfigureEvm
, Chainspec: EthereumHardforks, @@ -321,4 +318,11 @@ where eip7251::post_commit(result_and_state.result) } + + /// Delegate to stored `OnStateHook`, noop if hook is `None`. + pub fn on_state(&mut self, state: &ResultAndState) { + if let Some(ref mut hook) = &mut self.hook { + hook.on_state(state); + } + } } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index cf45930aece94..45ab2e97734e0 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -1,7 +1,10 @@ //! Helpers for testing. -use crate::execute::{ - BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, +use crate::{ + execute::{ + BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, + }, + system_calls::OnStateHook, }; use alloy_primitives::BlockNumber; use parking_lot::Mutex; @@ -73,6 +76,17 @@ impl Executor for MockExecutorProvider { { unimplemented!() } + + fn execute_with_state_hook( + self, + _: Self::Input<'_>, + _: F, + ) -> Result + where + F: OnStateHook, + { + unimplemented!() + } } impl BatchExecutor for MockExecutorProvider { diff --git a/crates/exex/exex/src/backfill/job.rs b/crates/exex/exex/src/backfill/job.rs index 7642edbac30ee..77a7b50477b00 100644 --- a/crates/exex/exex/src/backfill/job.rs +++ b/crates/exex/exex/src/backfill/job.rs @@ -8,7 +8,7 @@ use alloy_primitives::BlockNumber; use reth_evm::execute::{ BatchExecutor, BlockExecutionError, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_primitives::{Block, BlockBody, BlockWithSenders, Receipt}; +use reth_primitives::{Block, BlockWithSenders, Receipt}; use reth_primitives_traits::format_gas_throughput; use reth_provider::{ BlockReader, Chain, HeaderProvider, ProviderError, StateProviderFactory, TransactionVariant, @@ -18,6 +18,8 @@ use reth_revm::database::StateProviderDatabase; use reth_stages_api::ExecutionStageThresholds; use reth_tracing::tracing::{debug, trace}; +pub(super) type 
BackfillJobResult = Result; + /// Backfill job started for a specific range. /// /// It implements [`Iterator`] that executes blocks in batches according to the provided thresholds @@ -37,7 +39,7 @@ where E: BlockExecutorProvider, P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = Result; + type Item = BackfillJobResult; fn next(&mut self) -> Option { if self.range.is_empty() { @@ -63,7 +65,13 @@ where self.into() } - fn execute_range(&mut self) -> Result { + fn execute_range(&mut self) -> BackfillJobResult { + debug!( + target: "exex::backfill", + range = ?self.range, + "Executing block range" + ); + let mut executor = self.executor.batch_executor(StateProviderDatabase::new( self.provider.history_by_block_number(self.range.start().saturating_sub(1))?, )); @@ -103,16 +111,8 @@ where // Unseal the block for execution let (block, senders) = block.into_components(); let (unsealed_header, hash) = block.header.split(); - let block = Block { - header: unsealed_header, - body: BlockBody { - transactions: block.body.transactions, - ommers: block.body.ommers, - withdrawals: block.body.withdrawals, - requests: block.body.requests, - }, - } - .with_senders_unchecked(senders); + let block = + Block { header: unsealed_header, body: block.body }.with_senders_unchecked(senders); executor.execute_and_verify_one((&block, td).into())?; execution_duration += execute_start.elapsed(); @@ -167,7 +167,7 @@ where E: BlockExecutorProvider, P: HeaderProvider + BlockReader + StateProviderFactory, { - type Item = Result<(BlockWithSenders, BlockExecutionOutput), BlockExecutionError>; + type Item = BackfillJobResult<(BlockWithSenders, BlockExecutionOutput)>; fn next(&mut self) -> Option { self.range.next().map(|block_number| self.execute_block(block_number)) @@ -189,7 +189,7 @@ where pub(crate) fn execute_block( &self, block_number: u64, - ) -> Result<(BlockWithSenders, BlockExecutionOutput), BlockExecutionError> { + ) -> BackfillJobResult<(BlockWithSenders, 
BlockExecutionOutput)> { let td = self .provider .header_td_by_number(block_number)? diff --git a/crates/exex/exex/src/backfill/stream.rs b/crates/exex/exex/src/backfill/stream.rs index 07b710b7e389d..c55b8651daf11 100644 --- a/crates/exex/exex/src/backfill/stream.rs +++ b/crates/exex/exex/src/backfill/stream.rs @@ -15,14 +15,28 @@ use reth_primitives::{BlockWithSenders, Receipt}; use reth_provider::{BlockReader, Chain, HeaderProvider, StateProviderFactory}; use reth_prune_types::PruneModes; use reth_stages_api::ExecutionStageThresholds; +use reth_tracing::tracing::debug; use tokio::task::JoinHandle; +use super::job::BackfillJobResult; + /// The default parallelism for active tasks in [`StreamBackfillJob`]. pub(crate) const DEFAULT_PARALLELISM: usize = 4; /// The default batch size for active tasks in [`StreamBackfillJob`]. const DEFAULT_BATCH_SIZE: usize = 100; -type BackfillTasks = FuturesOrdered>>; +/// Boxed thread-safe iterator that yields [`BackfillJobResult`]s. +type BackfillTaskIterator = + Box> + Send + Sync + 'static>; + +/// Backfill task output. +struct BackfillTaskOutput { + job: BackfillTaskIterator, + result: Option>, +} + +/// Ordered queue of [`JoinHandle`]s that yield [`BackfillTaskOutput`]s. +type BackfillTasks = FuturesOrdered>>; type SingleBlockStreamItem = (BlockWithSenders, BlockExecutionOutput); type BatchBlockStreamItem = Chain; @@ -40,9 +54,13 @@ pub struct StreamBackfillJob { tasks: BackfillTasks, parallelism: usize, batch_size: usize, + thresholds: ExecutionStageThresholds, } -impl StreamBackfillJob { +impl StreamBackfillJob +where + T: Send + Sync + 'static, +{ /// Configures the parallelism of the [`StreamBackfillJob`] to handle active tasks. 
pub const fn with_parallelism(mut self, parallelism: usize) -> Self { self.parallelism = parallelism; @@ -55,14 +73,30 @@ impl StreamBackfillJob { self } - fn poll_next_task( - &mut self, - cx: &mut Context<'_>, - ) -> Poll>> { - match ready!(self.tasks.poll_next_unpin(cx)) { - Some(res) => Poll::Ready(Some(res.map_err(BlockExecutionError::other)?)), - None => Poll::Ready(None), + /// Spawns a new task calling the [`BackfillTaskIterator::next`] method and pushes it to the + /// [`BackfillTasks`] queue. + fn push_task(&mut self, mut job: BackfillTaskIterator) { + self.tasks.push_back(tokio::task::spawn_blocking(move || BackfillTaskOutput { + result: job.next(), + job, + })); + } + + /// Polls the next task in the [`BackfillTasks`] queue until it returns a non-empty result. + fn poll_next_task(&mut self, cx: &mut Context<'_>) -> Poll>> { + while let Some(res) = ready!(self.tasks.poll_next_unpin(cx)) { + let task_result = res.map_err(BlockExecutionError::other)?; + + if let BackfillTaskOutput { result: Some(job_result), job } = task_result { + // If the task returned a non-empty result, a new task advancing the job is created + // and pushed to the front of the queue. + self.push_task(job); + + return Poll::Ready(Some(job_result)) + }; } + + Poll::Ready(None) } } @@ -71,27 +105,28 @@ where E: BlockExecutorProvider + Clone + Send + 'static, P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = Result; + type Item = BackfillJobResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); // Spawn new tasks only if we are below the parallelism configured. 
while this.tasks.len() < this.parallelism { - // If we have a block number, then we can spawn a new task for that block - if let Some(block_number) = this.range.next() { - let mut job = SingleBlockBackfillJob { - executor: this.executor.clone(), - provider: this.provider.clone(), - range: block_number..=block_number, - stream_parallelism: this.parallelism, - }; - let task = - tokio::task::spawn_blocking(move || job.next().expect("non-empty range")); - this.tasks.push_back(task); - } else { + // Get the next block number from the range. If it is empty, we are done. + let Some(block_number) = this.range.next() else { + debug!(target: "exex::backfill", tasks = %this.tasks.len(), range = ?this.range, "No more single blocks to backfill"); break; - } + }; + + // Spawn a new task for that block + debug!(target: "exex::backfill", tasks = %this.tasks.len(), ?block_number, "Spawning new single block backfill task"); + let job = Box::new(SingleBlockBackfillJob { + executor: this.executor.clone(), + provider: this.provider.clone(), + range: block_number..=block_number, + stream_parallelism: this.parallelism, + }) as BackfillTaskIterator<_>; + this.push_task(job); } this.poll_next_task(cx) @@ -103,7 +138,7 @@ where E: BlockExecutorProvider + Clone + Send + 'static, P: HeaderProvider + BlockReader + StateProviderFactory + Clone + Send + Unpin + 'static, { - type Item = Result; + type Item = BackfillJobResult; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); @@ -115,23 +150,23 @@ where let start = range.next(); let range_bounds = start.zip(range.last().or(start)); - // If we have range bounds, then we can spawn a new task for that range - if let Some((first, last)) = range_bounds { - let range = first..=last; - let mut job = BackfillJob { - executor: this.executor.clone(), - provider: this.provider.clone(), - prune_modes: this.prune_modes.clone(), - thresholds: ExecutionStageThresholds::default(), - range, - stream_parallelism: 
this.parallelism, - }; - let task = - tokio::task::spawn_blocking(move || job.next().expect("non-empty range")); - this.tasks.push_back(task); - } else { + // Create the range from the range bounds. If it is empty, we are done. + let Some(range) = range_bounds.map(|(first, last)| first..=last) else { + debug!(target: "exex::backfill", tasks = %this.tasks.len(), range = ?this.range, "No more block batches to backfill"); break; - } + }; + + // Spawn a new task for that range + debug!(target: "exex::backfill", tasks = %this.tasks.len(), ?range, "Spawning new block batch backfill task"); + let job = Box::new(BackfillJob { + executor: this.executor.clone(), + provider: this.provider.clone(), + prune_modes: this.prune_modes.clone(), + thresholds: this.thresholds.clone(), + range, + stream_parallelism: this.parallelism, + }) as BackfillTaskIterator<_>; + this.push_task(job); } this.poll_next_task(cx) @@ -148,12 +183,14 @@ impl From> for StreamBackfillJob From> for StreamBackfillJob { fn from(job: BackfillJob) -> Self { + let batch_size = job.thresholds.max_blocks.map_or(DEFAULT_BATCH_SIZE, |max| max as usize); Self { executor: job.executor, provider: job.provider, @@ -161,7 +198,11 @@ impl From> for StreamBackfillJob { /// considered delivered by the node. pub notifications: ExExNotifications, - /// node components + /// Node components pub components: Node, } @@ -92,4 +93,16 @@ impl ExExContext { pub fn task_executor(&self) -> &TaskExecutor { self.components.task_executor() } + + /// Sets notifications stream to [`crate::ExExNotificationsWithoutHead`], a stream of + /// notifications without a head. + pub fn set_notifications_without_head(&mut self) { + self.notifications.set_without_head(); + } + + /// Sets notifications stream to [`crate::ExExNotificationsWithHead`], a stream of notifications + /// with the provided head. 
+ pub fn set_notifications_with_head(&mut self, head: ExExHead) { + self.notifications.set_with_head(head); + } } diff --git a/crates/exex/exex/src/manager.rs b/crates/exex/exex/src/manager.rs index e8e24c09db024..31dc822222b63 100644 --- a/crates/exex/exex/src/manager.rs +++ b/crates/exex/exex/src/manager.rs @@ -610,12 +610,16 @@ impl Clone for ExExManagerHandle { mod tests { use super::*; use alloy_primitives::B256; - use eyre::OptionExt; - use futures::{FutureExt, StreamExt}; + use futures::StreamExt; use rand::Rng; + use reth_db_common::init::init_genesis; + use reth_evm_ethereum::execute::EthExecutorProvider; use reth_primitives::SealedBlockWithSenders; - use reth_provider::{test_utils::create_test_provider_factory, BlockWriter, Chain}; - use reth_testing_utils::generators::{self, random_block}; + use reth_provider::{ + providers::BlockchainProvider2, test_utils::create_test_provider_factory, BlockReader, + Chain, TransactionVariant, + }; + use reth_testing_utils::generators; fn empty_finalized_header_stream() -> ForkChoiceStream { let (tx, rx) = watch::channel(None); @@ -975,11 +979,20 @@ mod tests { #[tokio::test] async fn exex_handle_new() { + let provider_factory = create_test_provider_factory(); + init_genesis(&provider_factory).unwrap(); + let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, _, mut notifications) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + provider, + EthExecutorProvider::mainnet(), + wal.handle(), + ); // Check initial state assert_eq!(exex_handle.id, "test_exex"); @@ -1008,7 +1021,7 @@ mod tests { // Send a notification and ensure it's received correctly match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { - let 
received_notification = notifications.next().await.unwrap(); + let received_notification = notifications.next().await.unwrap().unwrap(); assert_eq!(received_notification, notification); } Poll::Pending => panic!("Notification send is pending"), @@ -1021,11 +1034,20 @@ mod tests { #[tokio::test] async fn test_notification_if_finished_height_gt_chain_tip() { + let provider_factory = create_test_provider_factory(); + init_genesis(&provider_factory).unwrap(); + let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, _, mut notifications) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + provider, + EthExecutorProvider::mainnet(), + wal.handle(), + ); // Set finished_height to a value higher than the block tip exex_handle.finished_height = Some(BlockNumHash::new(15, B256::random())); @@ -1046,11 +1068,7 @@ mod tests { poll_fn(|cx| { // The notification should be skipped, so nothing should be sent. 
// Check that the receiver channel is indeed empty - assert_eq!( - notifications.poll_next_unpin(cx), - Poll::Pending, - "Receiver channel should be empty" - ); + assert!(notifications.poll_next_unpin(cx).is_pending()); Poll::Ready(()) }) .await; @@ -1066,11 +1084,20 @@ mod tests { #[tokio::test] async fn test_sends_chain_reorged_notification() { + let provider_factory = create_test_provider_factory(); + init_genesis(&provider_factory).unwrap(); + let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, _, mut notifications) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + provider, + EthExecutorProvider::mainnet(), + wal.handle(), + ); let notification = ExExNotification::ChainReorged { old: Arc::new(Chain::default()), @@ -1086,7 +1113,7 @@ mod tests { // Send the notification match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { - let received_notification = notifications.next().await.unwrap(); + let received_notification = notifications.next().await.unwrap().unwrap(); assert_eq!(received_notification, notification); } Poll::Pending | Poll::Ready(Err(_)) => { @@ -1100,11 +1127,20 @@ mod tests { #[tokio::test] async fn test_sends_chain_reverted_notification() { + let provider_factory = create_test_provider_factory(); + init_genesis(&provider_factory).unwrap(); + let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let (mut exex_handle, _, mut notifications) = - ExExHandle::new("test_exex".to_string(), Head::default(), (), (), wal.handle()); + let (mut exex_handle, _, mut notifications) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + 
provider, + EthExecutorProvider::mainnet(), + wal.handle(), + ); let notification = ExExNotification::ChainReverted { old: Arc::new(Chain::default()) }; @@ -1117,7 +1153,7 @@ mod tests { // Send the notification match exex_handle.send(&mut cx, &(22, notification.clone())) { Poll::Ready(Ok(())) => { - let received_notification = notifications.next().await.unwrap(); + let received_notification = notifications.next().await.unwrap().unwrap(); assert_eq!(received_notification, notification); } Poll::Pending | Poll::Ready(Err(_)) => { @@ -1135,30 +1171,34 @@ mod tests { let mut rng = generators::rng(); + let provider_factory = create_test_provider_factory(); + let genesis_hash = init_genesis(&provider_factory).unwrap(); + let genesis_block = provider_factory + .sealed_block_with_senders(genesis_hash.into(), TransactionVariant::NoHash) + .unwrap() + .ok_or_else(|| eyre::eyre!("genesis block not found"))?; + let provider = BlockchainProvider2::new(provider_factory).unwrap(); + let temp_dir = tempfile::tempdir().unwrap(); let wal = Wal::new(temp_dir.path()).unwrap(); - let provider_factory = create_test_provider_factory(); - - let block = random_block(&mut rng, 0, Default::default()) - .seal_with_senders() - .ok_or_eyre("failed to recover senders")?; - let provider_rw = provider_factory.provider_rw()?; - provider_rw.insert_block(block.clone())?; - provider_rw.commit()?; + let (exex_handle, events_tx, mut notifications) = ExExHandle::new( + "test_exex".to_string(), + Head::default(), + provider.clone(), + EthExecutorProvider::mainnet(), + wal.handle(), + ); let notification = ExExNotification::ChainCommitted { - new: Arc::new(Chain::new(vec![block.clone()], Default::default(), None)), + new: Arc::new(Chain::new(vec![genesis_block.clone()], Default::default(), None)), }; let (finalized_headers_tx, rx) = watch::channel(None); let finalized_header_stream = ForkChoiceStream::new(rx); - let (exex_handle, events_tx, mut notifications) = - ExExHandle::new("test_exex".to_string(), 
Head::default(), (), (), wal.handle()); - let mut exex_manager = std::pin::pin!(ExExManager::new( - provider_factory, + provider, vec![exex_handle], 1, wal, @@ -1170,16 +1210,13 @@ mod tests { exex_manager.handle().send(notification.clone())?; assert!(exex_manager.as_mut().poll(&mut cx)?.is_pending()); - assert_eq!( - notifications.next().poll_unpin(&mut cx), - Poll::Ready(Some(notification.clone())) - ); + assert_eq!(notifications.next().await.unwrap().unwrap(), notification.clone()); assert_eq!( exex_manager.wal.iter_notifications()?.collect::>>()?, [notification.clone()] ); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(genesis_block.header.clone()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx didn't emit the `FinishedHeight` event assert_eq!( @@ -1192,7 +1229,7 @@ mod tests { .send(ExExEvent::FinishedHeight((rng.gen::(), rng.gen::()).into())) .unwrap(); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(genesis_block.header.clone()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL isn't finalized because the ExEx emitted a `FinishedHeight` event with a // non-canonical block @@ -1202,9 +1239,9 @@ mod tests { ); // Send a `FinishedHeight` event with a canonical block - events_tx.send(ExExEvent::FinishedHeight(block.num_hash())).unwrap(); + events_tx.send(ExExEvent::FinishedHeight(genesis_block.num_hash())).unwrap(); - finalized_headers_tx.send(Some(block.header.clone()))?; + finalized_headers_tx.send(Some(genesis_block.header.clone()))?; assert!(exex_manager.as_mut().poll(&mut cx).is_pending()); // WAL is finalized assert!(exex_manager.wal.iter_notifications()?.next().is_none()); diff --git a/crates/exex/exex/src/notifications.rs b/crates/exex/exex/src/notifications.rs index 116dac95422bc..d0c94d34f6442 100644 --- a/crates/exex/exex/src/notifications.rs +++ b/crates/exex/exex/src/notifications.rs @@ 
-13,27 +13,28 @@ use std::{ }; use tokio::sync::mpsc::Receiver; -/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. +/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. If the +/// stream is configured with a head via [`ExExNotifications::set_with_head`] or +/// [`ExExNotifications::with_head`], it will run backfill jobs to catch up to the node head. +#[derive(Debug)] pub struct ExExNotifications { - node_head: Head, - provider: P, - executor: E, - notifications: Receiver, - wal_handle: WalHandle, + inner: ExExNotificationsInner, } -impl Debug for ExExNotifications { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("ExExNotifications") - .field("provider", &self.provider) - .field("executor", &self.executor) - .field("notifications", &self.notifications) - .finish() - } +#[derive(Debug)] +enum ExExNotificationsInner { + /// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. + WithoutHead(ExExNotificationsWithoutHead), + /// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that + /// are committed or reverted after the given head. + WithHead(ExExNotificationsWithHead), + /// Internal state used when transitioning between [`ExExNotificationsInner::WithoutHead`] and + /// [`ExExNotificationsInner::WithHead`]. + Invalid, } impl ExExNotifications { - /// Creates a new instance of [`ExExNotifications`]. + /// Creates a new stream of [`ExExNotifications`] without a head. 
pub const fn new( node_head: Head, provider: P, @@ -41,73 +42,131 @@ impl ExExNotifications { notifications: Receiver, wal_handle: WalHandle, ) -> Self { - Self { node_head, provider, executor, notifications, wal_handle } + Self { + inner: ExExNotificationsInner::WithoutHead(ExExNotificationsWithoutHead::new( + node_head, + provider, + executor, + notifications, + wal_handle, + )), + } } - /// Receives the next value for this receiver. + /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s without a head. /// - /// This method returns `None` if the channel has been closed and there are - /// no remaining messages in the channel's buffer. This indicates that no - /// further values can ever be received from this `Receiver`. The channel is - /// closed when all senders have been dropped, or when [`Receiver::close`] is called. + /// It's a no-op if the stream has already been configured without a head. /// - /// # Cancel safety - /// - /// This method is cancel safe. If `recv` is used as the event in a - /// [`tokio::select!`] statement and some other branch - /// completes first, it is guaranteed that no messages were received on this - /// channel. - /// - /// For full documentation, see [`Receiver::recv`]. - #[deprecated(note = "use `ExExNotifications::next` and its `Stream` implementation instead")] - pub async fn recv(&mut self) -> Option { - self.notifications.recv().await + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. 
+ pub fn set_without_head(&mut self) { + let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); + self.inner = ExExNotificationsInner::WithoutHead(match current { + ExExNotificationsInner::WithoutHead(notifications) => notifications, + ExExNotificationsInner::WithHead(notifications) => ExExNotificationsWithoutHead::new( + notifications.node_head, + notifications.provider, + notifications.executor, + notifications.notifications, + notifications.wal_handle, + ), + ExExNotificationsInner::Invalid => unreachable!(), + }); } - /// Polls to receive the next message on this channel. + /// Returns a new [`ExExNotifications`] without a head. /// - /// This method returns: - /// - /// * `Poll::Pending` if no messages are available but the channel is not closed, or if a - /// spurious failure happens. - /// * `Poll::Ready(Some(message))` if a message is available. - /// * `Poll::Ready(None)` if the channel has been closed and all messages sent before it was - /// closed have been received. + /// See the documentation of [`ExExNotificationsWithoutHead`] for more details. + pub fn without_head(mut self) -> Self { + self.set_without_head(); + self + } + + /// Sets [`ExExNotifications`] to a stream of [`ExExNotification`]s with the provided head. /// - /// When the method returns `Poll::Pending`, the `Waker` in the provided - /// `Context` is scheduled to receive a wakeup when a message is sent on any - /// receiver, or when the channel is closed. Note that on multiple calls to - /// `poll_recv` or `poll_recv_many`, only the `Waker` from the `Context` - /// passed to the most recent call is scheduled to receive a wakeup. + /// It's a no-op if the stream has already been configured with a head. /// - /// If this method returns `Poll::Pending` due to a spurious failure, then - /// the `Waker` will be notified when the situation causing the spurious - /// failure has been resolved. 
Note that receiving such a wakeup does not - /// guarantee that the next call will succeed — it could fail with another - /// spurious failure. + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + pub fn set_with_head(&mut self, exex_head: ExExHead) { + let current = std::mem::replace(&mut self.inner, ExExNotificationsInner::Invalid); + self.inner = ExExNotificationsInner::WithHead(match current { + ExExNotificationsInner::WithoutHead(notifications) => { + notifications.with_head(exex_head) + } + ExExNotificationsInner::WithHead(notifications) => ExExNotificationsWithHead::new( + notifications.node_head, + notifications.provider, + notifications.executor, + notifications.notifications, + notifications.wal_handle, + exex_head, + ), + ExExNotificationsInner::Invalid => unreachable!(), + }); + } + + /// Returns a new [`ExExNotifications`] with the provided head. /// - /// For full documentation, see [`Receiver::poll_recv`]. - #[deprecated( - note = "use `ExExNotifications::poll_next` and its `Stream` implementation instead" - )] - pub fn poll_recv(&mut self, cx: &mut Context<'_>) -> Poll> { - self.notifications.poll_recv(cx) + /// See the documentation of [`ExExNotificationsWithHead`] for more details. + pub fn with_head(mut self, exex_head: ExExHead) -> Self { + self.set_with_head(exex_head); + self } } -impl ExExNotifications +impl Stream for ExExNotifications where P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, E: BlockExecutorProvider + Clone + Unpin + 'static, { - /// Subscribe to notifications with the given head. This head is the ExEx's - /// latest view of the host chain. - /// - /// Notifications will be sent starting from the head, not inclusive. For - /// example, if `head.number == 10`, then the first notification will be - /// with `block.number == 11`. A `head.number` of 10 indicates that the ExEx - /// has processed up to block 10, and is ready to process block 11. 
- pub fn with_head(self, head: ExExHead) -> ExExNotificationsWithHead { + type Item = eyre::Result; + + fn poll_next( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + match &mut self.get_mut().inner { + ExExNotificationsInner::WithoutHead(notifications) => { + notifications.poll_next_unpin(cx).map(|result| result.map(Ok)) + } + ExExNotificationsInner::WithHead(notifications) => notifications.poll_next_unpin(cx), + ExExNotificationsInner::Invalid => unreachable!(), + } + } +} + +/// A stream of [`ExExNotification`]s. The stream will emit notifications for all blocks. +pub struct ExExNotificationsWithoutHead { + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + wal_handle: WalHandle, +} + +impl Debug for ExExNotificationsWithoutHead { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ExExNotifications") + .field("provider", &self.provider) + .field("executor", &self.executor) + .field("notifications", &self.notifications) + .finish() + } +} + +impl ExExNotificationsWithoutHead { + /// Creates a new instance of [`ExExNotificationsWithoutHead`]. + const fn new( + node_head: Head, + provider: P, + executor: E, + notifications: Receiver, + wal_handle: WalHandle, + ) -> Self { + Self { node_head, provider, executor, notifications, wal_handle } + } + + /// Subscribe to notifications with the given head. + fn with_head(self, head: ExExHead) -> ExExNotificationsWithHead { ExExNotificationsWithHead::new( self.node_head, self.provider, @@ -119,7 +178,7 @@ where } } -impl Stream for ExExNotifications { +impl Stream for ExExNotificationsWithoutHead { type Item = ExExNotification; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -128,7 +187,13 @@ impl Stream for ExExNotifications { } /// A stream of [`ExExNotification`]s. The stream will only emit notifications for blocks that are -/// committed or reverted after the given head. 
+/// committed or reverted after the given head. The head is the ExEx's latest view of the host +/// chain. +/// +/// Notifications will be sent starting from the head, not inclusive. For example, if +/// `exex_head.number == 10`, then the first notification will be with `block.number == 11`. An +/// `exex_head.number` of 10 indicates that the ExEx has processed up to block 10, and is ready to +/// process block 11. #[derive(Debug)] pub struct ExExNotificationsWithHead { node_head: Head, @@ -147,13 +212,9 @@ pub struct ExExNotificationsWithHead { backfill_job: Option>, } -impl ExExNotificationsWithHead -where - P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, - E: BlockExecutorProvider + Clone + Unpin + 'static, -{ +impl ExExNotificationsWithHead { /// Creates a new [`ExExNotificationsWithHead`]. - pub const fn new( + const fn new( node_head: Head, provider: P, executor: E, @@ -173,7 +234,13 @@ where backfill_job: None, } } +} +impl ExExNotificationsWithHead +where + P: BlockReader + HeaderProvider + StateProviderFactory + Clone + Unpin + 'static, + E: BlockExecutorProvider + Clone + Unpin + 'static, +{ /// Checks if the ExEx head is on the canonical chain. /// /// If the head block is not found in the database or it's ahead of the node head, it means @@ -271,9 +338,11 @@ where } if let Some(backfill_job) = &mut this.backfill_job { - if let Some(chain) = ready!(backfill_job.poll_next_unpin(cx)) { + debug!(target: "exex::notifications", "Polling backfill job"); + if let Some(chain) = ready!(backfill_job.poll_next_unpin(cx)).transpose()? 
{ + debug!(target: "exex::notifications", range = ?chain.range(), "Backfill job returned a chain"); return Poll::Ready(Some(Ok(ExExNotification::ChainCommitted { - new: Arc::new(chain?), + new: Arc::new(chain), }))) } @@ -367,7 +436,7 @@ mod tests { notifications_tx.send(notification.clone()).await?; - let mut notifications = ExExNotifications::new( + let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, EthExecutorProvider::mainnet(), @@ -438,7 +507,7 @@ mod tests { notifications_tx.send(notification.clone()).await?; - let mut notifications = ExExNotifications::new( + let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, EthExecutorProvider::mainnet(), @@ -528,7 +597,7 @@ mod tests { notifications_tx.send(new_notification.clone()).await?; - let mut notifications = ExExNotifications::new( + let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, EthExecutorProvider::mainnet(), @@ -609,7 +678,7 @@ mod tests { notifications_tx.send(new_notification.clone()).await?; - let mut notifications = ExExNotifications::new( + let mut notifications = ExExNotificationsWithoutHead::new( node_head, provider, EthExecutorProvider::mainnet(), diff --git a/crates/exex/exex/src/wal/metrics.rs b/crates/exex/exex/src/wal/metrics.rs index 7726fc978d474..01837629407e1 100644 --- a/crates/exex/exex/src/wal/metrics.rs +++ b/crates/exex/exex/src/wal/metrics.rs @@ -7,10 +7,10 @@ use reth_metrics::Metrics; pub(super) struct Metrics { /// Size of all notifications in WAL in bytes pub size_bytes: Gauge, - /// Total number of notifications in WAL - pub notifications_total: Gauge, - /// Total number of committed blocks in WAL - pub committed_blocks_total: Gauge, + /// Number of notifications in WAL + pub notifications_count: Gauge, + /// Number of committed blocks in WAL + pub committed_blocks_count: Gauge, /// Lowest committed block height in WAL pub lowest_committed_block_height: Gauge, /// Highest committed block height 
in WAL diff --git a/crates/exex/exex/src/wal/mod.rs b/crates/exex/exex/src/wal/mod.rs index 2341b56d10449..00b0ea919ef62 100644 --- a/crates/exex/exex/src/wal/mod.rs +++ b/crates/exex/exex/src/wal/mod.rs @@ -167,8 +167,8 @@ impl WalInner { fn update_metrics(&self, block_cache: &BlockCache, size_delta: i64) { self.metrics.size_bytes.increment(size_delta as f64); - self.metrics.notifications_total.set(block_cache.notification_max_blocks.len() as f64); - self.metrics.committed_blocks_total.set(block_cache.committed_blocks.len() as f64); + self.metrics.notifications_count.set(block_cache.notification_max_blocks.len() as f64); + self.metrics.committed_blocks_count.set(block_cache.committed_blocks.len() as f64); if let Some(lowest_committed_block_height) = block_cache.lowest_committed_block_height { self.metrics.lowest_committed_block_height.set(lowest_committed_block_height as f64); diff --git a/crates/exex/test-utils/src/lib.rs b/crates/exex/test-utils/src/lib.rs index b8be08616b4ac..e219f55031dbb 100644 --- a/crates/exex/test-utils/src/lib.rs +++ b/crates/exex/test-utils/src/lib.rs @@ -154,6 +154,10 @@ where .consensus(TestConsensusBuilder::default()) .engine_validator(EthereumEngineValidatorBuilder::default()) } + + fn add_ons(&self) -> Self::AddOns { + EthereumAddOns::default() + } } /// A shared [`TempDatabase`] used for testing @@ -267,6 +271,7 @@ pub async fn test_exex_context_with_chain_spec( let network_manager = NetworkManager::new( NetworkConfigBuilder::new(SecretKey::new(&mut rand::thread_rng())) + .with_unused_discovery_port() .build(provider_factory.clone()), ) .await?; diff --git a/crates/exex/types/src/notification.rs b/crates/exex/types/src/notification.rs index 53411250270d8..61d42a3319be3 100644 --- a/crates/exex/types/src/notification.rs +++ b/crates/exex/types/src/notification.rs @@ -136,7 +136,7 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> SerializeAs for ExExNotification<'a> { + impl SerializeAs for ExExNotification<'_> { fn 
serialize_as( source: &super::ExExNotification, serializer: S, diff --git a/crates/fs-util/src/lib.rs b/crates/fs-util/src/lib.rs index 0cfcf04539bdd..d242ecc98e2d1 100644 --- a/crates/fs-util/src/lib.rs +++ b/crates/fs-util/src/lib.rs @@ -307,6 +307,9 @@ where F: FnOnce(&mut File) -> std::result::Result<(), E>, E: Into>, { + #[cfg(windows)] + use std::os::windows::fs::OpenOptionsExt; + let mut tmp_path = file_path.to_path_buf(); tmp_path.set_extension("tmp"); diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index 6ce51786f282a..82c9fe37a44db 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -20,7 +20,6 @@ reth-primitives.workspace = true # ethereum alloy-chains = { workspace = true, features = ["rlp"] } alloy-eips.workspace = true -alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } @@ -36,6 +35,7 @@ proptest-arbitrary-interop = { workspace = true, optional = true } [dev-dependencies] reth-primitives = { workspace = true, features = ["arbitrary"] } +alloy-genesis.workspace = true alloy-chains = { workspace = true, features = ["arbitrary"] } arbitrary = { workspace = true, features = ["derive"] } proptest.workspace = true diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index baf1e2991522f..a5e7530ec09a4 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -1,9 +1,8 @@ use crate::EthVersion; use alloy_chains::{Chain, NamedChain}; -use alloy_genesis::Genesis; use alloy_primitives::{hex, B256, U256}; use alloy_rlp::{RlpDecodable, RlpEncodable}; -use reth_chainspec::{ChainSpec, EthChainSpec, Hardforks, MAINNET}; +use reth_chainspec::{EthChainSpec, Hardforks, MAINNET}; use reth_codecs_derive::add_arbitrary_tests; use reth_primitives::{EthereumHardfork, ForkId, Head}; use std::fmt::{Debug, Display}; @@ -43,23 +42,6 @@ pub 
struct Status { pub forkid: ForkId, } -impl From for Status { - fn from(genesis: Genesis) -> Self { - let chain = genesis.config.chain_id; - let total_difficulty = genesis.difficulty; - let chainspec = ChainSpec::from(genesis); - - Self { - version: EthVersion::Eth68 as u8, - chain: Chain::from_id(chain), - total_difficulty, - blockhash: chainspec.genesis_hash(), - genesis: chainspec.genesis_hash(), - forkid: chainspec.fork_id(&Head::default()), - } - } -} - impl Status { /// Helper for returning a builder for the status message. pub fn builder() -> StatusBuilder { @@ -71,10 +53,10 @@ impl Status { self.version = version as u8; } - /// Create a [`StatusBuilder`] from the given [`ChainSpec`] and head block. + /// Create a [`StatusBuilder`] from the given [`EthChainSpec`] and head block. /// - /// Sets the `chain` and `genesis`, `blockhash`, and `forkid` fields based on the [`ChainSpec`] - /// and head. + /// Sets the `chain` and `genesis`, `blockhash`, and `forkid` fields based on the + /// [`EthChainSpec`] and head. pub fn spec_builder(spec: Spec, head: &Head) -> StatusBuilder where Spec: EthChainSpec + Hardforks, diff --git a/crates/net/eth-wire/Cargo.toml b/crates/net/eth-wire/Cargo.toml index 13224a37ada17..6eea4bc4ac659 100644 --- a/crates/net/eth-wire/Cargo.toml +++ b/crates/net/eth-wire/Cargo.toml @@ -62,6 +62,7 @@ proptest.workspace = true proptest-arbitrary-interop.workspace = true async-stream.workspace = true serde.workspace = true +alloy-eips.workspace = true [features] arbitrary = [ diff --git a/crates/net/eth-wire/tests/pooled_transactions.rs b/crates/net/eth-wire/tests/pooled_transactions.rs index 7927384c95b02..6690f42631a66 100644 --- a/crates/net/eth-wire/tests/pooled_transactions.rs +++ b/crates/net/eth-wire/tests/pooled_transactions.rs @@ -1,5 +1,6 @@ //! 
Decoding tests for [`PooledTransactions`] +use alloy_eips::eip2718::Decodable2718; use alloy_primitives::hex; use alloy_rlp::{Decodable, Encodable}; use reth_eth_wire::{EthVersion, PooledTransactions, ProtocolMessage}; @@ -72,5 +73,5 @@ fn decode_blob_rpc_transaction() { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("testdata/rpc_blob_transaction"); let data = fs::read_to_string(network_data_path).expect("Unable to read file"); let hex_data = hex::decode(data.trim()).unwrap(); - let _txs = PooledTransactionsElement::decode_enveloped(&mut hex_data.as_ref()).unwrap(); + let _txs = PooledTransactionsElement::decode_2718(&mut hex_data.as_ref()).unwrap(); } diff --git a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 8217a02a1bab6..9d9183edcf986 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -106,6 +106,14 @@ impl NetworkConfig { NetworkConfig::builder(secret_key).build(client) } + /// Apply a function to the config. + pub fn apply(self, f: F) -> Self + where + F: FnOnce(Self) -> Self, + { + f(self) + } + /// Sets the config to use for the discovery v4 protocol. pub fn set_discovery_v4(mut self, discovery_config: Discv4Config) -> Self { self.discovery_v4_config = Some(discovery_config); @@ -339,6 +347,18 @@ impl NetworkConfigBuilder { self } + /// Sets the discovery port to an unused port. + /// This is useful for testing. + pub fn with_unused_discovery_port(self) -> Self { + self.discovery_port(0) + } + + /// Sets the listener port to an unused port. + /// This is useful for testing. + pub fn with_unused_listener_port(self) -> Self { + self.listener_port(0) + } + /// Sets the external ip resolver to use for discovery v4. /// /// If no [`Discv4ConfigBuilder`] is set via [`Self::discovery`], this will create a new one. 
diff --git a/crates/net/network/src/peers.rs b/crates/net/network/src/peers.rs index b9196d29e8e4d..3d5ff7a0d43ac 100644 --- a/crates/net/network/src/peers.rs +++ b/crates/net/network/src/peers.rs @@ -1131,7 +1131,7 @@ mod tests { peers: &'a mut PeersManager, } - impl<'a> Future for PeerActionFuture<'a> { + impl Future for PeerActionFuture<'_> { type Output = PeerAction; fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { diff --git a/crates/net/network/src/transactions/config.rs b/crates/net/network/src/transactions/config.rs index b8023ca7928c4..81ec293ea1ff9 100644 --- a/crates/net/network/src/transactions/config.rs +++ b/crates/net/network/src/transactions/config.rs @@ -18,6 +18,9 @@ pub struct TransactionsManagerConfig { pub transaction_fetcher_config: TransactionFetcherConfig, /// Max number of seen transactions to store for each peer. pub max_transactions_seen_by_peer_history: u32, + /// How new pending transactions are propagated. + #[cfg_attr(feature = "serde", serde(default))] + pub propagation_mode: TransactionPropagationMode, } impl Default for TransactionsManagerConfig { @@ -25,6 +28,31 @@ impl Default for TransactionsManagerConfig { Self { transaction_fetcher_config: TransactionFetcherConfig::default(), max_transactions_seen_by_peer_history: DEFAULT_MAX_COUNT_TRANSACTIONS_SEEN_BY_PEER, + propagation_mode: TransactionPropagationMode::default(), + } + } +} + +/// Determines how new pending transactions are propagated to other peers in full. +#[derive(Debug, Clone, Default)] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum TransactionPropagationMode { + /// Send full transactions to sqrt of current peers. + #[default] + Sqrt, + /// Always send transactions in full. 
+ All, + /// Send full transactions to a maximum number of peers + Max(usize), +} + +impl TransactionPropagationMode { + /// Returns the number of peers that should + pub(crate) fn full_peer_count(&self, peer_count: usize) -> usize { + match self { + Self::Sqrt => (peer_count as f64).sqrt().round() as usize, + Self::All => peer_count, + Self::Max(max) => peer_count.min(*max), } } } diff --git a/crates/net/network/src/transactions/fetcher.rs b/crates/net/network/src/transactions/fetcher.rs index e15972df08b28..9276219d593b7 100644 --- a/crates/net/network/src/transactions/fetcher.rs +++ b/crates/net/network/src/transactions/fetcher.rs @@ -98,6 +98,11 @@ pub struct TransactionFetcher { // === impl TransactionFetcher === impl TransactionFetcher { + /// Removes the peer from the active set. + pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) { + self.active_peers.remove(peer_id); + } + /// Updates metrics. #[inline] pub fn update_metrics(&self) { @@ -125,20 +130,27 @@ impl TransactionFetcher { /// Sets up transaction fetcher with config pub fn with_transaction_fetcher_config(config: &TransactionFetcherConfig) -> Self { - let mut tx_fetcher = Self::default(); + let TransactionFetcherConfig { + max_inflight_requests, + max_capacity_cache_txns_pending_fetch, + .. 
+ } = *config; - tx_fetcher.info.soft_limit_byte_size_pooled_transactions_response = - config.soft_limit_byte_size_pooled_transactions_response; - tx_fetcher.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request = - config.soft_limit_byte_size_pooled_transactions_response_on_pack_request; - tx_fetcher - .metrics - .capacity_inflight_requests - .increment(tx_fetcher.info.max_inflight_requests as u64); - tx_fetcher.info.max_capacity_cache_txns_pending_fetch = - config.max_capacity_cache_txns_pending_fetch; + let info = config.clone().into(); - tx_fetcher + let metrics = TransactionFetcherMetrics::default(); + metrics.capacity_inflight_requests.increment(max_inflight_requests as u64); + + Self { + active_peers: LruMap::new(max_inflight_requests), + hashes_pending_fetch: LruCache::new(max_capacity_cache_txns_pending_fetch), + hashes_fetch_inflight_and_pending_fetch: LruMap::new( + max_inflight_requests + max_capacity_cache_txns_pending_fetch, + ), + info, + metrics, + ..Default::default() + } } /// Removes the specified hashes from inflight tracking. @@ -157,7 +169,7 @@ impl TransactionFetcher { fn decrement_inflight_request_count_for(&mut self, peer_id: &PeerId) { let remove = || -> bool { if let Some(inflight_count) = self.active_peers.get(peer_id) { - *inflight_count -= 1; + *inflight_count = inflight_count.saturating_sub(1); if *inflight_count == 0 { return true } @@ -173,7 +185,7 @@ impl TransactionFetcher { /// Returns `true` if peer is idle with respect to `self.inflight_requests`. 
pub fn is_idle(&self, peer_id: &PeerId) -> bool { let Some(inflight_count) = self.active_peers.peek(peer_id) else { return true }; - if *inflight_count < DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER { + if *inflight_count < self.info.max_inflight_requests_per_peer { return true } false @@ -648,19 +660,17 @@ impl TransactionFetcher { return Some(new_announced_hashes) }; - if *inflight_count >= DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER { + if *inflight_count >= self.info.max_inflight_requests_per_peer { trace!(target: "net::tx", peer_id=format!("{peer_id:#}"), hashes=?*new_announced_hashes, %conn_eth_version, - max_concurrent_tx_reqs_per_peer=DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, + max_concurrent_tx_reqs_per_peer=self.info.max_inflight_requests_per_peer, "limit for concurrent `GetPooledTransactions` requests per peer reached" ); return Some(new_announced_hashes) } - *inflight_count += 1; - #[cfg(debug_assertions)] { for hash in &new_announced_hashes { @@ -695,6 +705,8 @@ impl TransactionFetcher { } } } + + *inflight_count += 1; // stores a new request future for the request self.inflight_requests.push(GetPooledTxRequestFut::new(peer_id, new_announced_hashes, rx)); @@ -1283,10 +1295,12 @@ pub enum VerificationOutcome { } /// Tracks stats about the [`TransactionFetcher`]. -#[derive(Debug)] +#[derive(Debug, Constructor)] pub struct TransactionFetcherInfo { /// Max inflight [`GetPooledTransactions`] requests. pub max_inflight_requests: usize, + /// Max inflight [`GetPooledTransactions`] requests per peer. + pub max_inflight_requests_per_peer: u8, /// Soft limit for the byte size of the expected [`PooledTransactions`] response, upon packing /// a [`GetPooledTransactions`] request with hashes (by default less than 2 MiB worth of /// transactions is requested). 
@@ -1300,27 +1314,11 @@ pub struct TransactionFetcherInfo { pub max_capacity_cache_txns_pending_fetch: u32, } -impl TransactionFetcherInfo { - /// Creates a new max - pub const fn new( - max_inflight_requests: usize, - soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize, - soft_limit_byte_size_pooled_transactions_response: usize, - max_capacity_cache_txns_pending_fetch: u32, - ) -> Self { - Self { - max_inflight_requests, - soft_limit_byte_size_pooled_transactions_response_on_pack_request, - soft_limit_byte_size_pooled_transactions_response, - max_capacity_cache_txns_pending_fetch, - } - } -} - impl Default for TransactionFetcherInfo { fn default() -> Self { Self::new( - DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize * DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER as usize, + DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize, + DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER, DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH, @@ -1328,6 +1326,26 @@ impl Default for TransactionFetcherInfo { } } +impl From for TransactionFetcherInfo { + fn from(config: TransactionFetcherConfig) -> Self { + let TransactionFetcherConfig { + max_inflight_requests, + max_inflight_requests_per_peer, + soft_limit_byte_size_pooled_transactions_response, + soft_limit_byte_size_pooled_transactions_response_on_pack_request, + max_capacity_cache_txns_pending_fetch, + } = config; + + Self::new( + max_inflight_requests as usize, + max_inflight_requests_per_peer, + soft_limit_byte_size_pooled_transactions_response_on_pack_request, + soft_limit_byte_size_pooled_transactions_response, + max_capacity_cache_txns_pending_fetch, + ) + } +} + #[derive(Debug, Default)] struct TxFetcherSearchDurations { find_idle_peer: Duration, diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 
7e3b71aa4365a..0c488ff919dc8 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -12,7 +12,7 @@ pub use self::constants::{ tx_fetcher::DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE, }; -pub use config::{TransactionFetcherConfig, TransactionsManagerConfig}; +pub use config::{TransactionFetcherConfig, TransactionPropagationMode, TransactionsManagerConfig}; pub use validation::*; pub(crate) use fetcher::{FetchEvent, TransactionFetcher}; @@ -246,8 +246,8 @@ pub struct TransactionsManager { pending_transactions: ReceiverStream, /// Incoming events from the [`NetworkManager`](crate::NetworkManager). transaction_events: UnboundedMeteredReceiver, - /// Max number of seen transactions to store for each peer. - max_transactions_seen_by_peer_history: u32, + /// How the `TransactionsManager` is configured. + config: TransactionsManagerConfig, /// `TransactionsManager` metrics metrics: TransactionsManagerMetrics, } @@ -298,8 +298,7 @@ impl TransactionsManager { from_network, NETWORK_POOL_TRANSACTIONS_SCOPE, ), - max_transactions_seen_by_peer_history: transactions_manager_config - .max_transactions_seen_by_peer_history, + config: transactions_manager_config, metrics, } } @@ -424,9 +423,8 @@ where return propagated } - // send full transactions to a fraction of the connected peers (square root of the total - // number of connected peers) - let max_num_full = (self.peers.len() as f64).sqrt().round() as usize; + // send full transactions to a set of the connected peers based on the configured mode + let max_num_full = self.config.propagation_mode.full_peer_count(self.peers.len()); // Note: Assuming ~random~ order due to random state of the peers map hasher for (peer_idx, (peer_id, peer)) in self.peers.iter_mut().enumerate() { @@ -904,6 +902,7 @@ where NetworkEvent::SessionClosed { peer_id, .. 
} => { // remove the peer self.peers.remove(&peer_id); + self.transaction_fetcher.remove_peer(&peer_id); } NetworkEvent::SessionEstablished { peer_id, client_version, messages, version, .. @@ -913,7 +912,7 @@ where messages, version, client_version, - self.max_transactions_seen_by_peer_history, + self.config.max_transactions_seen_by_peer_history, ); let peer = match self.peers.entry(peer_id) { Entry::Occupied(mut entry) => { @@ -1033,7 +1032,7 @@ where has_bad_transactions = true; } else { // this is a new transaction that should be imported into the pool - let pool_transaction = Pool::Transaction::from_pooled(tx); + let pool_transaction = Pool::Transaction::from_pooled(tx.into()); new_txs.push(pool_transaction); entry.insert(HashSet::from([peer_id])); @@ -1396,11 +1395,14 @@ impl PropagateTransaction { } /// Create a new instance from a pooled transaction - fn new>( - tx: Arc>, - ) -> Self { + fn new(tx: Arc>) -> Self + where + T: PoolTransaction>, + { let size = tx.encoded_length(); - let transaction = Arc::new(tx.transaction.clone().into_consensus().into_signed()); + let recovered: TransactionSignedEcRecovered = + tx.transaction.clone().into_consensus().into(); + let transaction = Arc::new(recovered.into_signed()); Self { size, transaction } } } diff --git a/crates/node/builder/src/builder/add_ons.rs b/crates/node/builder/src/builder/add_ons.rs index 910cd5896efe6..26d7553bb86d4 100644 --- a/crates/node/builder/src/builder/add_ons.rs +++ b/crates/node/builder/src/builder/add_ons.rs @@ -14,6 +14,8 @@ pub struct AddOns> { pub exexs: Vec<(String, Box>)>, /// Additional RPC add-ons. pub rpc: RpcAddOns, + /// Additional captured addons. 
+ pub addons: AddOns, } /// Captures node specific addons that can be installed on top of the type configured node and are diff --git a/crates/node/builder/src/builder/mod.rs b/crates/node/builder/src/builder/mod.rs index 4989589c9f985..8b57781ea97c3 100644 --- a/crates/node/builder/src/builder/mod.rs +++ b/crates/node/builder/src/builder/mod.rs @@ -9,6 +9,13 @@ pub use states::*; use std::sync::Arc; +use crate::{ + common::WithConfigs, + components::NodeComponentsBuilder, + node::FullNode, + rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext}, + DefaultNodeLauncher, LaunchNode, Node, NodeHandle, +}; use futures::Future; use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks}; use reth_cli_util::get_secret_key; @@ -18,7 +25,8 @@ use reth_db_api::{ }; use reth_exex::ExExContext; use reth_network::{ - NetworkBuilder, NetworkConfig, NetworkConfigBuilder, NetworkHandle, NetworkManager, + transactions::TransactionsManagerConfig, NetworkBuilder, NetworkConfig, NetworkConfigBuilder, + NetworkHandle, NetworkManager, }; use reth_node_api::{ FullNodeTypes, FullNodeTypesAdapter, NodeAddOns, NodeTypes, NodeTypesWithDBAdapter, @@ -38,14 +46,6 @@ use reth_transaction_pool::{PoolConfig, TransactionPool}; use secp256k1::SecretKey; use tracing::{info, trace, warn}; -use crate::{ - common::WithConfigs, - components::NodeComponentsBuilder, - node::FullNode, - rpc::{EthApiBuilderProvider, RethRpcServerHandles, RpcContext}, - DefaultNodeLauncher, LaunchNode, Node, NodeHandle, -}; - /// The adapter type for a reth node with the builtin provider type // Note: we need to hardcode this because custom components might depend on it in associated types. 
pub type RethFullAdapter = FullNodeTypesAdapter< @@ -243,7 +243,7 @@ where where N: Node, ChainSpec = ChainSpec>, { - self.with_types().with_components(node.components_builder()).with_add_ons::() + self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } } @@ -311,7 +311,7 @@ where where N: Node, ChainSpec = ChainSpec>, { - self.with_types().with_components(node.components_builder()).with_add_ons::() + self.with_types().with_components(node.components_builder()).with_add_ons(node.add_ons()) } /// Launches a preconfigured [Node] @@ -375,12 +375,15 @@ where { /// Advances the state of the node builder to the next state where all customizable /// [`NodeAddOns`] types are configured. - pub fn with_add_ons(self) -> WithLaunchContext> + pub fn with_add_ons( + self, + add_ons: AO, + ) -> WithLaunchContext> where AO: NodeAddOns>, { WithLaunchContext { - builder: self.builder.with_add_ons::(), + builder: self.builder.with_add_ons(add_ons), task_executor: self.task_executor, } } @@ -580,16 +583,34 @@ impl BuilderContext { self.config().builder.clone() } - /// Convenience function to start the network. + /// Convenience function to start the network tasks. /// /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`] /// connected to that network. pub fn start_network(&self, builder: NetworkBuilder<(), ()>, pool: Pool) -> NetworkHandle + where + Pool: TransactionPool + Unpin + 'static, + { + self.start_network_with(builder, pool, Default::default()) + } + + /// Convenience function to start the network tasks. + /// + /// Accepts the config for the transaction task. + /// + /// Spawns the configured network and associated tasks and returns the [`NetworkHandle`] + /// connected to that network. 
+ pub fn start_network_with( + &self, + builder: NetworkBuilder<(), ()>, + pool: Pool, + tx_config: TransactionsManagerConfig, + ) -> NetworkHandle where Pool: TransactionPool + Unpin + 'static, { let (handle, network, txpool, eth) = builder - .transactions(pool, Default::default()) + .transactions(pool, tx_config) .request_handler(self.provider().clone()) .split_with_handle(); diff --git a/crates/node/builder/src/builder/states.rs b/crates/node/builder/src/builder/states.rs index 30ef54c5683e8..80930ef743cda 100644 --- a/crates/node/builder/src/builder/states.rs +++ b/crates/node/builder/src/builder/states.rs @@ -58,6 +58,7 @@ impl NodeBuilderWithTypes { hooks: NodeHooks::default(), rpc: RpcAddOns { hooks: RpcHooks::default() }, exexs: Vec::new(), + addons: (), }, } } @@ -168,7 +169,7 @@ where { /// Advances the state of the node builder to the next state where all customizable /// [`NodeAddOns`] types are configured. - pub fn with_add_ons(self) -> NodeBuilderWithComponents + pub fn with_add_ons(self, addons: AO) -> NodeBuilderWithComponents where AO: NodeAddOns>, { @@ -182,6 +183,7 @@ where hooks: NodeHooks::default(), rpc: RpcAddOns { hooks: RpcHooks::default() }, exexs: Vec::new(), + addons, }, } } diff --git a/crates/node/builder/src/components/pool.rs b/crates/node/builder/src/components/pool.rs index af31f29307ebf..234455913c6f9 100644 --- a/crates/node/builder/src/components/pool.rs +++ b/crates/node/builder/src/components/pool.rs @@ -1,8 +1,8 @@ //! Pool component for the node builder. -use std::future::Future; - -use reth_transaction_pool::TransactionPool; +use alloy_primitives::Address; +use reth_transaction_pool::{PoolConfig, SubPoolLimit, TransactionPool}; +use std::{collections::HashSet, future::Future}; use crate::{BuilderContext, FullNodeTypes}; @@ -34,3 +34,59 @@ where self(ctx) } } + +/// Convenience type to override cli or default pool configuration during build. 
+#[derive(Debug, Clone, Default)] +pub struct PoolBuilderConfigOverrides { + /// Max number of transaction in the pending sub-pool + pub pending_limit: Option, + /// Max number of transaction in the basefee sub-pool + pub basefee_limit: Option, + /// Max number of transaction in the queued sub-pool + pub queued_limit: Option, + /// Max number of transactions in the blob sub-pool + pub blob_limit: Option, + /// Max number of executable transaction slots guaranteed per account + pub max_account_slots: Option, + /// Minimum base fee required by the protocol. + pub minimal_protocol_basefee: Option, + /// Addresses that will be considered as local. Above exemptions apply. + pub local_addresses: HashSet
, +} + +impl PoolBuilderConfigOverrides { + /// Applies the configured overrides to the given [`PoolConfig`]. + pub fn apply(self, mut config: PoolConfig) -> PoolConfig { + let Self { + pending_limit, + basefee_limit, + queued_limit, + blob_limit, + max_account_slots, + minimal_protocol_basefee, + local_addresses, + } = self; + + if let Some(pending_limit) = pending_limit { + config.pending_limit = pending_limit; + } + if let Some(basefee_limit) = basefee_limit { + config.basefee_limit = basefee_limit; + } + if let Some(queued_limit) = queued_limit { + config.queued_limit = queued_limit; + } + if let Some(blob_limit) = blob_limit { + config.blob_limit = blob_limit; + } + if let Some(max_account_slots) = max_account_slots { + config.max_account_slots = max_account_slots; + } + if let Some(minimal_protocol_basefee) = minimal_protocol_basefee { + config.minimal_protocol_basefee = minimal_protocol_basefee; + } + config.local_transactions_config.local_addresses.extend(local_addresses); + + config + } +} diff --git a/crates/node/builder/src/launch/engine.rs b/crates/node/builder/src/launch/engine.rs index e71a0263c52cb..46ffacbf717a2 100644 --- a/crates/node/builder/src/launch/engine.rs +++ b/crates/node/builder/src/launch/engine.rs @@ -91,7 +91,7 @@ where let NodeBuilderWithComponents { adapter: NodeTypesAdapter { database }, components_builder, - add_ons: AddOns { hooks, rpc, exexs: installed_exex }, + add_ons: AddOns { hooks, rpc, exexs: installed_exex, .. }, config, } = target; let NodeHooks { on_component_initialized, on_node_started, .. 
} = hooks; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index db98ffacedeb0..3188cde4b1578 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -126,7 +126,7 @@ where let NodeBuilderWithComponents { adapter: NodeTypesAdapter { database }, components_builder, - add_ons: AddOns { hooks, rpc, exexs: installed_exex }, + add_ons: AddOns { hooks, rpc, exexs: installed_exex, .. }, config, } = target; let NodeHooks { on_component_initialized, on_node_started, .. } = hooks; diff --git a/crates/node/builder/src/node.rs b/crates/node/builder/src/node.rs index 5d047f94c9182..3a70c08c10313 100644 --- a/crates/node/builder/src/node.rs +++ b/crates/node/builder/src/node.rs @@ -34,21 +34,29 @@ pub trait Node: NodeTypesWithEngine + Clone { /// Returns a [`NodeComponentsBuilder`] for the node. fn components_builder(&self) -> Self::ComponentsBuilder; + + /// Returns the node add-ons. + fn add_ons(&self) -> Self::AddOns; } /// A [`Node`] type builder #[derive(Clone, Default, Debug)] -pub struct AnyNode(PhantomData<(N, AO)>, C); +pub struct AnyNode(PhantomData, C, AO); -impl AnyNode { +impl AnyNode { /// Configures the types of the node. - pub fn types(self) -> AnyNode { - AnyNode::(PhantomData::<(T, ())>, self.1) + pub fn types(self) -> AnyNode { + AnyNode(PhantomData, self.1, self.2) } /// Sets the node components builder. - pub const fn components_builder(&self, value: T) -> AnyNode { - AnyNode::(PhantomData::<(N, ())>, value) + pub fn components_builder(self, value: T) -> AnyNode { + AnyNode(PhantomData, value, self.2) + } + + /// Sets the node add-ons. + pub fn add_ons(self, value: T) -> AnyNode { + AnyNode(PhantomData, self.1, value) } } @@ -84,6 +92,10 @@ where fn components_builder(&self) -> Self::ComponentsBuilder { self.1.clone() } + + fn add_ons(&self) -> Self::AddOns { + self.2.clone() + } } /// The launched node with all components including RPC handlers. 
diff --git a/crates/node/builder/src/rpc.rs b/crates/node/builder/src/rpc.rs index 607a33147e940..cb8d8f355dac9 100644 --- a/crates/node/builder/src/rpc.rs +++ b/crates/node/builder/src/rpc.rs @@ -254,7 +254,7 @@ pub struct RpcContext<'a, Node: FullNodeComponents, EthApi: EthApiTypes> { pub auth_module: &'a mut AuthRpcModule, } -impl<'a, Node, EthApi> RpcContext<'a, Node, EthApi> +impl RpcContext<'_, Node, EthApi> where Node: FullNodeComponents, EthApi: EthApiTypes, diff --git a/crates/node/core/Cargo.toml b/crates/node/core/Cargo.toml index 35802cf5165a5..3ac90a888705a 100644 --- a/crates/node/core/Cargo.toml +++ b/crates/node/core/Cargo.toml @@ -14,9 +14,7 @@ workspace = true # reth reth-chainspec.workspace = true reth-primitives.workspace = true -reth-cli.workspace = true reth-cli-util.workspace = true -reth-fs-util.workspace = true reth-db = { workspace = true, features = ["mdbx"] } reth-storage-errors.workspace = true reth-storage-api.workspace = true @@ -37,10 +35,8 @@ reth-network-peers.workspace = true reth-consensus-common.workspace = true reth-prune-types.workspace = true reth-stages-types.workspace = true -reth-optimism-chainspec = { workspace = true, optional = true } # ethereum -alloy-genesis.workspace = true alloy-primitives.workspace = true alloy-rpc-types-engine = { workspace = true, features = ["jwt"] } @@ -59,7 +55,6 @@ thiserror.workspace = true # io dirs-next = "2.0.0" shellexpand.workspace = true -serde_json.workspace = true # tracing tracing.workspace = true @@ -82,10 +77,7 @@ tempfile.workspace = true [features] optimism = [ - "reth-primitives/optimism", - "reth-rpc-types-compat/optimism", - "reth-rpc-eth-api/optimism", - "dep:reth-optimism-chainspec", + "reth-primitives/optimism" ] # Features for vergen to generate correct env vars jemalloc = [] diff --git a/crates/node/core/src/args/mod.rs b/crates/node/core/src/args/mod.rs index 1a647ac65b031..7f1b643615156 100644 --- a/crates/node/core/src/args/mod.rs +++ 
b/crates/node/core/src/args/mod.rs @@ -56,7 +56,5 @@ pub use datadir_args::DatadirArgs; mod benchmark_args; pub use benchmark_args::BenchmarkArgs; -pub mod utils; - mod error; pub mod types; diff --git a/crates/node/core/src/args/network.rs b/crates/node/core/src/args/network.rs index 0f1465bc5795b..04153b93ecdd4 100644 --- a/crates/node/core/src/args/network.rs +++ b/crates/node/core/src/args/network.rs @@ -226,6 +226,7 @@ impl NetworkArgs { self.max_capacity_cache_txns_pending_fetch, ), max_transactions_seen_by_peer_history: self.max_seen_tx_history, + propagation_mode: Default::default(), }; // Configure basic network stack diff --git a/crates/node/core/src/args/utils.rs b/crates/node/core/src/args/utils.rs deleted file mode 100644 index e6ebda45b7f71..0000000000000 --- a/crates/node/core/src/args/utils.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! Clap parser utilities - -use std::{path::PathBuf, sync::Arc}; - -use alloy_genesis::Genesis; -use reth_chainspec::ChainSpec; -#[cfg(not(feature = "optimism"))] -use reth_chainspec::{DEV, HOLESKY, MAINNET, SEPOLIA}; -use reth_cli::chainspec::ChainSpecParser; -use reth_fs_util as fs; -#[cfg(feature = "optimism")] -use reth_optimism_chainspec::{BASE_MAINNET, BASE_SEPOLIA, OP_DEV, OP_MAINNET, OP_SEPOLIA}; - -#[cfg(feature = "optimism")] -/// Chains supported by op-reth. First value should be used as the default. -pub const SUPPORTED_CHAINS: &[&str] = - &["optimism", "optimism-sepolia", "base", "base-sepolia", "dev"]; -#[cfg(not(feature = "optimism"))] -/// Chains supported by reth. First value should be used as the default. -pub const SUPPORTED_CHAINS: &[&str] = &["mainnet", "sepolia", "holesky", "dev"]; - -/// Clap value parser for [`ChainSpec`]s. -/// -/// The value parser matches either a known chain, the path -/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. 
-#[cfg(not(feature = "optimism"))] -pub fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { - Ok(match s { - "mainnet" => MAINNET.clone(), - "sepolia" => SEPOLIA.clone(), - "holesky" => HOLESKY.clone(), - "dev" => DEV.clone(), - _ => Arc::new(parse_custom_chain_spec(s)?), - }) -} - -/// Clap value parser for [`OpChainSpec`](reth_optimism_chainspec::OpChainSpec)s. -/// -/// The value parser matches either a known chain, the path -/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. -#[cfg(feature = "optimism")] -pub fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { - Ok(Arc::new(match s { - "optimism" => OP_MAINNET.inner.clone(), - "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.inner.clone(), - "base" => BASE_MAINNET.inner.clone(), - "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.inner.clone(), - "dev" => OP_DEV.inner.clone(), - _ => parse_custom_chain_spec(s)?, - })) -} - -/// Parses a custom [`ChainSpec`]. -pub fn parse_custom_chain_spec(s: &str) -> eyre::Result { - // try to read json from path first - let raw = match fs::read_to_string(PathBuf::from(shellexpand::full(s)?.into_owned())) { - Ok(raw) => raw, - Err(io_err) => { - // valid json may start with "\n", but must contain "{" - if s.contains('{') { - s.to_string() - } else { - return Err(io_err.into()) // assume invalid path - } - } - }; - - // both serialized Genesis and ChainSpec structs supported - let genesis: Genesis = serde_json::from_str(&raw)?; - - Ok(genesis.into()) -} - -/// A chain specification parser for ethereum chains. 
-#[derive(Debug, Copy, Clone, Default)] -#[non_exhaustive] -pub struct EthereumChainSpecParser; - -impl ChainSpecParser for EthereumChainSpecParser { - type ChainSpec = ChainSpec; - - const SUPPORTED_CHAINS: &'static [&'static str] = SUPPORTED_CHAINS; - - fn parse(s: &str) -> eyre::Result> { - chain_value_parser(s) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn parse_known_chain_spec() { - for chain in SUPPORTED_CHAINS { - chain_value_parser(chain).unwrap(); - } - } -} diff --git a/crates/node/types/src/lib.rs b/crates/node/types/src/lib.rs index 2c72e02d3edc7..0f6d5a1a370f3 100644 --- a/crates/node/types/src/lib.rs +++ b/crates/node/types/src/lib.rs @@ -40,6 +40,7 @@ pub trait NodeTypes: Send + Sync + Unpin + 'static { pub trait NodeTypesWithEngine: NodeTypes { /// The node's engine types, defining the interaction with the consensus engine. type Engine: EngineTypes; + // type Engine: EngineTypes; } /// A helper trait that is downstream of the [`NodeTypesWithEngine`] trait and adds database to the diff --git a/crates/optimism/bin/Cargo.toml b/crates/optimism/bin/Cargo.toml index 13e7b1aa6d4d4..2de0bb6ee181d 100644 --- a/crates/optimism/bin/Cargo.toml +++ b/crates/optimism/bin/Cargo.toml @@ -15,6 +15,12 @@ reth-optimism-cli.workspace = true reth-provider.workspace = true reth-optimism-rpc.workspace = true reth-optimism-node.workspace = true +reth-optimism-chainspec.workspace = true +reth-optimism-consensus.workspace = true +reth-optimism-evm.workspace = true +reth-optimism-payload-builder.workspace = true +reth-optimism-primitives.workspace = true +reth-optimism-forks.workspace = true clap = { workspace = true, features = ["derive", "env"] } tracing.workspace = true @@ -25,7 +31,7 @@ workspace = true [features] default = ["jemalloc"] -jemalloc = ["reth-cli-util/jemalloc"] +jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] jemalloc-prof = ["reth-cli-util/jemalloc-prof"] tracy-allocator = ["reth-cli-util/tracy-allocator"] diff 
--git a/crates/optimism/bin/src/lib.rs b/crates/optimism/bin/src/lib.rs new file mode 100644 index 0000000000000..21c28f7c5470f --- /dev/null +++ b/crates/optimism/bin/src/lib.rs @@ -0,0 +1,73 @@ +//! Rust Optimism (op-reth) binary executable. +//! +//! ## Feature Flags +//! +//! - `jemalloc`: Uses [jemallocator](https://github.com/tikv/jemallocator) as the global allocator. +//! This is **not recommended on Windows**. See [here](https://rust-lang.github.io/rfcs/1974-global-allocators.html#jemalloc) +//! for more info. +//! - `jemalloc-prof`: Enables [jemallocator's](https://github.com/tikv/jemallocator) heap profiling +//! and leak detection functionality. See [jemalloc's opt.prof](https://jemalloc.net/jemalloc.3.html#opt.prof) +//! documentation for usage details. This is **not recommended on Windows**. See [here](https://rust-lang.github.io/rfcs/1974-global-allocators.html#jemalloc) +//! for more info. +//! - `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented +//! in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more +//! details and supported targets +//! - `min-error-logs`: Disables all logs below `error` level. +//! - `min-warn-logs`: Disables all logs below `warn` level. +//! - `min-info-logs`: Disables all logs below `info` level. This can speed up the node, since fewer +//! calls to the logging component is made. +//! - `min-debug-logs`: Disables all logs below `debug` level. +//! - `min-trace-logs`: Disables all logs below `trace` level. +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +// The `optimism` feature must be enabled to use this crate. 
+#![cfg(feature = "optimism")] + +/// Re-exported from `reth_optimism_cli`. +pub mod cli { + pub use reth_optimism_cli::*; +} + +/// Re-exported from `reth_optimism_chainspec`. +pub mod chainspec { + pub use reth_optimism_chainspec::*; +} + +/// Re-exported from `reth_optimism_consensus`. +pub mod consensus { + pub use reth_optimism_consensus::*; +} + +/// Re-exported from `reth_optimism_evm`. +pub mod evm { + pub use reth_optimism_evm::*; +} + +/// Re-exported from `reth_optimism_forks`. +pub mod forks { + pub use reth_optimism_forks::*; +} + +/// Re-exported from `reth_optimism_node`. +pub mod node { + pub use reth_optimism_node::*; +} + +/// Re-exported from `reth_optimism_payload_builder`. +pub mod payload { + pub use reth_optimism_payload_builder::*; +} + +/// Re-exported from `reth_optimism_primitives`. +pub mod primitives { + pub use reth_optimism_primitives::*; +} + +/// Re-exported from `reth_optimism_rpc`. +pub mod rpc { + pub use reth_optimism_rpc::*; +} diff --git a/crates/optimism/bin/src/main.rs b/crates/optimism/bin/src/main.rs index cecd01ea88971..6822a6a50ec4e 100644 --- a/crates/optimism/bin/src/main.rs +++ b/crates/optimism/bin/src/main.rs @@ -1,7 +1,6 @@ #![allow(missing_docs, rustdoc::missing_crate_level_docs)] // The `optimism` feature must be enabled to use this crate. 
#![cfg(feature = "optimism")] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] use clap::Parser; use reth_node_builder::{engine_tree_config::TreeConfig, EngineNodeLauncher}; @@ -35,7 +34,7 @@ fn main() { let handle = builder .with_types_and_provider::>() .with_components(OptimismNode::components(rollup_args)) - .with_add_ons::() + .with_add_ons(OptimismAddOns::new(sequencer_http_arg.clone())) .extend_rpc_modules(move |ctx| { // register sequencer tx forwarder if let Some(sequencer_http) = sequencer_http_arg { diff --git a/crates/optimism/chainspec/Cargo.toml b/crates/optimism/chainspec/Cargo.toml index 41b54902c201b..e13b95056c7fa 100644 --- a/crates/optimism/chainspec/Cargo.toml +++ b/crates/optimism/chainspec/Cargo.toml @@ -26,12 +26,15 @@ alloy-chains.workspace = true alloy-genesis.workspace = true alloy-primitives.workspace = true +# op +op-alloy-rpc-types.workspace = true + # io serde_json.workspace = true # misc -once_cell.workspace = true derive_more.workspace = true +once_cell.workspace = true [dev-dependencies] reth-chainspec = { workspace = true, features = ["test-utils"] } diff --git a/crates/optimism/chainspec/src/lib.rs b/crates/optimism/chainspec/src/lib.rs index 2668c374500df..22316a234d9b1 100644 --- a/crates/optimism/chainspec/src/lib.rs +++ b/crates/optimism/chainspec/src/lib.rs @@ -27,10 +27,12 @@ pub use op::OP_MAINNET; pub use op_sepolia::OP_SEPOLIA; use derive_more::{Constructor, Deref, Into}; +use once_cell::sync::OnceCell; use reth_chainspec::{ - BaseFeeParams, ChainSpec, DepositContract, EthChainSpec, EthereumHardforks, ForkFilter, ForkId, - Hardforks, Head, + BaseFeeParams, BaseFeeParamsKind, ChainSpec, DepositContract, EthChainSpec, EthereumHardforks, + ForkFilter, ForkId, Hardforks, Head, }; +use reth_ethereum_forks::{ChainHardforks, EthereumHardfork, ForkCondition}; use reth_network_peers::NodeRecord; use reth_primitives_traits::Header; @@ -52,14 +54,14 @@ impl EthChainSpec for OpChainSpec { self.inner.chain() } - fn 
base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { - self.inner.base_fee_params_at_timestamp(timestamp) - } - fn base_fee_params_at_block(&self, block_number: u64) -> BaseFeeParams { self.inner.base_fee_params_at_block(block_number) } + fn base_fee_params_at_timestamp(&self, timestamp: u64) -> BaseFeeParams { + self.inner.base_fee_params_at_timestamp(timestamp) + } + fn deposit_contract(&self) -> Option<&DepositContract> { self.inner.deposit_contract() } @@ -118,20 +120,161 @@ impl Hardforks for OpChainSpec { } impl EthereumHardforks for OpChainSpec { + fn get_final_paris_total_difficulty(&self) -> Option { + self.inner.get_final_paris_total_difficulty() + } + fn final_paris_total_difficulty(&self, block_number: u64) -> Option { self.inner.final_paris_total_difficulty(block_number) } +} - fn get_final_paris_total_difficulty(&self) -> Option { - self.inner.get_final_paris_total_difficulty() +impl From for OpChainSpec { + fn from(genesis: Genesis) -> Self { + use reth_optimism_forks::OptimismHardfork; + let optimism_genesis_info = OptimismGenesisInfo::extract_from(&genesis); + let genesis_info = + optimism_genesis_info.optimism_chain_info.genesis_info.unwrap_or_default(); + + // Block-based hardforks + let hardfork_opts = [ + (EthereumHardfork::Homestead.boxed(), genesis.config.homestead_block), + (EthereumHardfork::Tangerine.boxed(), genesis.config.eip150_block), + (EthereumHardfork::SpuriousDragon.boxed(), genesis.config.eip155_block), + (EthereumHardfork::Byzantium.boxed(), genesis.config.byzantium_block), + (EthereumHardfork::Constantinople.boxed(), genesis.config.constantinople_block), + (EthereumHardfork::Petersburg.boxed(), genesis.config.petersburg_block), + (EthereumHardfork::Istanbul.boxed(), genesis.config.istanbul_block), + (EthereumHardfork::MuirGlacier.boxed(), genesis.config.muir_glacier_block), + (EthereumHardfork::Berlin.boxed(), genesis.config.berlin_block), + (EthereumHardfork::London.boxed(), genesis.config.london_block), + 
(EthereumHardfork::ArrowGlacier.boxed(), genesis.config.arrow_glacier_block), + (EthereumHardfork::GrayGlacier.boxed(), genesis.config.gray_glacier_block), + (OptimismHardfork::Bedrock.boxed(), genesis_info.bedrock_block), + ]; + let mut block_hardforks = hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| opt.map(|block| (hardfork, ForkCondition::Block(block)))) + .collect::>(); + + // Paris + let paris_block_and_final_difficulty = + if let Some(ttd) = genesis.config.terminal_total_difficulty { + block_hardforks.push(( + EthereumHardfork::Paris.boxed(), + ForkCondition::TTD { + total_difficulty: ttd, + fork_block: genesis.config.merge_netsplit_block, + }, + )); + + genesis.config.merge_netsplit_block.map(|block| (block, ttd)) + } else { + None + }; + + // Time-based hardforks + let time_hardfork_opts = [ + (EthereumHardfork::Shanghai.boxed(), genesis.config.shanghai_time), + (EthereumHardfork::Cancun.boxed(), genesis.config.cancun_time), + (EthereumHardfork::Prague.boxed(), genesis.config.prague_time), + (OptimismHardfork::Regolith.boxed(), genesis_info.regolith_time), + (OptimismHardfork::Canyon.boxed(), genesis_info.canyon_time), + (OptimismHardfork::Ecotone.boxed(), genesis_info.ecotone_time), + (OptimismHardfork::Fjord.boxed(), genesis_info.fjord_time), + (OptimismHardfork::Granite.boxed(), genesis_info.granite_time), + ]; + + let mut time_hardforks = time_hardfork_opts + .into_iter() + .filter_map(|(hardfork, opt)| { + opt.map(|time| (hardfork, ForkCondition::Timestamp(time))) + }) + .collect::>(); + + block_hardforks.append(&mut time_hardforks); + + // Ordered Hardforks + let mainnet_hardforks = OptimismHardfork::op_mainnet(); + let mainnet_order = mainnet_hardforks.forks_iter(); + + let mut ordered_hardforks = Vec::with_capacity(block_hardforks.len()); + for (hardfork, _) in mainnet_order { + if let Some(pos) = block_hardforks.iter().position(|(e, _)| **e == *hardfork) { + ordered_hardforks.push(block_hardforks.remove(pos)); + } + } + + // append 
the remaining unknown hardforks to ensure we don't filter any out + ordered_hardforks.append(&mut block_hardforks); + + Self { + inner: ChainSpec { + chain: genesis.config.chain_id.into(), + genesis, + genesis_hash: OnceCell::new(), + hardforks: ChainHardforks::new(ordered_hardforks), + paris_block_and_final_difficulty, + base_fee_params: optimism_genesis_info.base_fee_params, + ..Default::default() + }, + } + } +} + +#[derive(Default, Debug)] +struct OptimismGenesisInfo { + optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo, + base_fee_params: BaseFeeParamsKind, +} + +impl OptimismGenesisInfo { + fn extract_from(genesis: &Genesis) -> Self { + let mut info = Self { + optimism_chain_info: op_alloy_rpc_types::genesis::OptimismChainInfo::extract_from( + &genesis.config.extra_fields, + ) + .unwrap_or_default(), + ..Default::default() + }; + if let Some(optimism_base_fee_info) = &info.optimism_chain_info.base_fee_info { + if let (Some(elasticity), Some(denominator)) = ( + optimism_base_fee_info.eip1559_elasticity, + optimism_base_fee_info.eip1559_denominator, + ) { + let base_fee_params = if let Some(canyon_denominator) = + optimism_base_fee_info.eip1559_denominator_canyon + { + BaseFeeParamsKind::Variable( + vec![ + ( + EthereumHardfork::London.boxed(), + BaseFeeParams::new(denominator as u128, elasticity as u128), + ), + ( + reth_optimism_forks::OptimismHardfork::Canyon.boxed(), + BaseFeeParams::new(canyon_denominator as u128, elasticity as u128), + ), + ] + .into(), + ) + } else { + BaseFeeParams::new(denominator as u128, elasticity as u128).into() + }; + + info.base_fee_params = base_fee_params; + } + } + + info } } #[cfg(test)] mod tests { - use alloy_genesis::Genesis; + use alloy_genesis::{ChainConfig, Genesis}; use alloy_primitives::b256; - use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind, ChainSpec}; + use reth_chainspec::{test_fork_ids, BaseFeeParams, BaseFeeParamsKind}; use reth_ethereum_forks::{EthereumHardfork, 
ForkCondition, ForkHash, ForkId, Head}; use reth_optimism_forks::{OptimismHardfork, OptimismHardforks}; @@ -383,7 +526,7 @@ mod tests { }) ); - let chain_spec: ChainSpec = genesis.into(); + let chain_spec: OpChainSpec = genesis.into(); assert_eq!( chain_spec.base_fee_params, @@ -449,7 +592,7 @@ mod tests { }) ); - let chain_spec: ChainSpec = genesis.into(); + let chain_spec: OpChainSpec = genesis.into(); assert_eq!( chain_spec.base_fee_params, @@ -511,7 +654,7 @@ mod tests { } "#; let genesis: Genesis = serde_json::from_str(geth_genesis).unwrap(); - let chainspec = ChainSpec::from(genesis.clone()); + let chainspec = OpChainSpec::from(genesis.clone()); let actual_chain_id = genesis.config.chain_id; assert_eq!(actual_chain_id, 8453); @@ -552,4 +695,79 @@ mod tests { assert!(chainspec.is_fork_active_at_timestamp(OptimismHardfork::Regolith, 20)); } + + #[test] + fn test_fork_order_optimism_mainnet() { + use reth_optimism_forks::OptimismHardfork; + + let genesis = Genesis { + config: ChainConfig { + chain_id: 0, + homestead_block: Some(0), + dao_fork_block: Some(0), + dao_fork_support: false, + eip150_block: Some(0), + eip155_block: Some(0), + eip158_block: Some(0), + byzantium_block: Some(0), + constantinople_block: Some(0), + petersburg_block: Some(0), + istanbul_block: Some(0), + muir_glacier_block: Some(0), + berlin_block: Some(0), + london_block: Some(0), + arrow_glacier_block: Some(0), + gray_glacier_block: Some(0), + merge_netsplit_block: Some(0), + shanghai_time: Some(0), + cancun_time: Some(0), + terminal_total_difficulty: Some(U256::ZERO), + extra_fields: [ + (String::from("bedrockBlock"), 0.into()), + (String::from("regolithTime"), 0.into()), + (String::from("canyonTime"), 0.into()), + (String::from("ecotoneTime"), 0.into()), + (String::from("fjordTime"), 0.into()), + (String::from("graniteTime"), 0.into()), + ] + .into_iter() + .collect(), + ..Default::default() + }, + ..Default::default() + }; + + let chain_spec: OpChainSpec = genesis.into(); + + let 
hardforks: Vec<_> = chain_spec.hardforks.forks_iter().map(|(h, _)| h).collect(); + let expected_hardforks = vec![ + EthereumHardfork::Homestead.boxed(), + EthereumHardfork::Tangerine.boxed(), + EthereumHardfork::SpuriousDragon.boxed(), + EthereumHardfork::Byzantium.boxed(), + EthereumHardfork::Constantinople.boxed(), + EthereumHardfork::Petersburg.boxed(), + EthereumHardfork::Istanbul.boxed(), + EthereumHardfork::MuirGlacier.boxed(), + EthereumHardfork::Berlin.boxed(), + EthereumHardfork::London.boxed(), + EthereumHardfork::ArrowGlacier.boxed(), + EthereumHardfork::GrayGlacier.boxed(), + EthereumHardfork::Paris.boxed(), + OptimismHardfork::Bedrock.boxed(), + OptimismHardfork::Regolith.boxed(), + EthereumHardfork::Shanghai.boxed(), + OptimismHardfork::Canyon.boxed(), + EthereumHardfork::Cancun.boxed(), + OptimismHardfork::Ecotone.boxed(), + OptimismHardfork::Fjord.boxed(), + OptimismHardfork::Granite.boxed(), + ]; + + assert!(expected_hardforks + .iter() + .zip(hardforks.iter()) + .all(|(expected, actual)| &**expected == *actual)); + assert_eq!(expected_hardforks.len(), hardforks.len()); + } } diff --git a/crates/optimism/cli/Cargo.toml b/crates/optimism/cli/Cargo.toml index 99d1641e36438..d53270cd62f82 100644 --- a/crates/optimism/cli/Cargo.toml +++ b/crates/optimism/cli/Cargo.toml @@ -27,6 +27,9 @@ reth-node-core.workspace = true reth-optimism-node.workspace = true reth-primitives.workspace = true +# so jemalloc metrics can be included +reth-node-metrics.workspace = true + ## optimism reth-optimism-primitives.workspace = true reth-optimism-chainspec.workspace = true @@ -82,3 +85,9 @@ asm-keccak = [ "reth-optimism-node/asm-keccak", "reth-primitives/asm-keccak", ] + +# Jemalloc feature for vergen to generate correct env vars +jemalloc = [ + "reth-node-core/jemalloc", + "reth-node-metrics/jemalloc" +] diff --git a/crates/optimism/cli/src/chainspec.rs b/crates/optimism/cli/src/chainspec.rs index e76bfd5f0656c..329669ab8c9cd 100644 --- 
a/crates/optimism/cli/src/chainspec.rs +++ b/crates/optimism/cli/src/chainspec.rs @@ -1,28 +1,12 @@ -use std::sync::Arc; - -use reth_cli::chainspec::ChainSpecParser; -use reth_node_core::args::utils::parse_custom_chain_spec; +use reth_cli::chainspec::{parse_genesis, ChainSpecParser}; use reth_optimism_chainspec::{ OpChainSpec, BASE_MAINNET, BASE_SEPOLIA, OP_DEV, OP_MAINNET, OP_SEPOLIA, }; - -/// Clap value parser for [`OpChainSpec`]s. -/// -/// The value parser matches either a known chain, the path -/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. -fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { - Ok(match s { - "dev" => OP_DEV.clone(), - "optimism" => OP_MAINNET.clone(), - "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), - "base" => BASE_MAINNET.clone(), - "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), - _ => Arc::new(OpChainSpec { inner: parse_custom_chain_spec(s)? }), - }) -} +use std::sync::Arc; /// Optimism chain specification parser. #[derive(Debug, Clone, Default)] +#[non_exhaustive] pub struct OpChainSpecParser; impl ChainSpecParser for OpChainSpecParser { @@ -43,6 +27,21 @@ impl ChainSpecParser for OpChainSpecParser { } } +/// Clap value parser for [`OpChainSpec`]s. +/// +/// The value parser matches either a known chain, the path +/// to a json file, or a json formatted string in-memory. The json needs to be a Genesis struct. 
+pub fn chain_value_parser(s: &str) -> eyre::Result, eyre::Error> { + Ok(match s { + "dev" => OP_DEV.clone(), + "optimism" => OP_MAINNET.clone(), + "optimism_sepolia" | "optimism-sepolia" => OP_SEPOLIA.clone(), + "base" => BASE_MAINNET.clone(), + "base_sepolia" | "base-sepolia" => BASE_SEPOLIA.clone(), + _ => Arc::new(parse_genesis(s)?.into()), + }) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/optimism/cli/src/lib.rs b/crates/optimism/cli/src/lib.rs index b17cefa63287a..e6eed86bf7fc5 100644 --- a/crates/optimism/cli/src/lib.rs +++ b/crates/optimism/cli/src/lib.rs @@ -51,6 +51,10 @@ use reth_optimism_node::OptimismNode; use reth_tracing::FileWorkerGuard; use tracing::info; +// This allows us to manually enable node metrics features, required for proper jemalloc metric +// reporting +use reth_node_metrics as _; + /// The main op-reth cli interface. /// /// This is the entrypoint to the executable. @@ -176,13 +180,14 @@ where #[cfg(test)] mod test { + use crate::chainspec::OpChainSpecParser; use clap::Parser; - use reth_cli_commands::NodeCommand; + use reth_cli_commands::{node::NoArgs, NodeCommand}; use reth_optimism_chainspec::OP_DEV; #[test] fn parse_dev() { - let cmd: NodeCommand = NodeCommand::parse_from(["op-reth", "--dev"]); + let cmd = NodeCommand::::parse_from(["op-reth", "--dev"]); let chain = OP_DEV.clone(); assert_eq!(cmd.chain.chain, chain.chain); assert_eq!(cmd.chain.genesis_hash, chain.genesis_hash); diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index 6ca9cec5260e5..4e5918831f764 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -10,7 +10,7 @@ use reth_evm::{ BatchExecutor, BlockExecutionError, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, - system_calls::SystemCaller, + system_calls::{NoopHook, OnStateHook, SystemCaller}, ConfigureEvm, }; use reth_execution_types::ExecutionOutcome; @@ 
-108,18 +108,23 @@ where /// /// This applies the pre-execution changes, and executes the transactions. /// + /// The optional `state_hook` will be executed with the state changes if present. + /// /// # Note /// /// It does __not__ apply post-execution changes. - fn execute_pre_and_transactions( + fn execute_pre_and_transactions( &self, block: &BlockWithSenders, mut evm: Evm<'_, Ext, &mut State>, + state_hook: Option, ) -> Result<(Vec, u64), BlockExecutionError> where DB: Database + Display>, + F: OnStateHook, { - let mut system_caller = SystemCaller::new(&self.evm_config, &self.chain_spec); + let mut system_caller = + SystemCaller::new(&self.evm_config, &self.chain_spec).with_state_hook(state_hook); // apply pre execution changes system_caller.apply_beacon_root_contract_call( @@ -178,7 +183,7 @@ where self.evm_config.fill_tx_env(evm.tx_mut(), transaction, *sender); // Execute transaction. - let ResultAndState { result, state } = evm.transact().map_err(move |err| { + let result_and_state = evm.transact().map_err(move |err| { let new_err = err.map_db_err(|e| e.into()); // Ensure hash is calculated for error log, if not already done BlockValidationError::EVM { @@ -192,7 +197,8 @@ where ?transaction, "Executed transaction" ); - + system_caller.on_state(&result_and_state); + let ResultAndState { result, state } = result_and_state; evm.db_mut().commit(state); // append gas used @@ -278,16 +284,30 @@ where EnvWithHandlerCfg::new_with_cfg_env(cfg, block_env, Default::default()) } + /// Convenience method to invoke `execute_without_verification_with_state_hook` setting the + /// state hook as `None`. + fn execute_without_verification( + &mut self, + block: &BlockWithSenders, + total_difficulty: U256, + ) -> Result<(Vec, u64), BlockExecutionError> { + self.execute_without_verification_with_state_hook(block, total_difficulty, None::) + } + /// Execute a single block and apply the state changes to the internal state. 
/// /// Returns the receipts of the transactions in the block and the total gas used. /// /// Returns an error if execution fails. - fn execute_without_verification( + fn execute_without_verification_with_state_hook( &mut self, block: &BlockWithSenders, total_difficulty: U256, - ) -> Result<(Vec, u64), BlockExecutionError> { + state_hook: Option, + ) -> Result<(Vec, u64), BlockExecutionError> + where + F: OnStateHook, + { // 1. prepare state on new block self.on_new_block(&block.header); @@ -296,7 +316,7 @@ where let (receipts, gas_used) = { let evm = self.executor.evm_config.evm_with_env(&mut self.state, env); - self.executor.execute_pre_and_transactions(block, evm) + self.executor.execute_pre_and_transactions(block, evm, state_hook) }?; // 3. apply post execution changes @@ -383,6 +403,32 @@ where gas_used, }) } + + fn execute_with_state_hook( + mut self, + input: Self::Input<'_>, + state_hook: F, + ) -> Result + where + F: OnStateHook, + { + let BlockExecutionInput { block, total_difficulty } = input; + let (receipts, gas_used) = self.execute_without_verification_with_state_hook( + block, + total_difficulty, + Some(state_hook), + )?; + + // NOTE: we need to merge keep the reverts for the bundle retention + self.state.merge_transitions(BundleRetention::Reverts); + + Ok(BlockExecutionOutput { + state: self.state.take_bundle(), + receipts, + requests: vec![], + gas_used, + }) + } } /// An executor for a batch of blocks. 
diff --git a/crates/optimism/node/Cargo.toml b/crates/optimism/node/Cargo.toml index cb74bcbc09040..3c298bea9cee0 100644 --- a/crates/optimism/node/Cargo.toml +++ b/crates/optimism/node/Cargo.toml @@ -81,7 +81,6 @@ optimism = [ "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-rpc-types-compat/optimism", "reth-optimism-evm/optimism", "reth-optimism-payload-builder/optimism", "reth-beacon-consensus/optimism", diff --git a/crates/optimism/node/src/engine.rs b/crates/optimism/node/src/engine.rs index a9adadd3068e8..11c8f15cfdac1 100644 --- a/crates/optimism/node/src/engine.rs +++ b/crates/optimism/node/src/engine.rs @@ -21,7 +21,39 @@ use reth_optimism_payload_builder::{OptimismBuiltPayload, OptimismPayloadBuilder /// The types used in the optimism beacon consensus engine. #[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] #[non_exhaustive] -pub struct OptimismEngineTypes; +pub struct OptimismEngineTypes { + _marker: std::marker::PhantomData, +} + +impl PayloadTypes for OptimismEngineTypes { + type BuiltPayload = T::BuiltPayload; + type PayloadAttributes = T::PayloadAttributes; + type PayloadBuilderAttributes = T::PayloadBuilderAttributes; +} + +impl EngineTypes for OptimismEngineTypes +where + T::BuiltPayload: TryInto + + TryInto + + TryInto + + TryInto, +{ + type ExecutionPayloadV1 = ExecutionPayloadV1; + type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; + type ExecutionPayloadV3 = OptimismExecutionPayloadEnvelopeV3; + type ExecutionPayloadV4 = OptimismExecutionPayloadEnvelopeV4; +} + +/// A default payload type for [`OptimismEngineTypes`] +#[derive(Debug, Default, Clone, serde::Deserialize, serde::Serialize)] +#[non_exhaustive] +pub struct OptimismPayloadTypes; + +impl PayloadTypes for OptimismPayloadTypes { + type BuiltPayload = OptimismBuiltPayload; + type PayloadAttributes = OptimismPayloadAttributes; + type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes; +} /// Validator for Optimism 
engine API. #[derive(Debug, Clone)] @@ -36,19 +68,6 @@ impl OptimismEngineValidator { } } -impl PayloadTypes for OptimismEngineTypes { - type BuiltPayload = OptimismBuiltPayload; - type PayloadAttributes = OptimismPayloadAttributes; - type PayloadBuilderAttributes = OptimismPayloadBuilderAttributes; -} - -impl EngineTypes for OptimismEngineTypes { - type ExecutionPayloadV1 = ExecutionPayloadV1; - type ExecutionPayloadV2 = ExecutionPayloadEnvelopeV2; - type ExecutionPayloadV3 = OptimismExecutionPayloadEnvelopeV3; - type ExecutionPayloadV4 = OptimismExecutionPayloadEnvelopeV4; -} - /// Validates the presence of the `withdrawals` field according to the payload timestamp. /// /// After Canyon, withdrawals field must be [Some]. diff --git a/crates/optimism/node/src/node.rs b/crates/optimism/node/src/node.rs index d82cb70a3ffec..caeab3741a2e6 100644 --- a/crates/optimism/node/src/node.rs +++ b/crates/optimism/node/src/node.rs @@ -3,13 +3,14 @@ use std::sync::Arc; use reth_basic_payload_builder::{BasicPayloadJobGenerator, BasicPayloadJobGeneratorConfig}; +use reth_chainspec::{EthChainSpec, Hardforks}; use reth_evm::ConfigureEvm; -use reth_network::{NetworkHandle, NetworkManager}; +use reth_network::{NetworkConfig, NetworkHandle, NetworkManager}; use reth_node_api::{EngineValidator, FullNodeComponents, NodeAddOns}; use reth_node_builder::{ components::{ ComponentsBuilder, ConsensusBuilder, EngineValidatorBuilder, ExecutorBuilder, - NetworkBuilder, PayloadServiceBuilder, PoolBuilder, + NetworkBuilder, PayloadServiceBuilder, PoolBuilder, PoolBuilderConfigOverrides, }, node::{FullNodeTypes, NodeTypes, NodeTypesWithEngine}, BuilderContext, Node, PayloadBuilderConfig, @@ -102,6 +103,10 @@ where let Self { args } = self; Self::components(args.clone()) } + + fn add_ons(&self) -> Self::AddOns { + OptimismAddOns::new(self.args.sequencer_http.clone()) + } } impl NodeTypes for OptimismNode { @@ -115,7 +120,21 @@ impl NodeTypesWithEngine for OptimismNode { /// Add-ons w.r.t. optimism. 
#[derive(Debug, Clone)] -pub struct OptimismAddOns; +pub struct OptimismAddOns { + sequencer_http: Option, +} + +impl OptimismAddOns { + /// Create a new instance with the given `sequencer_http` URL. + pub const fn new(sequencer_http: Option) -> Self { + Self { sequencer_http } + } + + /// Returns the sequencer HTTP URL. + pub fn sequencer_http(&self) -> Option<&str> { + self.sequencer_http.as_deref() + } +} impl NodeAddOns for OptimismAddOns { type EthApi = OpEthApi; @@ -148,9 +167,11 @@ where /// /// This contains various settings that can be configured and take precedence over the node's /// config. -#[derive(Debug, Default, Clone, Copy)] -#[non_exhaustive] -pub struct OptimismPoolBuilder; +#[derive(Debug, Default, Clone)] +pub struct OptimismPoolBuilder { + /// Enforced overrides that are applied to the pool config. + pub pool_config_overrides: PoolBuilderConfigOverrides, +} impl PoolBuilder for OptimismPoolBuilder where @@ -159,6 +180,7 @@ where type Pool = OpTransactionPool; async fn build_pool(self, ctx: &BuilderContext) -> eyre::Result { + let Self { pool_config_overrides } = self; let data_dir = ctx.config().datadir(); let blob_store = DiskFileBlobStore::open(data_dir.blobstore(), Default::default())?; @@ -180,7 +202,7 @@ where validator, CoinbaseTipOrdering::default(), blob_store, - ctx.pool_config(), + pool_config_overrides.apply(ctx.pool_config()), ); info!(target: "reth::cli", "Transaction pool initialized"); let transactions_path = data_dir.txpool_transactions(); @@ -309,18 +331,18 @@ pub struct OptimismNetworkBuilder { pub disable_discovery_v4: bool, } -impl NetworkBuilder for OptimismNetworkBuilder -where - Node: FullNodeTypes>, - Pool: TransactionPool + Unpin + 'static, -{ - async fn build_network( - self, +impl OptimismNetworkBuilder { + /// Returns the [`NetworkConfig`] that contains the settings to launch the p2p network. + /// + /// This applies the configured [`OptimismNetworkBuilder`] settings. 
+ pub fn network_config( + &self, ctx: &BuilderContext, - pool: Pool, - ) -> eyre::Result { - let Self { disable_txpool_gossip, disable_discovery_v4 } = self; - + ) -> eyre::Result::Provider>> + where + Node: FullNodeTypes>, + { + let Self { disable_txpool_gossip, disable_discovery_v4 } = self.clone(); let args = &ctx.config().network; let network_builder = ctx .network_config_builder()? @@ -353,8 +375,22 @@ where // gossip to prevent other parties in the network from learning about them. network_config.tx_gossip_disabled = disable_txpool_gossip; - let network = NetworkManager::builder(network_config).await?; + Ok(network_config) + } +} +impl NetworkBuilder for OptimismNetworkBuilder +where + Node: FullNodeTypes>, + Pool: TransactionPool + Unpin + 'static, +{ + async fn build_network( + self, + ctx: &BuilderContext, + pool: Pool, + ) -> eyre::Result { + let network_config = self.network_config(ctx)?; + let network = NetworkManager::builder(network_config).await?; let handle = ctx.start_network(network, pool); Ok(handle) diff --git a/crates/optimism/node/src/txpool.rs b/crates/optimism/node/src/txpool.rs index 811c37e91cb57..7ed2a161d0e4f 100644 --- a/crates/optimism/node/src/txpool.rs +++ b/crates/optimism/node/src/txpool.rs @@ -140,7 +140,7 @@ where let l1_block_info = self.block_info.l1_block_info.read().clone(); let mut encoded = Vec::with_capacity(valid_tx.transaction().encoded_length()); - valid_tx.transaction().clone().into_consensus().encode_2718(&mut encoded); + valid_tx.transaction().clone().into_consensus().into().encode_2718(&mut encoded); let cost_addition = match l1_block_info.l1_tx_data_fee( &self.chain_spec(), diff --git a/crates/optimism/node/tests/it/builder.rs b/crates/optimism/node/tests/it/builder.rs index 20363828e8612..f1dde4c2c0a81 100644 --- a/crates/optimism/node/tests/it/builder.rs +++ b/crates/optimism/node/tests/it/builder.rs @@ -15,7 +15,7 @@ fn test_basic_setup() { .with_database(db) .with_types::() 
.with_components(OptimismNode::components(Default::default())) - .with_add_ons::() + .with_add_ons(OptimismAddOns::new(None)) .on_component_initialized(move |ctx| { let _provider = ctx.provider(); Ok(()) diff --git a/crates/optimism/payload/Cargo.toml b/crates/optimism/payload/Cargo.toml index 117f63201a48a..d5f8b520e822a 100644 --- a/crates/optimism/payload/Cargo.toml +++ b/crates/optimism/payload/Cargo.toml @@ -52,7 +52,6 @@ optimism = [ "reth-chainspec/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-rpc-types-compat/optimism", "reth-optimism-evm/optimism", "reth-revm/optimism", ] diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index cb3b939136f30..f1ba24435092c 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -183,7 +183,7 @@ impl BuiltPayload for OptimismBuiltPayload { } } -impl<'a> BuiltPayload for &'a OptimismBuiltPayload { +impl BuiltPayload for &OptimismBuiltPayload { fn block(&self) -> &SealedBlock { (**self).block() } diff --git a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml index 24c3eb02d247c..65dce7510b0e9 100644 --- a/crates/optimism/rpc/Cargo.toml +++ b/crates/optimism/rpc/Cargo.toml @@ -65,6 +65,5 @@ optimism = [ "reth-optimism-evm/optimism", "reth-primitives/optimism", "reth-provider/optimism", - "reth-rpc-eth-api/optimism", "revm/optimism", ] diff --git a/crates/optimism/rpc/src/eth/transaction.rs b/crates/optimism/rpc/src/eth/transaction.rs index 2556c895783e4..ab7525016a1ac 100644 --- a/crates/optimism/rpc/src/eth/transaction.rs +++ b/crates/optimism/rpc/src/eth/transaction.rs @@ -34,7 +34,8 @@ where /// Returns the hash of the transaction. 
async fn send_raw_transaction(&self, tx: Bytes) -> Result { let recovered = recover_raw_transaction(tx.clone())?; - let pool_transaction = ::Transaction::from_pooled(recovered); + let pool_transaction = + ::Transaction::from_pooled(recovered.into()); // On optimism, transactions are forwarded directly to the sequencer to be included in // blocks that it builds. @@ -104,14 +105,19 @@ impl TransactionCompat for OpTxBuilder { fn fill(tx: TransactionSignedEcRecovered, tx_info: TransactionInfo) -> Self::Transaction { let signed_tx = tx.clone().into_signed(); - let inner = EthTxBuilder::fill(tx, tx_info).inner; + let mut inner = EthTxBuilder::fill(tx, tx_info).inner; + + if signed_tx.is_deposit() { + inner.gas_price = Some(signed_tx.max_fee_per_gas()) + } Transaction { inner, source_hash: signed_tx.source_hash(), mint: signed_tx.mint(), // only include is_system_tx if true: - is_system_tx: signed_tx.is_deposit().then_some(signed_tx.is_system_transaction()), + is_system_tx: (signed_tx.is_deposit() && signed_tx.is_system_transaction()) + .then_some(true), deposit_receipt_version: None, // todo: how to fill this field? 
} } diff --git a/crates/payload/builder/src/database.rs b/crates/payload/builder/src/database.rs index ea1ae08543487..d63f7322dee21 100644 --- a/crates/payload/builder/src/database.rs +++ b/crates/payload/builder/src/database.rs @@ -67,7 +67,7 @@ pub struct CachedReadsDbMut<'a, DB> { pub db: DB, } -impl<'a, DB: DatabaseRef> Database for CachedReadsDbMut<'a, DB> { +impl Database for CachedReadsDbMut<'_, DB> { type Error = ::Error; fn basic(&mut self, address: Address) -> Result, Self::Error> { @@ -130,7 +130,7 @@ pub struct CachedReadsDBRef<'a, DB> { pub inner: RefCell>, } -impl<'a, DB: DatabaseRef> DatabaseRef for CachedReadsDBRef<'a, DB> { +impl DatabaseRef for CachedReadsDBRef<'_, DB> { type Error = ::Error; fn basic_ref(&self, address: Address) -> Result, Self::Error> { diff --git a/crates/primitives-traits/src/header/sealed.rs b/crates/primitives-traits/src/header/sealed.rs index f2047e079c8d5..7119a37e742aa 100644 --- a/crates/primitives-traits/src/header/sealed.rs +++ b/crates/primitives-traits/src/header/sealed.rs @@ -14,22 +14,24 @@ use serde::{Deserialize, Serialize}; /// to modify header. #[derive(Debug, Clone, PartialEq, Eq, Hash, AsRef, Deref, Serialize, Deserialize)] #[add_arbitrary_tests(rlp)] -pub struct SealedHeader { +pub struct SealedHeader { /// Locked Header hash. hash: BlockHash, /// Locked Header fields. #[as_ref] #[deref] - header: Header, + header: H, } -impl SealedHeader { +impl SealedHeader { /// Creates the sealed header with the corresponding block hash. #[inline] - pub const fn new(header: Header, hash: BlockHash) -> Self { + pub const fn new(header: H, hash: BlockHash) -> Self { Self { header, hash } } +} +impl SealedHeader { /// Returns the sealed Header fields. 
#[inline] pub const fn header(&self) -> &Header { @@ -182,7 +184,7 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> SerializeAs for SealedHeader<'a> { + impl SerializeAs for SealedHeader<'_> { fn serialize_as(source: &super::SealedHeader, serializer: S) -> Result where S: Serializer, diff --git a/crates/primitives-traits/src/storage.rs b/crates/primitives-traits/src/storage.rs index 801edba088f4c..39b6155ee2841 100644 --- a/crates/primitives-traits/src/storage.rs +++ b/crates/primitives-traits/src/storage.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// /// `key` is the subkey when used as a value in the `StorageChangeSets` table. #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, PartialOrd, Ord)] -#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))] +#[cfg_attr(any(test, feature = "arbitrary"), derive(arbitrary::Arbitrary))] #[add_arbitrary_tests(compact)] pub struct StorageEntry { /// Storage key. diff --git a/crates/primitives-traits/src/withdrawal.rs b/crates/primitives-traits/src/withdrawal.rs index f6b0607e7f032..995e60292c6e1 100644 --- a/crates/primitives-traits/src/withdrawal.rs +++ b/crates/primitives-traits/src/withdrawal.rs @@ -133,7 +133,7 @@ mod tests { // #[test] fn test_withdrawal_serde_roundtrip() { - let input = 
r#"[{"index":"0x0","validatorIndex":"0x0","address":"0x0000000000000000000000000000000000001000","amount":"0x1"},{"index":"0x1","validatorIndex":"0x1","address":"0x0000000000000000000000000000000000001001","amount":"0x1"},{"index":"0x2","validatorIndex":"0x2","address":"0x0000000000000000000000000000000000001002","amount":"0x1"},{"index":"0x3","validatorIndex":"0x3","address":"0x0000000000000000000000000000000000001003","amount":"0x1"},{"index":"0x4","validatorIndex":"0x4","address":"0x0000000000000000000000000000000000001004","amount":"0x1"},{"index":"0x5","validatorIndex":"0x5","address":"0x0000000000000000000000000000000000001005","amount":"0x1"},{"index":"0x6","validatorIndex":"0x6","address":"0x0000000000000000000000000000000000001006","amount":"0x1"},{"index":"0x7","validatorIndex":"0x7","address":"0x0000000000000000000000000000000000001007","amount":"0x1"},{"index":"0x8","validatorIndex":"0x8","address":"0x0000000000000000000000000000000000001008","amount":"0x1"},{"index":"0x9","validatorIndex":"0x9","address":"0x0000000000000000000000000000000000001009","amount":"0x1"},{"index":"0xa","validatorIndex":"0xa","address":"0x000000000000000000000000000000000000100A","amount":"0x1"},{"index":"0xb","validatorIndex":"0xb","address":"0x000000000000000000000000000000000000100b","amount":"0x1"},{"index":"0xc","validatorIndex":"0xc","address":"0x000000000000000000000000000000000000100C","amount":"0x1"},{"index":"0xd","validatorIndex":"0xd","address":"0x000000000000000000000000000000000000100D","amount":"0x1"},{"index":"0xe","validatorIndex":"0xe","address":"0x000000000000000000000000000000000000100e","amount":"0x1"},{"index":"0xf","validatorIndex":"0xf","address":"0x000000000000000000000000000000000000100f","amount":"0x1"}]"#; + let input = 
r#"[{"index":"0x0","validatorIndex":"0x0","address":"0x0000000000000000000000000000000000001000","amount":"0x1"},{"index":"0x1","validatorIndex":"0x1","address":"0x0000000000000000000000000000000000001001","amount":"0x1"},{"index":"0x2","validatorIndex":"0x2","address":"0x0000000000000000000000000000000000001002","amount":"0x1"},{"index":"0x3","validatorIndex":"0x3","address":"0x0000000000000000000000000000000000001003","amount":"0x1"},{"index":"0x4","validatorIndex":"0x4","address":"0x0000000000000000000000000000000000001004","amount":"0x1"},{"index":"0x5","validatorIndex":"0x5","address":"0x0000000000000000000000000000000000001005","amount":"0x1"},{"index":"0x6","validatorIndex":"0x6","address":"0x0000000000000000000000000000000000001006","amount":"0x1"},{"index":"0x7","validatorIndex":"0x7","address":"0x0000000000000000000000000000000000001007","amount":"0x1"},{"index":"0x8","validatorIndex":"0x8","address":"0x0000000000000000000000000000000000001008","amount":"0x1"},{"index":"0x9","validatorIndex":"0x9","address":"0x0000000000000000000000000000000000001009","amount":"0x1"},{"index":"0xa","validatorIndex":"0xa","address":"0x000000000000000000000000000000000000100a","amount":"0x1"},{"index":"0xb","validatorIndex":"0xb","address":"0x000000000000000000000000000000000000100b","amount":"0x1"},{"index":"0xc","validatorIndex":"0xc","address":"0x000000000000000000000000000000000000100c","amount":"0x1"},{"index":"0xd","validatorIndex":"0xd","address":"0x000000000000000000000000000000000000100d","amount":"0x1"},{"index":"0xe","validatorIndex":"0xe","address":"0x000000000000000000000000000000000000100e","amount":"0x1"},{"index":"0xf","validatorIndex":"0xf","address":"0x000000000000000000000000000000000000100f","amount":"0x1"}]"#; let withdrawals: Vec = serde_json::from_str(input).unwrap(); let s = serde_json::to_string(&withdrawals).unwrap(); diff --git a/crates/primitives/benches/validate_blob_tx.rs b/crates/primitives/benches/validate_blob_tx.rs index 
61fe161f2f74f..50498a9420fdd 100644 --- a/crates/primitives/benches/validate_blob_tx.rs +++ b/crates/primitives/benches/validate_blob_tx.rs @@ -1,7 +1,7 @@ #![allow(missing_docs)] use alloy_consensus::TxEip4844; -use alloy_eips::eip4844::env_settings::EnvKzgSettings; +use alloy_eips::eip4844::{env_settings::EnvKzgSettings, MAX_BLOBS_PER_BLOCK}; use alloy_primitives::hex; use criterion::{ criterion_group, criterion_main, measurement::WallTime, BenchmarkGroup, Criterion, @@ -13,7 +13,6 @@ use proptest::{ }; use proptest_arbitrary_interop::arb; use reth_primitives::BlobTransactionSidecar; -use revm_primitives::MAX_BLOB_NUMBER_PER_BLOCK; // constant seed to use for the rng const SEED: [u8; 32] = hex!("1337133713371337133713371337133713371337133713371337133713371337"); @@ -22,9 +21,9 @@ const SEED: [u8; 32] = hex!("133713371337133713371337133713371337133713371337133 fn blob_validation(c: &mut Criterion) { let mut group = c.benchmark_group("Blob Transaction KZG validation"); - for num_blobs in 1..=MAX_BLOB_NUMBER_PER_BLOCK { + for num_blobs in 1..=MAX_BLOBS_PER_BLOCK { println!("Benchmarking validation for tx with {num_blobs} blobs"); - validate_blob_tx(&mut group, "ValidateBlob", num_blobs, EnvKzgSettings::Default); + validate_blob_tx(&mut group, "ValidateBlob", num_blobs as u64, EnvKzgSettings::Default); } } diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index bf7f557b799af..f43af019e75b1 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -278,9 +278,8 @@ impl TryFrom> for TransactionSigne #[cfg(feature = "optimism")] mod tests { use super::*; - use alloy_primitives::{B256, U256}; + use alloy_primitives::{address, Address, B256, U256}; use alloy_rpc_types::Transaction as AlloyTransaction; - use revm_primitives::{address, Address}; #[test] fn optimism_deposit_tx_conversion_no_mint() { diff --git a/crates/primitives/src/block.rs b/crates/primitives/src/block.rs index 
0464c28dee0fc..de0817fb025db 100644 --- a/crates/primitives/src/block.rs +++ b/crates/primitives/src/block.rs @@ -756,7 +756,7 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> SerializeAs for BlockBody<'a> { + impl SerializeAs for BlockBody<'_> { fn serialize_as(source: &super::BlockBody, serializer: S) -> Result where S: Serializer, @@ -807,7 +807,7 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> SerializeAs for SealedBlock<'a> { + impl SerializeAs for SealedBlock<'_> { fn serialize_as(source: &super::SealedBlock, serializer: S) -> Result where S: Serializer, @@ -858,7 +858,7 @@ pub(super) mod serde_bincode_compat { } } - impl<'a> SerializeAs for SealedBlockWithSenders<'a> { + impl SerializeAs for SealedBlockWithSenders<'_> { fn serialize_as( source: &super::SealedBlockWithSenders, serializer: S, @@ -1130,4 +1130,13 @@ mod tests { Some(SealedBlockWithSenders { block: sealed, senders: vec![sender] }) ); } + + #[test] + fn test_default_seal() { + let block = SealedBlock::default(); + let sealed = block.hash(); + let block = block.unseal(); + let block = block.seal_slow(); + assert_eq!(sealed, block.hash()); + } } diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 5c794be5061e6..cfd831ed0f740 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -373,7 +373,7 @@ impl<'a> ReceiptWithBloomRef<'a> { } } -impl<'a> Encodable for ReceiptWithBloomRef<'a> { +impl Encodable for ReceiptWithBloomRef<'_> { fn encode(&self, out: &mut dyn BufMut) { self.as_encoder().encode_inner(out, true) } @@ -394,7 +394,7 @@ struct ReceiptWithBloomEncoder<'a> { receipt: &'a Receipt, } -impl<'a> ReceiptWithBloomEncoder<'a> { +impl ReceiptWithBloomEncoder<'_> { /// Returns the rlp header for the receipt payload. 
fn receipt_rlp_header(&self) -> alloy_rlp::Header { let mut rlp_head = alloy_rlp::Header { list: true, payload_length: 0 }; @@ -481,7 +481,7 @@ impl<'a> ReceiptWithBloomEncoder<'a> { } } -impl<'a> Encodable for ReceiptWithBloomEncoder<'a> { +impl Encodable for ReceiptWithBloomEncoder<'_> { fn encode(&self, out: &mut dyn BufMut) { self.encode_inner(out, true) } diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index b16c2c88ab529..7ef2c0c1fb76f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -1640,7 +1640,7 @@ pub mod serde_bincode_compat { } } - impl<'a> SerializeAs for Transaction<'a> { + impl SerializeAs for Transaction<'_> { fn serialize_as(source: &super::Transaction, serializer: S) -> Result where S: Serializer, @@ -1700,7 +1700,7 @@ pub mod serde_bincode_compat { } } - impl<'a> SerializeAs for TransactionSigned<'a> { + impl SerializeAs for TransactionSigned<'_> { fn serialize_as( source: &super::TransactionSigned, serializer: S, diff --git a/crates/primitives/src/transaction/pooled.rs b/crates/primitives/src/transaction/pooled.rs index cc2dc57663948..ec49f44a680bc 100644 --- a/crates/primitives/src/transaction/pooled.rs +++ b/crates/primitives/src/transaction/pooled.rs @@ -10,14 +10,13 @@ use crate::{ BlobTransaction, BlobTransactionSidecar, Signature, Transaction, TransactionSigned, TransactionSignedEcRecovered, EIP4844_TX_TYPE_ID, }; -use alloc::vec::Vec; use alloy_consensus::{ transaction::{TxEip1559, TxEip2930, TxEip4844, TxLegacy}, SignableTransaction, TxEip4844WithSidecar, }; -use alloy_eips::eip2718::{Decodable2718, Eip2718Error}; -use alloy_primitives::{Address, Bytes, TxHash, B256}; -use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header, EMPTY_LIST_CODE}; +use alloy_eips::eip2718::{Decodable2718, Eip2718Result, Encodable2718}; +use alloy_primitives::{Address, TxHash, B256}; +use alloy_rlp::{Decodable, Encodable, Error as RlpError, Header}; 
use bytes::Buf; use derive_more::{AsRef, Deref}; use serde::{Deserialize, Serialize}; @@ -183,101 +182,6 @@ impl PooledTransactionsElement { } } - /// Decodes the "raw" format of transaction (e.g. `eth_sendRawTransaction`). - /// - /// This should be used for `eth_sendRawTransaction`, for any transaction type. Blob - /// transactions **must** include the blob sidecar as part of the raw encoding. - /// - /// This method can not be used for decoding the `transactions` field of `engine_newPayload`, - /// because EIP-4844 transactions for that method do not include the blob sidecar. The blobs - /// are supplied in an argument separate from the payload. - /// - /// A raw transaction is either a legacy transaction or EIP-2718 typed transaction, with a - /// special case for EIP-4844 transactions. - /// - /// For legacy transactions, the format is encoded as: `rlp(tx)`. This format will start with a - /// RLP list header. - /// - /// For EIP-2718 typed transactions, the format is encoded as the type of the transaction - /// followed by the rlp of the transaction: `type || rlp(tx)`. 
- /// - /// For EIP-4844 transactions, the format includes a blob sidecar (the blobs, commitments, and - /// proofs) after the transaction: - /// `type || rlp([tx_payload_body, blobs, commitments, proofs])` - /// - /// Where `tx_payload_body` is encoded as a RLP list: - /// `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - pub fn decode_enveloped(data: &mut &[u8]) -> alloy_rlp::Result { - if data.is_empty() { - return Err(RlpError::InputTooShort) - } - - // Check if the tx is a list - tx types are less than EMPTY_LIST_CODE (0xc0) - if data[0] >= EMPTY_LIST_CODE { - // decode as legacy transaction - let (transaction, hash, signature) = - TransactionSigned::decode_rlp_legacy_transaction_tuple(data)?; - - Ok(Self::Legacy { transaction, signature, hash }) - } else { - // decode the type byte, only decode BlobTransaction if it is a 4844 transaction - let tx_type = *data.first().ok_or(RlpError::InputTooShort)?; - - // First, we advance the buffer past the type byte - data.advance(1); - - if tx_type == EIP4844_TX_TYPE_ID { - // Recall that the blob transaction response `TransactionPayload` is encoded like - // this: `rlp([tx_payload_body, blobs, commitments, proofs])` - // - // Note that `tx_payload_body` is a list: - // `[chain_id, nonce, max_priority_fee_per_gas, ..., y_parity, r, s]` - // - // This makes the full encoding: - // `tx_type (0x03) || rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` - - // Now, we decode the inner blob transaction: - // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` - let blob_tx = BlobTransaction::decode_inner(data)?; - Ok(Self::BlobTransaction(blob_tx)) - } else { - let typed_tx = - TransactionSigned::typed_decode(tx_type, data).map_err(|err| match err { - Eip2718Error::RlpError(err) => err, - _ => RlpError::Custom("failed to decode EIP-2718 transaction"), - })?; - - // because we checked the tx type, we can be sure that the transaction is not a - // blob transaction or legacy - match 
typed_tx.transaction { - Transaction::Legacy(_) => Err(RlpError::Custom( - "legacy transactions should not be a result of EIP-2718 decoding", - )), - Transaction::Eip4844(_) => Err(RlpError::Custom( - "EIP-4844 transactions can only be decoded with transaction type 0x03", - )), - Transaction::Eip2930(tx) => Ok(Self::Eip2930 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip1559(tx) => Ok(Self::Eip1559 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - }), - Transaction::Eip7702(tx) => {Ok(Self::Eip7702 { - transaction: tx, - signature: typed_tx.signature, - hash: typed_tx.hash, - })}, - #[cfg(feature = "optimism")] - Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement")) - } - } - } - } - /// Create [`TransactionSignedEcRecovered`] by converting this transaction into /// [`TransactionSigned`] and [`Address`] of the signer. pub fn into_ecrecovered_transaction(self, signer: Address) -> TransactionSignedEcRecovered { @@ -309,83 +213,6 @@ impl PooledTransactionsElement { } } - /// Returns the length without an RLP header - this is used for eth/68 sizes. - pub fn length_without_header(&self) -> usize { - match self { - Self::Legacy { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(&with_eip155_parity( - signature, - transaction.chain_id, - )) - } - Self::Eip2930 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) - } - Self::Eip1559 { transaction, signature, .. } => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) - } - Self::Eip7702 { transaction, signature, .. 
} => { - // method computes the payload len without a RLP header - transaction.encoded_len_with_signature(signature, false) - } - Self::BlobTransaction(blob_tx) => { - // the encoding does not use a header, so we set `with_header` to false - blob_tx.payload_len_with_type(false) - } - } - } - - /// Returns the enveloped encoded transactions. - /// - /// See also [`alloy_eips::eip2718::Encodable2718::encoded_2718`] - pub fn envelope_encoded(&self) -> Bytes { - let mut buf = Vec::new(); - self.encode_enveloped(&mut buf); - buf.into() - } - - /// Encodes the transaction into the "raw" format (e.g. `eth_sendRawTransaction`). - /// This format is also referred to as "binary" encoding. - /// - /// For legacy transactions, it encodes the RLP of the transaction into the buffer: - /// `rlp(tx-data)` - /// For EIP-2718 typed it encodes the type of the transaction followed by the rlp of the - /// transaction: `tx-type || rlp(tx-data)` - pub fn encode_enveloped(&self, out: &mut dyn bytes::BufMut) { - // The encoding of `tx-data` depends on the transaction type. Refer to these docs for more - // information on the exact format: - // - Legacy: TxLegacy::encode_with_signature - // - EIP-2930: TxEip2930::encode_with_signature - // - EIP-1559: TxEip1559::encode_with_signature - // - EIP-4844: BlobTransaction::encode_with_type_inner - // - EIP-7702: TxEip7702::encode_with_signature - match self { - Self::Legacy { transaction, signature, .. } => transaction - .encode_with_signature_fields( - &with_eip155_parity(signature, transaction.chain_id), - out, - ), - Self::Eip2930 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) - } - Self::Eip1559 { transaction, signature, .. } => { - transaction.encode_with_signature(signature, out, false) - } - Self::Eip7702 { transaction, signature, .. 
} => { - transaction.encode_with_signature(signature, out, false) - } - Self::BlobTransaction(blob_tx) => { - // The inner encoding is used with `with_header` set to true, making the final - // encoding: - // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))` - blob_tx.encode_with_type_inner(out, false); - } - } - } - /// Returns true if the transaction is an EIP-4844 transaction. #[inline] pub const fn is_eip4844(&self) -> bool { @@ -481,73 +308,25 @@ impl PooledTransactionsElement { } impl Encodable for PooledTransactionsElement { - /// Encodes an enveloped post EIP-4844 [`PooledTransactionsElement`]. + /// This encodes the transaction _with_ the signature, and an rlp header. /// - /// For legacy transactions, this encodes the transaction as `rlp(tx-data)`. + /// For legacy transactions, it encodes the transaction data: + /// `rlp(tx-data)` /// - /// For EIP-2718 transactions, this encodes the transaction as `rlp(tx_type || rlp(tx-data)))`, - /// ___including__ the RLP-header for the entire transaction. + /// For EIP-2718 typed transactions, it encodes the transaction type followed by the rlp of the + /// transaction: + /// `rlp(tx-type || rlp(tx-data))` fn encode(&self, out: &mut dyn bytes::BufMut) { - // The encoding of `tx-data` depends on the transaction type. Refer to these docs for more - // information on the exact format: - // - Legacy: TxLegacy::encode_with_signature - // - EIP-2930: TxEip2930::encode_with_signature - // - EIP-1559: TxEip1559::encode_with_signature - // - EIP-4844: BlobTransaction::encode_with_type_inner - // - EIP-7702: TxEip7702::encode_with_signature - match self { - Self::Legacy { transaction, signature, .. } => transaction - .encode_with_signature_fields( - &with_eip155_parity(signature, transaction.chain_id), - out, - ), - Self::Eip2930 { transaction, signature, .. } => { - // encodes with string header - transaction.encode_with_signature(signature, out, true) - } - Self::Eip1559 { transaction, signature, .. 
} => { - // encodes with string header - transaction.encode_with_signature(signature, out, true) - } - Self::Eip7702 { transaction, signature, .. } => { - // encodes with string header - transaction.encode_with_signature(signature, out, true) - } - Self::BlobTransaction(blob_tx) => { - // The inner encoding is used with `with_header` set to true, making the final - // encoding: - // `rlp(tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))` - blob_tx.encode_with_type_inner(out, true); - } - } + self.network_encode(out); } fn length(&self) -> usize { - match self { - Self::Legacy { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(&with_eip155_parity( - signature, - transaction.chain_id, - )) - } - Self::Eip2930 { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(signature, true) - } - Self::Eip1559 { transaction, signature, .. } => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(signature, true) - } - Self::Eip7702 { transaction, signature, .. 
} => { - // method computes the payload len with a RLP header - transaction.encoded_len_with_signature(signature, true) - } - Self::BlobTransaction(blob_tx) => { - // the encoding uses a header, so we set `with_header` to true - blob_tx.payload_len_with_type(true) - } + let mut payload_length = self.encode_2718_len(); + if !self.is_legacy() { + payload_length += Header { list: false, payload_length }.length(); } + + payload_length } } @@ -581,23 +360,110 @@ impl Decodable for PooledTransactionsElement { // Check if the tx is a list if header.list { // decode as legacy transaction - let (transaction, hash, signature) = - TransactionSigned::decode_rlp_legacy_transaction_tuple(&mut original_encoding)?; + let tx = Self::fallback_decode(&mut original_encoding)?; // advance the buffer by however long the legacy transaction decoding advanced the // buffer *buf = original_encoding; - Ok(Self::Legacy { transaction, signature, hash }) + Ok(tx) } else { // decode the type byte, only decode BlobTransaction if it is a 4844 transaction let tx_type = *buf.first().ok_or(RlpError::InputTooShort)?; let remaining_len = buf.len(); - // Aadvance the buffer past the type byte + // Advance the buffer past the type byte buf.advance(1); - if tx_type == EIP4844_TX_TYPE_ID { + let tx = Self::typed_decode(tx_type, buf).map_err(RlpError::from)?; + + // check that the bytes consumed match the payload length + let bytes_consumed = remaining_len - buf.len(); + if bytes_consumed != header.payload_length { + return Err(RlpError::UnexpectedLength) + } + + Ok(tx) + } + } +} + +impl Encodable2718 for PooledTransactionsElement { + fn type_flag(&self) -> Option { + match self { + Self::Legacy { .. } => None, + Self::Eip2930 { .. } => Some(0x01), + Self::Eip1559 { .. } => Some(0x02), + Self::BlobTransaction { .. } => Some(0x03), + Self::Eip7702 { .. } => Some(0x04), + } + } + + fn encode_2718_len(&self) -> usize { + match self { + Self::Legacy { transaction, signature, .. 
} => { + // method computes the payload len with a RLP header + transaction.encoded_len_with_signature(&with_eip155_parity( + signature, + transaction.chain_id, + )) + } + Self::Eip2930 { transaction, signature, .. } => { + // method computes the payload len without a RLP header + transaction.encoded_len_with_signature(signature, false) + } + Self::Eip1559 { transaction, signature, .. } => { + // method computes the payload len without a RLP header + transaction.encoded_len_with_signature(signature, false) + } + Self::Eip7702 { transaction, signature, .. } => { + // method computes the payload len without a RLP header + transaction.encoded_len_with_signature(signature, false) + } + Self::BlobTransaction(blob_tx) => { + // the encoding does not use a header, so we set `with_header` to false + blob_tx.payload_len_with_type(false) + } + } + } + + fn encode_2718(&self, out: &mut dyn alloy_rlp::BufMut) { + // The encoding of `tx-data` depends on the transaction type. Refer to these docs for more + // information on the exact format: + // - Legacy: TxLegacy::encode_with_signature + // - EIP-2930: TxEip2930::encode_with_signature + // - EIP-1559: TxEip1559::encode_with_signature + // - EIP-4844: BlobTransaction::encode_with_type_inner + // - EIP-7702: TxEip7702::encode_with_signature + match self { + Self::Legacy { transaction, signature, .. } => transaction + .encode_with_signature_fields( + &with_eip155_parity(signature, transaction.chain_id), + out, + ), + Self::Eip2930 { transaction, signature, .. } => { + transaction.encode_with_signature(signature, out, false) + } + Self::Eip1559 { transaction, signature, .. } => { + transaction.encode_with_signature(signature, out, false) + } + Self::Eip7702 { transaction, signature, .. 
} => { + transaction.encode_with_signature(signature, out, false) + } + Self::BlobTransaction(blob_tx) => { + // The inner encoding is used with `with_header` set to true, making the final + // encoding: + // `tx_type || rlp([transaction_payload_body, blobs, commitments, proofs]))` + blob_tx.encode_with_type_inner(out, false); + } + } + } +} + +impl Decodable2718 for PooledTransactionsElement { + fn typed_decode(ty: u8, buf: &mut &[u8]) -> Eip2718Result { + match ty { + EIP4844_TX_TYPE_ID => { // Recall that the blob transaction response `TransactionPayload` is encoded like // this: `rlp([tx_payload_body, blobs, commitments, proofs])` // @@ -607,36 +473,23 @@ impl Decodable for PooledTransactionsElement { // This makes the full encoding: // `tx_type (0x03) || rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` - // Decode the inner blob transaction: + // Now, we decode the inner blob transaction: // `rlp([[chain_id, nonce, ...], blobs, commitments, proofs])` let blob_tx = BlobTransaction::decode_inner(buf)?; - - // check that the bytes consumed match the payload length - let bytes_consumed = remaining_len - buf.len(); - if bytes_consumed != header.payload_length { - return Err(RlpError::UnexpectedLength) - } - Ok(Self::BlobTransaction(blob_tx)) - } else { - let typed_tx = - TransactionSigned::typed_decode(tx_type, buf).map_err(RlpError::from)?; - - // check that the bytes consumed match the payload length - let bytes_consumed = remaining_len - buf.len(); - if bytes_consumed != header.payload_length { - return Err(RlpError::UnexpectedLength) - } + } + tx_type => { + let typed_tx = TransactionSigned::typed_decode(tx_type, buf)?; - // because we checked the tx type, we can be sure that the transaction is not a - // blob transaction or legacy match typed_tx.transaction { Transaction::Legacy(_) => Err(RlpError::Custom( - "legacy transactions should not be a result of EIP-2718 decoding", - )), + "legacy transactions should not be a result of typed decoding", + 
).into()), + // because we checked the tx type, we can be sure that the transaction is not a + // blob transaction Transaction::Eip4844(_) => Err(RlpError::Custom( "EIP-4844 transactions can only be decoded with transaction type 0x03", - )), + ).into()), Transaction::Eip2930(tx) => Ok(Self::Eip2930 { transaction: tx, signature: typed_tx.signature, @@ -653,11 +506,19 @@ impl Decodable for PooledTransactionsElement { hash: typed_tx.hash, }), #[cfg(feature = "optimism")] - Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement")) + Transaction::Deposit(_) => Err(RlpError::Custom("Optimism deposit transaction cannot be decoded to PooledTransactionsElement").into()) } } } } + + fn fallback_decode(buf: &mut &[u8]) -> Eip2718Result { + // decode as legacy transaction + let (transaction, hash, signature) = + TransactionSigned::decode_rlp_legacy_transaction_tuple(buf)?; + + Ok(Self::Legacy { transaction, signature, hash }) + } } impl TryFrom for PooledTransactionsElement { @@ -773,6 +634,7 @@ mod tests { use super::*; use alloy_primitives::{address, hex}; use assert_matches::assert_matches; + use bytes::Bytes; #[test] fn invalid_legacy_pooled_decoding_input_too_short() { @@ -804,7 +666,7 @@ mod tests { // this is a legacy tx so we can attempt the same test with decode_enveloped let input_rlp = &mut &hex_data[..]; - let res = PooledTransactionsElement::decode_enveloped(input_rlp); + let res = PooledTransactionsElement::decode_2718(input_rlp); assert!( res.is_err(), @@ -820,7 +682,7 @@ mod tests { let data = 
hex!("02f903d382426882ba09832dc6c0848674742682ed9694714b6a4ea9b94a8a7d9fd362ed72630688c8898c80b90364492d24749189822d8512430d3f3ff7a2ede675ac08265c08e2c56ff6fdaa66dae1cdbe4a5d1d7809f3e99272d067364e597542ac0c369d69e22a6399c3e9bee5da4b07e3f3fdc34c32c3d88aa2268785f3e3f8086df0934b10ef92cfffc2e7f3d90f5e83302e31382e302d64657600000000000000000000000000000000000000000000569e75fc77c1a856f6daaf9e69d8a9566ca34aa47f9133711ce065a571af0cfd000000000000000000000000e1e210594771824dad216568b91c9cb4ceed361c00000000000000000000000000000000000000000000000000000000000546e00000000000000000000000000000000000000000000000000000000000e4e1c00000000000000000000000000000000000000000000000000000000065d6750c00000000000000000000000000000000000000000000000000000000000f288000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002cf600000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000000f1628e56fa6d8c50e5b984a58c0df14de31c7b857ce7ba499945b99252976a93d06dcda6776fc42167fbe71cb59f978f5ef5b12577a90b132d14d9c6efa528076f0161d7bf03643cfc5490ec5084f4a041db7f06c50bd97efa08907ba79ddcac8b890f24d12d8db31abbaaf18985d54f400449ee0559a4452afe53de5853ce090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000003e800000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000064ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000c080a01428023fc54a27544abc421d5d017b9a7c5936ad501cbdecd0d9d12d04c1a033a0753104bbf1c87634d6ff3f0ffa0982710612306003eb022363b57994bdef445a" ); - let res = 
PooledTransactionsElement::decode_enveloped(&mut &data[..]).unwrap(); + let res = PooledTransactionsElement::decode_2718(&mut &data[..]).unwrap(); assert_eq!( res.into_transaction().to(), Some(address!("714b6a4ea9b94a8a7d9fd362ed72630688c8898c")) @@ -854,7 +716,7 @@ mod tests { assert!(input_rlp.is_empty()); // we can also decode_enveloped - let res = PooledTransactionsElement::decode_enveloped(&mut &data[..]); + let res = PooledTransactionsElement::decode_2718(&mut &data[..]); assert_matches!(res, Ok(_tx)); } } diff --git a/crates/primitives/src/transaction/sidecar.rs b/crates/primitives/src/transaction/sidecar.rs index 52c3c68ef9db7..87b8c1fbf3e80 100644 --- a/crates/primitives/src/transaction/sidecar.rs +++ b/crates/primitives/src/transaction/sidecar.rs @@ -283,7 +283,10 @@ pub fn generate_blob_sidecar(blobs: Vec) -> BlobTransactionSidecar mod tests { use super::*; use crate::{kzg::Blob, PooledTransactionsElement}; - use alloy_eips::eip4844::Bytes48; + use alloy_eips::{ + eip2718::{Decodable2718, Encodable2718}, + eip4844::Bytes48, + }; use alloy_primitives::hex; use alloy_rlp::Encodable; use std::{fs, path::PathBuf, str::FromStr}; @@ -435,15 +438,15 @@ mod tests { let entry = entry.unwrap(); let content = fs::read_to_string(entry.path()).unwrap(); let raw = hex::decode(content.trim()).unwrap(); - let tx = PooledTransactionsElement::decode_enveloped(&mut raw.as_ref()) + let tx = PooledTransactionsElement::decode_2718(&mut raw.as_ref()) .map_err(|err| { panic!("Failed to decode transaction: {:?} {:?}", err, entry.path()); }) .unwrap(); // We want to test only EIP-4844 transactions assert!(tx.is_eip4844()); - let encoded = tx.envelope_encoded(); - assert_eq!(encoded.as_ref(), &raw[..], "{:?}", entry.path()); + let encoded = tx.encoded_2718(); + assert_eq!(encoded.as_slice(), &raw[..], "{:?}", entry.path()); } } } diff --git a/crates/primitives/src/transaction/util.rs b/crates/primitives/src/transaction/util.rs index 6205ec886ca09..7569400e94b46 100644 --- 
a/crates/primitives/src/transaction/util.rs +++ b/crates/primitives/src/transaction/util.rs @@ -1,6 +1,5 @@ use crate::Signature; use alloy_primitives::Address; -use revm_primitives::B256; #[cfg(feature = "secp256k1")] pub(crate) mod secp256k1 { @@ -20,8 +19,7 @@ mod impl_secp256k1 { ecdsa::{RecoverableSignature, RecoveryId}, Message, PublicKey, SecretKey, SECP256K1, }; - use alloy_primitives::{keccak256, Parity}; - use revm_primitives::U256; + use alloy_primitives::{keccak256, Parity, B256, U256}; /// Recovers the address of the sender using secp256k1 pubkey recovery. /// @@ -65,10 +63,9 @@ mod impl_secp256k1 { #[cfg_attr(feature = "secp256k1", allow(unused, unreachable_pub))] mod impl_k256 { use super::*; - use alloy_primitives::{keccak256, Parity}; + use alloy_primitives::{keccak256, Parity, B256, U256}; pub(crate) use k256::ecdsa::Error; use k256::ecdsa::{RecoveryId, SigningKey, VerifyingKey}; - use revm_primitives::U256; /// Recovers the address of the sender using secp256k1 pubkey recovery. 
/// @@ -117,11 +114,12 @@ mod impl_k256 { #[cfg(test)] mod tests { + use alloy_primitives::{keccak256, B256}; + #[cfg(feature = "secp256k1")] #[test] fn sanity_ecrecover_call_secp256k1() { use super::impl_secp256k1::*; - use revm_primitives::{keccak256, B256}; let (secret, public) = secp256k1::generate_keypair(&mut rand::thread_rng()); let signer = public_key_to_address(public); @@ -143,7 +141,6 @@ mod tests { #[test] fn sanity_ecrecover_call_k256() { use super::impl_k256::*; - use revm_primitives::{keccak256, B256}; let secret = k256::ecdsa::SigningKey::random(&mut rand::thread_rng()); let public = *secret.verifying_key(); @@ -165,7 +162,6 @@ mod tests { #[test] fn sanity_secp256k1_k256_compat() { use super::{impl_k256, impl_secp256k1}; - use revm_primitives::{keccak256, B256}; let (secp256k1_secret, secp256k1_public) = secp256k1::generate_keypair(&mut rand::thread_rng()); diff --git a/crates/prune/prune/src/segments/static_file/headers.rs b/crates/prune/prune/src/segments/static_file/headers.rs index a3daf504e667c..8700a653b1115 100644 --- a/crates/prune/prune/src/segments/static_file/headers.rs +++ b/crates/prune/prune/src/segments/static_file/headers.rs @@ -137,7 +137,7 @@ where } } -impl<'a, Provider> Iterator for HeaderTablesIter<'a, Provider> +impl Iterator for HeaderTablesIter<'_, Provider> where Provider: DBProvider, { diff --git a/crates/prune/types/src/limiter.rs b/crates/prune/types/src/limiter.rs index 09f0c4cd3f2d8..3a1059949300a 100644 --- a/crates/prune/types/src/limiter.rs +++ b/crates/prune/types/src/limiter.rs @@ -120,3 +120,270 @@ impl PruneLimiter { self.is_deleted_entries_limit_reached() || self.is_time_limit_reached() } } + +#[cfg(test)] +mod tests { + use super::*; + use std::thread::sleep; + + #[test] + fn test_prune_deleted_entries_limit_initial_state() { + let limit_tracker = PruneDeletedEntriesLimit::new(10); + // Limit should be set properly + assert_eq!(limit_tracker.limit, 10); + // No entries should be deleted + 
assert_eq!(limit_tracker.deleted, 0); + assert!(!limit_tracker.is_limit_reached()); + } + + #[test] + fn test_prune_deleted_entries_limit_is_limit_reached() { + // Test when the deleted entries are less than the limit + let mut limit_tracker = PruneDeletedEntriesLimit::new(5); + limit_tracker.deleted = 3; + assert!(!limit_tracker.is_limit_reached()); + + // Test when the deleted entries are equal to the limit + limit_tracker.deleted = 5; + assert!(limit_tracker.is_limit_reached()); + + // Test when the deleted entries exceed the limit + limit_tracker.deleted = 6; + assert!(limit_tracker.is_limit_reached()); + } + + #[test] + fn test_prune_time_limit_initial_state() { + let time_limit = PruneTimeLimit::new(Duration::from_secs(10)); + // The limit should be set correctly + assert_eq!(time_limit.limit, Duration::from_secs(10)); + // The elapsed time should be very small right after creation + assert!(time_limit.start.elapsed() < Duration::from_secs(1)); + // Limit should not be reached initially + assert!(!time_limit.is_limit_reached()); + } + + #[test] + fn test_prune_time_limit_is_limit_reached() { + let time_limit = PruneTimeLimit::new(Duration::from_millis(50)); + + // Simulate waiting for some time (less than the limit) + std::thread::sleep(Duration::from_millis(30)); + assert!(!time_limit.is_limit_reached()); + + // Simulate waiting for time greater than the limit + std::thread::sleep(Duration::from_millis(30)); + assert!(time_limit.is_limit_reached()); + } + + #[test] + fn test_set_deleted_entries_limit_initial_state() { + let pruner = PruneLimiter::default().set_deleted_entries_limit(100); + // The deleted_entries_limit should be set with the correct limit + assert!(pruner.deleted_entries_limit.is_some()); + let deleted_entries_limit = pruner.deleted_entries_limit.unwrap(); + assert_eq!(deleted_entries_limit.limit, 100); + // The deleted count should be initially zero + assert_eq!(deleted_entries_limit.deleted, 0); + // The limit should not be reached 
initially + assert!(!deleted_entries_limit.is_limit_reached()); + } + + #[test] + fn test_set_deleted_entries_limit_overwrite_existing() { + let mut pruner = PruneLimiter::default().set_deleted_entries_limit(50); + // Overwrite the existing limit + pruner = pruner.set_deleted_entries_limit(200); + + assert!(pruner.deleted_entries_limit.is_some()); + let deleted_entries_limit = pruner.deleted_entries_limit.unwrap(); + // Check that the limit has been overwritten correctly + assert_eq!(deleted_entries_limit.limit, 200); + // Deleted count should still be zero + assert_eq!(deleted_entries_limit.deleted, 0); + assert!(!deleted_entries_limit.is_limit_reached()); + } + + #[test] + fn test_set_deleted_entries_limit_when_limit_is_reached() { + let mut pruner = PruneLimiter::default().set_deleted_entries_limit(5); + assert!(pruner.deleted_entries_limit.is_some()); + let mut deleted_entries_limit = pruner.deleted_entries_limit.clone().unwrap(); + + // Simulate deletion of entries + deleted_entries_limit.deleted = 5; + assert!(deleted_entries_limit.is_limit_reached()); + + // Overwrite the limit and check if it resets correctly + pruner = pruner.set_deleted_entries_limit(10); + deleted_entries_limit = pruner.deleted_entries_limit.unwrap(); + assert_eq!(deleted_entries_limit.limit, 10); + // Deletion count should reset + assert_eq!(deleted_entries_limit.deleted, 0); + assert!(!deleted_entries_limit.is_limit_reached()); + } + + #[test] + fn test_floor_deleted_entries_limit_to_multiple_of() { + let limiter = PruneLimiter::default().set_deleted_entries_limit(15); + let denominator = NonZeroUsize::new(4).unwrap(); + + // Floor limit to the largest multiple of 4 less than or equal to 15 (that is 12) + let updated_limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator); + assert_eq!(updated_limiter.deleted_entries_limit.unwrap().limit, 12); + + // Test when the limit is already a multiple of the denominator + let limiter = 
PruneLimiter::default().set_deleted_entries_limit(16); + let updated_limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator); + assert_eq!(updated_limiter.deleted_entries_limit.unwrap().limit, 16); + + // Test when there's no limit set (should not panic) + let limiter = PruneLimiter::default(); + let updated_limiter = limiter.floor_deleted_entries_limit_to_multiple_of(denominator); + assert!(updated_limiter.deleted_entries_limit.is_none()); + } + + #[test] + fn test_is_deleted_entries_limit_reached() { + // Limit is not set, should return false + let limiter = PruneLimiter::default(); + assert!(!limiter.is_deleted_entries_limit_reached()); + + // Limit is set but not reached, should return false + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.deleted_entries_limit.as_mut().unwrap().deleted = 5; + // 5 entries deleted out of 10 + assert!(!limiter.is_deleted_entries_limit_reached()); + + // Limit is reached, should return true + limiter.deleted_entries_limit.as_mut().unwrap().deleted = 10; + // 10 entries deleted out of 10 + assert!(limiter.is_deleted_entries_limit_reached()); + + // Deleted entries exceed the limit, should return true + limiter.deleted_entries_limit.as_mut().unwrap().deleted = 12; + // 12 entries deleted out of 10 + assert!(limiter.is_deleted_entries_limit_reached()); + } + + #[test] + fn test_increment_deleted_entries_count_by() { + // Increment when no limit is set + let mut limiter = PruneLimiter::default(); + limiter.increment_deleted_entries_count_by(5); + assert_eq!(limiter.deleted_entries_limit.as_ref().map(|l| l.deleted), None); // Still None + + // Increment when limit is set + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(3); + assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 3); // Now 3 deleted + + // Increment again + limiter.increment_deleted_entries_count_by(2); + 
assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 5); // Now 5 deleted + } + + #[test] + fn test_increment_deleted_entries_count() { + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(5); + assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 0); // Initially 0 + + limiter.increment_deleted_entries_count(); // Increment by 1 + assert_eq!(limiter.deleted_entries_limit.as_ref().unwrap().deleted, 1); // Now 1 + } + + #[test] + fn test_deleted_entries_limit_left() { + // Test when limit is set and some entries are deleted + let mut limiter = PruneLimiter::default().set_deleted_entries_limit(10); + limiter.increment_deleted_entries_count_by(3); // Simulate 3 deleted entries + assert_eq!(limiter.deleted_entries_limit_left(), Some(7)); // 10 - 3 = 7 + + // Test when no entries are deleted + limiter = PruneLimiter::default().set_deleted_entries_limit(5); + assert_eq!(limiter.deleted_entries_limit_left(), Some(5)); // 5 - 0 = 5 + + // Test when limit is reached + limiter.increment_deleted_entries_count_by(5); // Simulate deleting 5 entries + assert_eq!(limiter.deleted_entries_limit_left(), Some(0)); // 5 - 5 = 0 + + // Test when limit is not set + limiter = PruneLimiter::default(); // No limit set + assert_eq!(limiter.deleted_entries_limit_left(), None); // Should be None + } + + #[test] + fn test_set_time_limit() { + // Create a PruneLimiter instance with no time limit set + let mut limiter = PruneLimiter::default(); + + // Set a time limit of 5 seconds + limiter = limiter.set_time_limit(Duration::new(5, 0)); + + // Verify that the time limit is set correctly + assert!(limiter.time_limit.is_some()); + let time_limit = limiter.time_limit.as_ref().unwrap(); + assert_eq!(time_limit.limit, Duration::new(5, 0)); + // Ensure the start time is recent + assert!(time_limit.start.elapsed() < Duration::new(1, 0)); + } + + #[test] + fn test_is_time_limit_reached() { + // Create a PruneLimiter instance and set a time limit of 10 
milliseconds + let mut limiter = PruneLimiter::default(); + + // Time limit should not be reached initially + assert!(!limiter.is_time_limit_reached(), "Time limit should not be reached yet"); + + limiter = limiter.set_time_limit(Duration::new(0, 10_000_000)); // 10 milliseconds + + // Sleep for 5 milliseconds (less than the time limit) + sleep(Duration::new(0, 5_000_000)); // 5 milliseconds + assert!(!limiter.is_time_limit_reached(), "Time limit should not be reached yet"); + + // Sleep for an additional 10 milliseconds (totaling 15 milliseconds) + sleep(Duration::new(0, 10_000_000)); // 10 milliseconds + assert!(limiter.is_time_limit_reached(), "Time limit should be reached now"); + } + + #[test] + fn test_is_limit_reached() { + // Create a PruneLimiter instance + let mut limiter = PruneLimiter::default(); + + // Test when no limits are set + assert!(!limiter.is_limit_reached(), "Limit should not be reached with no limits set"); + + // Set a deleted entries limit + limiter = limiter.set_deleted_entries_limit(5); + assert!( + !limiter.is_limit_reached(), + "Limit should not be reached when deleted entries are less than limit" + ); + + // Increment deleted entries count to reach the limit + limiter.increment_deleted_entries_count_by(5); + assert!( + limiter.is_limit_reached(), + "Limit should be reached when deleted entries equal the limit" + ); + + // Reset the limiter + limiter = PruneLimiter::default(); + + // Set a time limit and check + limiter = limiter.set_time_limit(Duration::new(0, 10_000_000)); // 10 milliseconds + + // Sleep for 5 milliseconds (less than the time limit) + sleep(Duration::new(0, 5_000_000)); // 5 milliseconds + assert!( + !limiter.is_limit_reached(), + "Limit should not be reached when time limit not reached" + ); + + // Sleep for another 10 milliseconds (totaling 15 milliseconds) + sleep(Duration::new(0, 10_000_000)); // 10 milliseconds + assert!(limiter.is_limit_reached(), "Limit should be reached when time limit is reached"); + } +} 
diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index d42ec49599076..813997c72d11b 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -11,7 +11,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, + TrieInput, }; /// Mock state for testing @@ -102,6 +103,15 @@ impl StorageRootProvider for StateProviderTest { ) -> ProviderResult { unimplemented!("storage root is not supported") } + + fn storage_proof( + &self, + _address: Address, + _slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + unimplemented!("proof generation is not supported") + } } impl StateProofProvider for StateProviderTest { diff --git a/crates/rpc/rpc-api/src/debug.rs b/crates/rpc/rpc-api/src/debug.rs index 4ea8c0f0e4dfe..3e03210f1ffd6 100644 --- a/crates/rpc/rpc-api/src/debug.rs +++ b/crates/rpc/rpc-api/src/debug.rs @@ -140,11 +140,8 @@ pub trait DebugApi { /// The first argument is the block number or block hash. The second argument is a boolean /// indicating whether to include the preimages of keys in the response. #[method(name = "executionWitness")] - async fn debug_execution_witness( - &self, - block: BlockNumberOrTag, - include_preimages: bool, - ) -> RpcResult; + async fn debug_execution_witness(&self, block: BlockNumberOrTag) + -> RpcResult; /// Sets the logging backtrace location. 
When a backtrace location is set and a log message is /// emitted at that location, the stack of the goroutine executing the log statement will diff --git a/crates/rpc/rpc-builder/src/lib.rs b/crates/rpc/rpc-builder/src/lib.rs index 113cb93c4630d..e49105ab7d7d4 100644 --- a/crates/rpc/rpc-builder/src/lib.rs +++ b/crates/rpc/rpc-builder/src/lib.rs @@ -1832,6 +1832,13 @@ impl TransportRpcModules { } } + /// Removes the given methods from the configured http methods. + pub fn remove_http_methods(&mut self, methods: impl IntoIterator) { + for name in methods { + self.remove_http_method(name); + } + } + /// Removes the method with the given name from the configured ws methods. /// /// Returns `true` if the method was found and removed, `false` otherwise. @@ -1847,6 +1854,13 @@ impl TransportRpcModules { } } + /// Removes the given methods from the configured ws methods. + pub fn remove_ws_methods(&mut self, methods: impl IntoIterator) { + for name in methods { + self.remove_ws_method(name); + } + } + /// Removes the method with the given name from the configured ipc methods. /// /// Returns `true` if the method was found and removed, `false` otherwise. @@ -1862,6 +1876,13 @@ impl TransportRpcModules { } } + /// Removes the given methods from the configured ipc methods. + pub fn remove_ipc_methods(&mut self, methods: impl IntoIterator) { + for name in methods { + self.remove_ipc_method(name); + } + } + /// Removes the method with the given name from all configured transports. /// /// Returns `true` if the method was found and removed, `false` otherwise. @@ -1872,6 +1893,56 @@ impl TransportRpcModules { http_removed || ws_removed || ipc_removed } + + /// Replace the given [Methods] in the configured http methods. + /// + /// Fails if any of the methods in other is present already or if the method being removed is + /// not present + /// + /// Returns [Ok(false)] if no http transport is configured. 
+ pub fn replace_http(&mut self, other: impl Into) -> Result { + let other = other.into(); + self.remove_http_methods(other.method_names()); + self.merge_http(other) + } + + /// Replace the given [Methods] in the configured ipc methods. + /// + /// Fails if any of the methods in other is present already or if the method being removed is + /// not present + /// + /// Returns [Ok(false)] if no ipc transport is configured. + pub fn replace_ipc(&mut self, other: impl Into) -> Result { + let other = other.into(); + self.remove_ipc_methods(other.method_names()); + self.merge_ipc(other) + } + + /// Replace the given [Methods] in the configured ws methods. + /// + /// Fails if any of the methods in other is present already or if the method being removed is + /// not present + /// + /// Returns [Ok(false)] if no ws transport is configured. + pub fn replace_ws(&mut self, other: impl Into) -> Result { + let other = other.into(); + self.remove_ws_methods(other.method_names()); + self.merge_ws(other) + } + + /// Replaces the method with the given name from all configured transports. + /// + /// Returns `true` if the method was found and replaced, `false` otherwise + pub fn replace_configured( + &mut self, + other: impl Into, + ) -> Result { + let other = other.into(); + self.replace_http(other.clone())?; + self.replace_ws(other.clone())?; + self.replace_ipc(other)?; + Ok(true) + } } /// A handle to the spawned servers. 
@@ -2121,81 +2192,152 @@ mod tests { ) } - mod remove_methods { - use super::*; + fn create_test_module() -> RpcModule<()> { + let mut module = RpcModule::new(()); + module.register_method("anything", |_, _, _| "succeed").unwrap(); + module + } - fn create_test_module() -> RpcModule<()> { - let mut module = RpcModule::new(()); - module.register_method("anything", |_, _, _| "succeed").unwrap(); - module - } + #[test] + fn test_remove_http_method() { + let mut modules = + TransportRpcModules { http: Some(create_test_module()), ..Default::default() }; + // Remove a method that exists + assert!(modules.remove_http_method("anything")); + + // Remove a method that does not exist + assert!(!modules.remove_http_method("non_existent_method")); - #[test] - fn test_remove_http_method() { - let mut modules = - TransportRpcModules { http: Some(create_test_module()), ..Default::default() }; - // Remove a method that exists - assert!(modules.remove_http_method("anything")); + // Verify that the method was removed + assert!(modules.http.as_ref().unwrap().method("anything").is_none()); + } - // Remove a method that does not exist - assert!(!modules.remove_http_method("non_existent_method")); + #[test] + fn test_remove_ws_method() { + let mut modules = + TransportRpcModules { ws: Some(create_test_module()), ..Default::default() }; - // Verify that the method was removed - assert!(modules.http.as_ref().unwrap().method("anything").is_none()); - } + // Remove a method that exists + assert!(modules.remove_ws_method("anything")); - #[test] - fn test_remove_ws_method() { - let mut modules = - TransportRpcModules { ws: Some(create_test_module()), ..Default::default() }; + // Remove a method that does not exist + assert!(!modules.remove_ws_method("non_existent_method")); - // Remove a method that exists - assert!(modules.remove_ws_method("anything")); + // Verify that the method was removed + assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); + } - // Remove a method that 
does not exist - assert!(!modules.remove_ws_method("non_existent_method")); + #[test] + fn test_remove_ipc_method() { + let mut modules = + TransportRpcModules { ipc: Some(create_test_module()), ..Default::default() }; - // Verify that the method was removed - assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); - } + // Remove a method that exists + assert!(modules.remove_ipc_method("anything")); - #[test] - fn test_remove_ipc_method() { - let mut modules = - TransportRpcModules { ipc: Some(create_test_module()), ..Default::default() }; + // Remove a method that does not exist + assert!(!modules.remove_ipc_method("non_existent_method")); - // Remove a method that exists - assert!(modules.remove_ipc_method("anything")); + // Verify that the method was removed + assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); + } - // Remove a method that does not exist - assert!(!modules.remove_ipc_method("non_existent_method")); + #[test] + fn test_remove_method_from_configured() { + let mut modules = TransportRpcModules { + http: Some(create_test_module()), + ws: Some(create_test_module()), + ipc: Some(create_test_module()), + ..Default::default() + }; - // Verify that the method was removed - assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); - } + // Remove a method that exists + assert!(modules.remove_method_from_configured("anything")); - #[test] - fn test_remove_method_from_configured() { - let mut modules = TransportRpcModules { - http: Some(create_test_module()), - ws: Some(create_test_module()), - ipc: Some(create_test_module()), - ..Default::default() - }; + // Remove a method that was just removed (it does not exist anymore) + assert!(!modules.remove_method_from_configured("anything")); - // Remove a method that exists - assert!(modules.remove_method_from_configured("anything")); + // Remove a method that does not exist + assert!(!modules.remove_method_from_configured("non_existent_method")); - // Remove a method 
that was just removed (it does not exist anymore) - assert!(!modules.remove_method_from_configured("anything")); + // Verify that the method was removed from all transports + assert!(modules.http.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); + } - // Remove a method that does not exist - assert!(!modules.remove_method_from_configured("non_existent_method")); + #[test] + fn test_replace_http_method() { + let mut modules = + TransportRpcModules { http: Some(create_test_module()), ..Default::default() }; - // Verify that the method was removed from all transports - assert!(modules.http.as_ref().unwrap().method("anything").is_none()); - assert!(modules.ws.as_ref().unwrap().method("anything").is_none()); - assert!(modules.ipc.as_ref().unwrap().method("anything").is_none()); - } + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + assert!(modules.replace_http(other_module.clone()).unwrap()); + + assert!(modules.http.as_ref().unwrap().method("something").is_some()); + + other_module.register_method("anything", |_, _, _| "fails").unwrap(); + assert!(modules.replace_http(other_module.clone()).unwrap()); + + assert!(modules.http.as_ref().unwrap().method("anything").is_some()); + } + #[test] + fn test_replace_ipc_method() { + let mut modules = + TransportRpcModules { ipc: Some(create_test_module()), ..Default::default() }; + + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + assert!(modules.replace_ipc(other_module.clone()).unwrap()); + + assert!(modules.ipc.as_ref().unwrap().method("something").is_some()); + + other_module.register_method("anything", |_, _, _| "fails").unwrap(); + assert!(modules.replace_ipc(other_module.clone()).unwrap()); + + 
assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); + } + #[test] + fn test_replace_ws_method() { + let mut modules = + TransportRpcModules { ws: Some(create_test_module()), ..Default::default() }; + + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + assert!(modules.replace_ws(other_module.clone()).unwrap()); + + assert!(modules.ws.as_ref().unwrap().method("something").is_some()); + + other_module.register_method("anything", |_, _, _| "fails").unwrap(); + assert!(modules.replace_ws(other_module.clone()).unwrap()); + + assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); + } + + #[test] + fn test_replace_configured() { + let mut modules = TransportRpcModules { + http: Some(create_test_module()), + ws: Some(create_test_module()), + ipc: Some(create_test_module()), + ..Default::default() + }; + let mut other_module = RpcModule::new(()); + other_module.register_method("something", |_, _, _| "fails").unwrap(); + + assert!(modules.replace_configured(other_module).unwrap()); + + // Verify that the other_method was added + assert!(modules.http.as_ref().unwrap().method("something").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("something").is_some()); + assert!(modules.ws.as_ref().unwrap().method("something").is_some()); + + assert!(modules.http.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ipc.as_ref().unwrap().method("anything").is_some()); + assert!(modules.ws.as_ref().unwrap().method("anything").is_some()); } } diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 4463d375a0345..00503f2c1dd7f 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -55,7 +55,4 @@ reth-testing-utils.workspace = true alloy-rlp.workspace = true -assert_matches.workspace = true - -[features] -optimism = ["reth-primitives/optimism"] +assert_matches.workspace = true \ No newline 
at end of file diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 907297de1776b..252808c14a77f 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -464,48 +464,59 @@ where } /// Called to retrieve execution payload bodies by hashes. - fn get_payload_bodies_by_hash_with( + async fn get_payload_bodies_by_hash_with( &self, hashes: Vec, f: F, ) -> EngineApiResult>> where - F: Fn(Block) -> R, + F: Fn(Block) -> R + Send + 'static, + R: Send + 'static, { let len = hashes.len() as u64; if len > MAX_PAYLOAD_BODIES_LIMIT { - return Err(EngineApiError::PayloadRequestTooLarge { len }) + return Err(EngineApiError::PayloadRequestTooLarge { len }); } - let mut result = Vec::with_capacity(hashes.len()); - for hash in hashes { - let block = self - .inner - .provider - .block(BlockHashOrNumber::Hash(hash)) - .map_err(|err| EngineApiError::Internal(Box::new(err)))?; - result.push(block.map(&f)); - } + let (tx, rx) = oneshot::channel(); + let inner = self.inner.clone(); - Ok(result) + self.inner.task_spawner.spawn_blocking(Box::pin(async move { + let mut result = Vec::with_capacity(hashes.len()); + for hash in hashes { + let block_result = inner.provider.block(BlockHashOrNumber::Hash(hash)); + match block_result { + Ok(block) => { + result.push(block.map(&f)); + } + Err(err) => { + let _ = tx.send(Err(EngineApiError::Internal(Box::new(err)))); + return; + } + } + } + tx.send(Ok(result)).ok(); + })); + + rx.await.map_err(|err| EngineApiError::Internal(Box::new(err)))? } /// Called to retrieve execution payload bodies by hashes. - pub fn get_payload_bodies_by_hash_v1( + pub async fn get_payload_bodies_by_hash_v1( &self, hashes: Vec, ) -> EngineApiResult { - self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1) + self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v1).await } /// Called to retrieve execution payload bodies by hashes. 
/// /// Same as [`Self::get_payload_bodies_by_hash_v1`] but as [`ExecutionPayloadBodiesV2`]. - pub fn get_payload_bodies_by_hash_v2( + pub async fn get_payload_bodies_by_hash_v2( &self, hashes: Vec, ) -> EngineApiResult { - self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2) + self.get_payload_bodies_by_hash_with(hashes, convert_to_payload_body_v2).await } /// Called to verify network configuration parameters and ensure that Consensus and Execution @@ -832,7 +843,7 @@ where let start = Instant::now(); let res = Self::get_payload_bodies_by_hash_v1(self, block_hashes); self.inner.metrics.latency.get_payload_bodies_by_hash_v1.record(start.elapsed()); - Ok(res?) + Ok(res.await?) } async fn get_payload_bodies_by_hash_v2( @@ -843,7 +854,7 @@ where let start = Instant::now(); let res = Self::get_payload_bodies_by_hash_v2(self, block_hashes); self.inner.metrics.latency.get_payload_bodies_by_hash_v2.record(start.elapsed()); - Ok(res?) + Ok(res.await?) } /// Handler for `engine_getPayloadBodiesByRangeV1` @@ -1147,7 +1158,7 @@ mod tests { .collect::>(); let hashes = blocks.iter().map(|b| b.hash()).collect(); - let res = api.get_payload_bodies_by_hash_v1(hashes).unwrap(); + let res = api.get_payload_bodies_by_hash_v1(hashes).await.unwrap(); assert_eq!(res, expected); } } diff --git a/crates/rpc/rpc-eth-api/Cargo.toml b/crates/rpc/rpc-eth-api/Cargo.toml index 23dd46baecf19..e59ee39a694b4 100644 --- a/crates/rpc/rpc-eth-api/Cargo.toml +++ b/crates/rpc/rpc-eth-api/Cargo.toml @@ -59,8 +59,3 @@ tracing.workspace = true [features] js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] client = ["jsonrpsee/client", "jsonrpsee/async-client"] -optimism = [ - "reth-primitives/optimism", - "revm/optimism", - "reth-provider/optimism", -] diff --git a/crates/rpc/rpc-eth-api/src/helpers/block.rs b/crates/rpc/rpc-eth-api/src/helpers/block.rs index 3a26536cc7cf7..d7e081d80f807 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/block.rs +++ 
b/crates/rpc/rpc-eth-api/src/helpers/block.rs @@ -6,7 +6,7 @@ use alloy_rpc_types::{Header, Index}; use futures::Future; use reth_primitives::{BlockId, Receipt, SealedBlock, SealedBlockWithSenders}; use reth_provider::{BlockIdReader, BlockReader, BlockReaderIdExt, HeaderProvider}; -use reth_rpc_eth_types::{EthApiError, EthStateCache}; +use reth_rpc_eth_types::EthStateCache; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; use crate::{FromEthApiError, FullEthApiTypes, RpcBlock, RpcReceipt}; @@ -47,13 +47,20 @@ pub trait EthBlocks: LoadBlock { async move { let Some(block) = self.block_with_senders(block_id).await? else { return Ok(None) }; let block_hash = block.hash(); - let total_difficulty = EthBlocks::provider(self) + let mut total_difficulty = EthBlocks::provider(self) .header_td_by_number(block.number) - .map_err(Self::Error::from_eth_err)? - .ok_or(EthApiError::HeaderNotFound(block_id))?; + .map_err(Self::Error::from_eth_err)?; + if total_difficulty.is_none() { + // if we failed to find td after we successfully loaded the block, try again using + // the hash this only matters if the chain is currently transitioning the merge block and there's a reorg: + total_difficulty = EthBlocks::provider(self) + .header_td(&block.hash()) + .map_err(Self::Error::from_eth_err)?; + } + let block = from_block::( block.unseal(), - total_difficulty, + total_difficulty.unwrap_or_default(), full.into(), Some(block_hash), ) diff --git a/crates/rpc/rpc-eth-api/src/helpers/call.rs b/crates/rpc/rpc-eth-api/src/helpers/call.rs index 8d34020d67bcd..5bc7d73b2221a 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/call.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/call.rs @@ -28,8 +28,8 @@ use reth_rpc_eth_types::{ cache::db::{StateCacheDbRefMutWrapper, StateProviderTraitObjWrapper}, error::ensure_success, revm_utils::{ - apply_block_overrides, apply_state_overrides, caller_gas_allowance, - cap_tx_gas_limit_with_caller_allowance, get_precompiles, CallFees, + 
apply_block_overrides, apply_state_overrides, caller_gas_allowance, get_precompiles, + CallFees, }, simulate::{self, EthSimulateError}, EthApiError, RevertError, RpcInvalidTransactionError, StateCacheDb, @@ -256,7 +256,6 @@ pub trait EthCall: Call + LoadPendingBlock { )?; let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?; - let gas_limit = self.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the // state that points to the beginning of the block, which is the state at @@ -302,14 +301,7 @@ pub trait EthCall: Call + LoadPendingBlock { let overrides = EvmOverrides::new(state_overrides, block_overrides.clone()); let env = this - .prepare_call_env( - cfg.clone(), - block_env.clone(), - tx, - gas_limit, - &mut db, - overrides, - ) + .prepare_call_env(cfg.clone(), block_env.clone(), tx, &mut db, overrides) .map(Into::into)?; let (res, _) = this.transact(&mut db, env)?; @@ -387,8 +379,9 @@ pub trait EthCall: Call + LoadPendingBlock { let mut db = CacheDB::new(StateProviderDatabase::new(state)); if request.gas.is_none() && env.tx.gas_price > U256::ZERO { + let cap = caller_gas_allowance(&mut db, &env.tx)?; // no gas limit was provided in the request, so we need to cap the request's gas limit - cap_tx_gas_limit_with_caller_allowance(&mut db, &mut env.tx)?; + env.tx.gas_limit = cap.min(env.block.gas_limit).saturating_to(); } let from = request.from.unwrap_or_default(); @@ -559,14 +552,7 @@ pub trait Call: LoadState + SpawnBlocking { let mut db = CacheDB::new(StateProviderDatabase::new(StateProviderTraitObjWrapper(&state))); - let env = this.prepare_call_env( - cfg, - block_env, - request, - this.call_gas_limit(), - &mut db, - overrides, - )?; + let env = this.prepare_call_env(cfg, block_env, request, &mut db, overrides)?; f(StateCacheDbRefMutWrapper(&mut db), env) }) @@ -731,6 +717,7 @@ pub trait Call: LoadState + SpawnBlocking { // Keep a copy of gas related request values let tx_request_gas_limit = 
request.gas; let tx_request_gas_price = request.gas_price; + // the gas limit of the corresponding block let block_env_gas_limit = block.gas_limit; // Determine the highest possible gas limit, considering both the request's specified limit @@ -1038,7 +1025,14 @@ pub trait Call: LoadState + SpawnBlocking { block_env.get_blob_gasprice().map(U256::from), )?; - let gas_limit = gas.unwrap_or_else(|| block_env.gas_limit.min(U256::from(u64::MAX)).to()); + let gas_limit = gas.unwrap_or_else(|| { + // Use maximum allowed gas limit. The reason for this + // is that both Erigon and Geth use pre-configured gas cap even if + // it's possible to derive the gas limit from the block: + // + block_env.gas_limit.saturating_to() + }); #[allow(clippy::needless_update)] let env = TxEnv { @@ -1080,7 +1074,7 @@ pub trait Call: LoadState + SpawnBlocking { Ok(EnvWithHandlerCfg::new_with_cfg_env(cfg, block, tx)) } - /// Prepares the [`EnvWithHandlerCfg`] for execution. + /// Prepares the [`EnvWithHandlerCfg`] for execution of calls. /// /// Does not commit any changes to the underlying database. /// @@ -1092,14 +1086,12 @@ pub trait Call: LoadState + SpawnBlocking { /// - `disable_base_fee` is set to `true` /// - `nonce` is set to `None` /// - /// Additionally, the block gas limit so that higher tx gas limits can be used in `eth_call`. - /// - `disable_block_gas_limit` is set to `true` + /// In addition, this changes the block's gas limit to the configured [`Self::call_gas_limit`]. 
fn prepare_call_env( &self, mut cfg: CfgEnvWithHandlerCfg, mut block: BlockEnv, mut request: TransactionRequest, - gas_limit: u64, db: &mut CacheDB, overrides: EvmOverrides, ) -> Result @@ -1107,9 +1099,15 @@ pub trait Call: LoadState + SpawnBlocking { DB: DatabaseRef, EthApiError: From<::Error>, { - // we want to disable this in eth_call, since this is common practice used by other node - // impls and providers - cfg.disable_block_gas_limit = true; + if request.gas > Some(self.call_gas_limit()) { + // configured gas exceeds limit + return Err( + EthApiError::InvalidTransaction(RpcInvalidTransactionError::GasTooHigh).into() + ) + } + + // apply configured gas cap + block.gas_limit = U256::from(self.call_gas_limit()); // Disabled because eth_call is sometimes used with eoa senders // See @@ -1138,15 +1136,9 @@ pub trait Call: LoadState + SpawnBlocking { if env.tx.gas_price > U256::ZERO { // If gas price is specified, cap transaction gas limit with caller allowance trace!(target: "rpc::eth::call", ?env, "Applying gas limit cap with caller allowance"); - cap_tx_gas_limit_with_caller_allowance(db, &mut env.tx)?; - } else { - // If no gas price is specified, use maximum allowed gas limit. 
The reason for this - // is that both Erigon and Geth use pre-configured gas cap even if - // it's possible to derive the gas limit from the block: - // - trace!(target: "rpc::eth::call", ?env, "Applying gas limit cap as the maximum gas limit"); - env.tx.gas_limit = gas_limit; + let cap = caller_gas_allowance(db, &env.tx)?; + // ensure we cap gas_limit to the block's + env.tx.gas_limit = cap.min(env.block.gas_limit).saturating_to(); } } diff --git a/crates/rpc/rpc-eth-api/src/helpers/state.rs b/crates/rpc/rpc-eth-api/src/helpers/state.rs index dee4e58956385..d601e43d90a81 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/state.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/state.rs @@ -9,11 +9,12 @@ use reth_errors::RethError; use reth_evm::ConfigureEvmEnv; use reth_primitives::{BlockId, Header, KECCAK_EMPTY}; use reth_provider::{ - BlockIdReader, ChainSpecProvider, StateProvider, StateProviderBox, StateProviderFactory, + BlockIdReader, BlockNumReader, ChainSpecProvider, StateProvider, StateProviderBox, + StateProviderFactory, }; use reth_rpc_eth_types::{EthApiError, EthStateCache, PendingBlockEnv, RpcInvalidTransactionError}; use reth_rpc_types_compat::proof::from_primitive_account_proof; -use reth_transaction_pool::{PoolTransaction, TransactionPool}; +use reth_transaction_pool::TransactionPool; use revm_primitives::{BlockEnv, CfgEnvWithHandlerCfg, SpecId}; use crate::{EthApiTypes, FromEthApiError}; @@ -92,25 +93,26 @@ pub trait EthState: LoadState + SpawnBlocking { where Self: EthApiSpec, { - let chain_info = self.chain_info().map_err(Self::Error::from_eth_err)?; - let block_id = block_id.unwrap_or_default(); - - // Check whether the distance to the block exceeds the maximum configured window. - let block_number = LoadState::provider(self) - .block_number_for_id(block_id) - .map_err(Self::Error::from_eth_err)? 
- .ok_or(EthApiError::HeaderNotFound(block_id))?; - let max_window = self.max_proof_window(); - if chain_info.best_number.saturating_sub(block_number) > max_window { - return Err(EthApiError::ExceedsMaxProofWindow.into()) - } - Ok(async move { let _permit = self .acquire_owned() .await .map_err(RethError::other) .map_err(EthApiError::Internal)?; + + let chain_info = self.chain_info().map_err(Self::Error::from_eth_err)?; + let block_id = block_id.unwrap_or_default(); + + // Check whether the distance to the block exceeds the maximum configured window. + let block_number = LoadState::provider(self) + .block_number_for_id(block_id) + .map_err(Self::Error::from_eth_err)? + .ok_or(EthApiError::HeaderNotFound(block_id))?; + let max_window = self.max_proof_window(); + if chain_info.best_number.saturating_sub(block_number) > max_window { + return Err(EthApiError::ExceedsMaxProofWindow.into()) + } + self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; let storage_keys = keys.iter().map(|key| key.0).collect::>(); @@ -131,10 +133,21 @@ pub trait EthState: LoadState + SpawnBlocking { ) -> impl Future, Self::Error>> + Send { self.spawn_blocking_io(move |this| { let state = this.state_at_block_id(block_id)?; - let account = state.basic_account(address).map_err(Self::Error::from_eth_err)?; let Some(account) = account else { return Ok(None) }; + // Check whether the distance to the block exceeds the maximum configured proof window. + let chain_info = + LoadState::provider(&this).chain_info().map_err(Self::Error::from_eth_err)?; + let block_number = LoadState::provider(&this) + .block_number_for_id(block_id) + .map_err(Self::Error::from_eth_err)? 
+ .ok_or(EthApiError::HeaderNotFound(block_id))?; + let max_window = this.max_proof_window(); + if chain_info.best_number.saturating_sub(block_number) > max_window { + return Err(EthApiError::ExceedsMaxProofWindow.into()) + } + let balance = account.balance; let nonce = account.nonce; let code_hash = account.bytecode_hash.unwrap_or(KECCAK_EMPTY); @@ -280,23 +293,23 @@ pub trait LoadState: EthApiTypes { if block_id == Some(BlockId::pending()) { // for pending tag we need to find the highest nonce in the pool - let address_txs = this.pool().get_transactions_by_sender(address); - if let Some(highest_pool_nonce) = - address_txs.iter().map(|item| item.transaction.nonce()).max() + if let Some(highest_pool_tx) = + this.pool().get_highest_transaction_by_sender(address) { - // and the corresponding txcount is nonce + 1 - let next_nonce = - nonce.max(highest_pool_nonce).checked_add(1).ok_or_else(|| { - Self::Error::from(EthApiError::InvalidTransaction( - RpcInvalidTransactionError::NonceMaxValue, - )) - })?; - - let tx_count = nonce.max(next_nonce); - return Ok(U256::from(tx_count)) + { + // and the corresponding txcount is nonce + 1 + let next_nonce = + nonce.max(highest_pool_tx.nonce()).checked_add(1).ok_or_else(|| { + Self::Error::from(EthApiError::InvalidTransaction( + RpcInvalidTransactionError::NonceMaxValue, + )) + })?; + + let tx_count = nonce.max(next_nonce); + return Ok(U256::from(tx_count)); + } } } - Ok(U256::from(nonce)) }) } diff --git a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs index d98cb69bfc30d..c25b18c2f67b4 100644 --- a/crates/rpc/rpc-eth-api/src/helpers/transaction.rs +++ b/crates/rpc/rpc-eth-api/src/helpers/transaction.rs @@ -22,8 +22,7 @@ use reth_transaction_pool::{PoolTransaction, TransactionOrigin, TransactionPool} use crate::{FromEthApiError, FullEthApiTypes, IntoEthApiError, RpcReceipt, RpcTransaction}; use super::{ - Call, EthApiSpec, EthSigner, LoadBlock, LoadFee, LoadPendingBlock, 
LoadReceipt, LoadState, - SpawnBlocking, + Call, EthApiSpec, EthSigner, LoadBlock, LoadPendingBlock, LoadReceipt, LoadState, SpawnBlocking, }; /// Transaction related functions for the [`EthApiServer`](crate::EthApiServer) trait in @@ -98,7 +97,7 @@ pub trait EthTransactions: LoadTransaction { async move { // Note: this is mostly used to fetch pooled transactions so we check the pool first if let Some(tx) = - self.pool().get_pooled_transaction_element(hash).map(|tx| tx.envelope_encoded()) + self.pool().get_pooled_transaction_element(hash).map(|tx| tx.encoded_2718().into()) { return Ok(Some(tx)) } @@ -231,7 +230,7 @@ pub trait EthTransactions: LoadTransaction { LoadState::pool(self).get_transaction_by_sender_and_nonce(sender, nonce) { let transaction = tx.transaction.clone().into_consensus(); - return Ok(Some(from_recovered::(transaction))); + return Ok(Some(from_recovered::(transaction.into()))); } } @@ -324,7 +323,7 @@ pub trait EthTransactions: LoadTransaction { async move { let recovered = recover_raw_transaction(tx.clone())?; let pool_transaction = - ::Transaction::from_pooled(recovered); + ::Transaction::from_pooled(recovered.into()); // submit the transaction to the pool with a `Local` origin let hash = self @@ -344,7 +343,7 @@ pub trait EthTransactions: LoadTransaction { mut request: TransactionRequest, ) -> impl Future> + Send where - Self: EthApiSpec + LoadBlock + LoadPendingBlock + LoadFee + Call, + Self: EthApiSpec + LoadBlock + LoadPendingBlock + Call, { async move { let from = match request.from { @@ -376,7 +375,7 @@ pub trait EthTransactions: LoadTransaction { let recovered = signed_tx.into_ecrecovered().ok_or(EthApiError::InvalidTransactionSignature)?; - let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus(recovered).map_err(|_| EthApiError::TransactionConversionError)?; + let pool_transaction = <::Pool as TransactionPool>::Transaction::try_from_consensus(recovered.into()).map_err(|_| 
EthApiError::TransactionConversionError)?; // submit the transaction to the pool with a `Local` origin let hash = LoadTransaction::pool(self) @@ -518,7 +517,7 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { if let Some(tx) = self.pool().get(&hash).map(|tx| tx.transaction.clone().into_consensus()) { - resp = Some(TransactionSource::Pool(tx)); + resp = Some(TransactionSource::Pool(tx.into())); } } @@ -566,7 +565,7 @@ pub trait LoadTransaction: SpawnBlocking + FullEthApiTypes { .get_block_with_senders(block_hash) .await .map_err(Self::Error::from_eth_err)?; - Ok(block.map(|block| (transaction, block.seal(block_hash)))) + Ok(block.map(|block| (transaction, (*block).clone().seal(block_hash)))) } } } diff --git a/crates/rpc/rpc-eth-types/src/cache/db.rs b/crates/rpc/rpc-eth-types/src/cache/db.rs index ad9804893a706..7422dcfb8a7b2 100644 --- a/crates/rpc/rpc-eth-types/src/cache/db.rs +++ b/crates/rpc/rpc-eth-types/src/cache/db.rs @@ -20,7 +20,7 @@ pub type StateCacheDb<'a> = CacheDB(pub &'a dyn StateProvider); -impl<'a> reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'a> { +impl reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'_> { fn state_root( &self, hashed_state: reth_trie::HashedPostState, @@ -50,7 +50,7 @@ impl<'a> reth_storage_api::StateRootProvider for StateProviderTraitObjWrapper<'a } } -impl<'a> reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'a> { +impl reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper<'_> { fn storage_root( &self, address: Address, @@ -58,9 +58,18 @@ impl<'a> reth_storage_api::StorageRootProvider for StateProviderTraitObjWrapper< ) -> ProviderResult { self.0.storage_root(address, hashed_storage) } + + fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult { + self.0.storage_proof(address, slot, hashed_storage) + } } -impl<'a> reth_storage_api::StateProofProvider for 
StateProviderTraitObjWrapper<'a> { +impl reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<'_> { fn proof( &self, input: reth_trie::TrieInput, @@ -88,7 +97,7 @@ impl<'a> reth_storage_api::StateProofProvider for StateProviderTraitObjWrapper<' } } -impl<'a> reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'a> { +impl reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'_> { fn basic_account( &self, address: revm_primitives::Address, @@ -97,7 +106,7 @@ impl<'a> reth_storage_api::AccountReader for StateProviderTraitObjWrapper<'a> { } } -impl<'a> reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'a> { +impl reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'_> { fn block_hash( &self, block_number: alloy_primitives::BlockNumber, @@ -121,7 +130,7 @@ impl<'a> reth_storage_api::BlockHashReader for StateProviderTraitObjWrapper<'a> } } -impl<'a> StateProvider for StateProviderTraitObjWrapper<'a> { +impl StateProvider for StateProviderTraitObjWrapper<'_> { fn account_balance( &self, addr: revm_primitives::Address, @@ -164,7 +173,7 @@ impl<'a> StateProvider for StateProviderTraitObjWrapper<'a> { #[allow(missing_debug_implementations)] pub struct StateCacheDbRefMutWrapper<'a, 'b>(pub &'b mut StateCacheDb<'a>); -impl<'a, 'b> Database for StateCacheDbRefMutWrapper<'a, 'b> { +impl<'a> Database for StateCacheDbRefMutWrapper<'a, '_> { type Error = as Database>::Error; fn basic( &mut self, @@ -190,7 +199,7 @@ impl<'a, 'b> Database for StateCacheDbRefMutWrapper<'a, 'b> { } } -impl<'a, 'b> DatabaseRef for StateCacheDbRefMutWrapper<'a, 'b> { +impl<'a> DatabaseRef for StateCacheDbRefMutWrapper<'a, '_> { type Error = as Database>::Error; fn basic_ref( diff --git a/crates/rpc/rpc-eth-types/src/cache/mod.rs b/crates/rpc/rpc-eth-types/src/cache/mod.rs index fac611571aca1..e8353ecc4e166 100644 --- a/crates/rpc/rpc-eth-types/src/cache/mod.rs +++ b/crates/rpc/rpc-eth-types/src/cache/mod.rs @@ -38,7 +38,8 @@ type 
BlockTransactionsResponseSender = oneshot::Sender>>>; /// The type that can send the response to a requested [`BlockWithSenders`] -type BlockWithSendersResponseSender = oneshot::Sender>>; +type BlockWithSendersResponseSender = + oneshot::Sender>>>; /// The type that can send the response to the requested receipts of a block. type ReceiptsResponseSender = oneshot::Sender>>>>; @@ -48,7 +49,7 @@ type EnvResponseSender = oneshot::Sender = MultiConsumerLruCache< B256, - BlockWithSenders, + Arc, L, Either, >; @@ -151,7 +152,7 @@ impl EthStateCache { rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)?; if let Ok(Some(block_with_senders)) = block_with_senders_res { - Ok(Some(block_with_senders.block)) + Ok(Some(block_with_senders.block.clone())) } else { Ok(None) } @@ -186,7 +187,7 @@ impl EthStateCache { Ok(self .get_block_with_senders(block_hash) .await? - .map(|block| block.into_transactions_ecrecovered().collect())) + .map(|block| (*block).clone().into_transactions_ecrecovered().collect())) } /// Fetches both transactions and receipts for the given block hash. @@ -208,7 +209,7 @@ impl EthStateCache { pub async fn get_block_with_senders( &self, block_hash: B256, - ) -> ProviderResult> { + ) -> ProviderResult>> { let (response_tx, rx) = oneshot::channel(); let _ = self.to_service.send(CacheAction::GetBlockWithSenders { block_hash, response_tx }); rx.await.map_err(|_| ProviderError::CacheServiceUnavailable)? @@ -221,7 +222,10 @@ impl EthStateCache { &self, block_hash: B256, ) -> ProviderResult> { - Ok(self.get_block_with_senders(block_hash).await?.map(|block| block.seal(block_hash))) + Ok(self + .get_block_with_senders(block_hash) + .await? 
+ .map(|block| (*block).clone().seal(block_hash))) } /// Requests the [Receipt] for the block hash @@ -288,7 +292,7 @@ pub(crate) struct EthStateCacheService< LimitReceipts = ByLength, LimitEnvs = ByLength, > where - LimitBlocks: Limiter, + LimitBlocks: Limiter>, LimitReceipts: Limiter>>, LimitEnvs: Limiter, { @@ -318,7 +322,11 @@ where Tasks: TaskSpawner + Clone + 'static, EvmConfig: ConfigureEvm
, { - fn on_new_block(&mut self, block_hash: B256, res: ProviderResult>) { + fn on_new_block( + &mut self, + block_hash: B256, + res: ProviderResult>>, + ) { if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { @@ -328,7 +336,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body.transactions) + maybe_block.map(|block| block.block.body.transactions.clone()) })); } } @@ -360,6 +368,7 @@ where } fn on_reorg_block(&mut self, block_hash: B256, res: ProviderResult>) { + let res = res.map(|b| b.map(Arc::new)); if let Some(queued) = self.full_block_cache.remove(&block_hash) { // send the response to queued senders for tx in queued { @@ -369,7 +378,7 @@ where } Either::Right(transaction_tx) => { let _ = transaction_tx.send(res.clone().map(|maybe_block| { - maybe_block.map(|block| block.block.body.transactions) + maybe_block.map(|block| block.block.body.transactions.clone()) })); } } @@ -431,10 +440,12 @@ where let _permit = rate_limiter.acquire().await; // Only look in the database to prevent situations where we // looking up the tree is blocking - let block_sender = provider.block_with_senders( - BlockHashOrNumber::Hash(block_hash), - TransactionVariant::WithHash, - ); + let block_sender = provider + .block_with_senders( + BlockHashOrNumber::Hash(block_hash), + TransactionVariant::WithHash, + ) + .map(|maybe_block| maybe_block.map(Arc::new)); let _ = action_tx.send(CacheAction::BlockWithSendersResult { block_hash, res: block_sender, @@ -459,10 +470,12 @@ where let _permit = rate_limiter.acquire().await; // Only look in the database to prevent situations where we // looking up the tree is blocking - let res = provider.block_with_senders( - BlockHashOrNumber::Hash(block_hash), - TransactionVariant::WithHash, - ); + let res = provider + .block_with_senders( + BlockHashOrNumber::Hash(block_hash), + 
TransactionVariant::WithHash, + ) + .map(|b| b.map(Arc::new)); let _ = action_tx.send(CacheAction::BlockWithSendersResult { block_hash, res, @@ -561,7 +574,7 @@ where } CacheAction::CacheNewCanonicalChain { chain_change } => { for block in chain_change.blocks { - this.on_new_block(block.hash(), Ok(Some(block.unseal()))); + this.on_new_block(block.hash(), Ok(Some(Arc::new(block.unseal())))); } for block_receipts in chain_change.receipts { @@ -601,7 +614,7 @@ enum CacheAction { GetBlockTransactions { block_hash: B256, response_tx: BlockTransactionsResponseSender }, GetEnv { block_hash: B256, response_tx: EnvResponseSender }, GetReceipts { block_hash: B256, response_tx: ReceiptsResponseSender }, - BlockWithSendersResult { block_hash: B256, res: ProviderResult> }, + BlockWithSendersResult { block_hash: B256, res: ProviderResult>> }, ReceiptsResult { block_hash: B256, res: ProviderResult>>> }, EnvResult { block_hash: B256, res: Box> }, CacheNewCanonicalChain { chain_change: ChainChange }, diff --git a/crates/rpc/rpc-eth-types/src/error.rs b/crates/rpc/rpc-eth-types/src/error.rs index fbb93164ce9f5..212fca36d9c9a 100644 --- a/crates/rpc/rpc-eth-types/src/error.rs +++ b/crates/rpc/rpc-eth-types/src/error.rs @@ -365,7 +365,7 @@ pub enum RpcInvalidTransactionError { PrecompileOutOfGas(u64), /// An operand to an opcode was invalid or out of range. /// Contains the gas limit. 
- #[error("out of gas: invalid operand to an opcode; {0}")] + #[error("out of gas: invalid operand to an opcode: {0}")] InvalidOperandOutOfGas(u64), /// Thrown if executing a transaction failed during estimate/call #[error(transparent)] @@ -488,8 +488,14 @@ impl From for RpcInvalidTransactionError { InvalidTransaction::InvalidChainId => Self::InvalidChainId, InvalidTransaction::PriorityFeeGreaterThanMaxFee => Self::TipAboveFeeCap, InvalidTransaction::GasPriceLessThanBasefee => Self::FeeCapTooLow, - InvalidTransaction::CallerGasLimitMoreThanBlock | - InvalidTransaction::CallGasCostMoreThanGasLimit => Self::GasTooHigh, + InvalidTransaction::CallerGasLimitMoreThanBlock => { + // tx.gas > block.gas_limit + Self::GasTooHigh + } + InvalidTransaction::CallGasCostMoreThanGasLimit => { + // tx.gas < cost + Self::GasTooLow + } InvalidTransaction::RejectCallerWithCode => Self::SenderNoEOA, InvalidTransaction::LackOfFundForMaxFee { fee, balance } => { Self::InsufficientFunds { cost: *fee, balance: *balance } diff --git a/crates/rpc/rpc-eth-types/src/lib.rs b/crates/rpc/rpc-eth-types/src/lib.rs index fba893c15f590..fa36dae4c8810 100644 --- a/crates/rpc/rpc-eth-types/src/lib.rs +++ b/crates/rpc/rpc-eth-types/src/lib.rs @@ -36,7 +36,6 @@ pub use gas_oracle::{ GasCap, GasPriceOracle, GasPriceOracleConfig, GasPriceOracleResult, RPC_DEFAULT_GAS_CAP, }; pub use id_provider::EthSubscriptionIdProvider; -pub use logs_utils::EthFilterError; pub use pending_block::{PendingBlock, PendingBlockEnv, PendingBlockEnvOrigin}; pub use receipt::ReceiptBuilder; pub use transaction::TransactionSource; diff --git a/crates/rpc/rpc-eth-types/src/logs_utils.rs b/crates/rpc/rpc-eth-types/src/logs_utils.rs index bb44dc0e6669b..c64bbe055b79d 100644 --- a/crates/rpc/rpc-eth-types/src/logs_utils.rs +++ b/crates/rpc/rpc-eth-types/src/logs_utils.rs @@ -3,64 +3,12 @@ //! Log parsing for building filter. 
use alloy_primitives::TxHash; -use alloy_rpc_types::{FilterId, FilteredParams, Log}; +use alloy_rpc_types::{FilteredParams, Log}; use reth_chainspec::ChainInfo; use reth_errors::ProviderError; -use reth_primitives::{BlockNumHash, Receipt}; -use reth_rpc_server_types::result::rpc_error_with_code; +use reth_primitives::{BlockNumHash, Receipt, SealedBlock}; use reth_storage_api::BlockReader; -use crate::EthApiError; - -/// Errors that can occur in the handler implementation -#[derive(Debug, thiserror::Error)] -pub enum EthFilterError { - /// Filter not found. - #[error("filter not found")] - FilterNotFound(FilterId), - /// Invalid block range. - #[error("invalid block range params")] - InvalidBlockRangeParams, - /// Query scope is too broad. - #[error("query exceeds max block range {0}")] - QueryExceedsMaxBlocks(u64), - /// Query result is too large. - #[error("query exceeds max results {0}")] - QueryExceedsMaxResults(usize), - /// Error serving request in `eth_` namespace. - #[error(transparent)] - EthAPIError(#[from] EthApiError), - /// Error thrown when a spawned task failed to deliver a response. 
- #[error("internal filter error")] - InternalError, -} - -// convert the error -impl From for jsonrpsee_types::error::ErrorObject<'static> { - fn from(err: EthFilterError) -> Self { - match err { - EthFilterError::FilterNotFound(_) => { - rpc_error_with_code(jsonrpsee_types::error::INVALID_PARAMS_CODE, "filter not found") - } - err @ EthFilterError::InternalError => { - rpc_error_with_code(jsonrpsee_types::error::INTERNAL_ERROR_CODE, err.to_string()) - } - EthFilterError::EthAPIError(err) => err.into(), - err @ (EthFilterError::InvalidBlockRangeParams | - EthFilterError::QueryExceedsMaxBlocks(_) | - EthFilterError::QueryExceedsMaxResults(_)) => { - rpc_error_with_code(jsonrpsee_types::error::INVALID_PARAMS_CODE, err.to_string()) - } - } - } -} - -impl From for EthFilterError { - fn from(err: ProviderError) -> Self { - Self::EthAPIError(err.into()) - } -} - /// Returns all matching of a block's receipts when the transaction hashes are known. pub fn matching_block_logs_with_tx_hashes<'a, I>( filter: &FilteredParams, @@ -97,23 +45,32 @@ where all_logs } +/// Helper enum to fetch a transaction either from a block or from the provider. +#[derive(Debug)] +pub enum ProviderOrBlock<'a, P: BlockReader> { + /// Provider + Provider(&'a P), + /// [`SealedBlock`] + Block(SealedBlock), +} + /// Appends all matching logs of a block's receipts. /// If the log matches, look up the corresponding transaction hash. -pub fn append_matching_block_logs( +pub fn append_matching_block_logs( all_logs: &mut Vec, - provider: impl BlockReader, + provider_or_block: ProviderOrBlock<'_, P>, filter: &FilteredParams, block_num_hash: BlockNumHash, receipts: &[Receipt], removed: bool, block_timestamp: u64, -) -> Result<(), EthFilterError> { +) -> Result<(), ProviderError> { // Tracks the index of a log in the entire block. let mut log_index: u64 = 0; // Lazy loaded number of the first transaction in the block. 
- // This is useful for blocks with multiple matching logs because it prevents - // re-querying the block body indices. + // This is useful for blocks with multiple matching logs because it + // prevents re-querying the block body indices. let mut loaded_first_tx_num = None; // Iterate over receipts and append matching logs. @@ -123,27 +80,37 @@ pub fn append_matching_block_logs( for log in &receipt.logs { if log_matches_filter(block_num_hash, log, filter) { - let first_tx_num = match loaded_first_tx_num { - Some(num) => num, - None => { - let block_body_indices = - provider.block_body_indices(block_num_hash.number)?.ok_or( - ProviderError::BlockBodyIndicesNotFound(block_num_hash.number), - )?; - loaded_first_tx_num = Some(block_body_indices.first_tx_num); - block_body_indices.first_tx_num - } - }; - // if this is the first match in the receipt's logs, look up the transaction hash if transaction_hash.is_none() { - // This is safe because Transactions and Receipts have the same keys. - let transaction_id = first_tx_num + receipt_idx as u64; - let transaction = provider - .transaction_by_id(transaction_id)? - .ok_or_else(|| ProviderError::TransactionNotFound(transaction_id.into()))?; - - transaction_hash = Some(transaction.hash()); + transaction_hash = match &provider_or_block { + ProviderOrBlock::Block(block) => { + block.body.transactions.get(receipt_idx).map(|t| t.hash()) + } + ProviderOrBlock::Provider(provider) => { + let first_tx_num = match loaded_first_tx_num { + Some(num) => num, + None => { + let block_body_indices = provider + .block_body_indices(block_num_hash.number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound( + block_num_hash.number, + ))?; + loaded_first_tx_num = Some(block_body_indices.first_tx_num); + block_body_indices.first_tx_num + } + }; + + // This is safe because Transactions and Receipts have the same + // keys. 
+ let transaction_id = first_tx_num + receipt_idx as u64; + let transaction = + provider.transaction_by_id(transaction_id)?.ok_or_else(|| { + ProviderError::TransactionNotFound(transaction_id.into()) + })?; + + Some(transaction.hash()) + } + }; } let log = Log { diff --git a/crates/rpc/rpc-eth-types/src/revm_utils.rs b/crates/rpc/rpc-eth-types/src/revm_utils.rs index f6fbdc2f7ac8b..25c54fd467772 100644 --- a/crates/rpc/rpc-eth-types/src/revm_utils.rs +++ b/crates/rpc/rpc-eth-types/src/revm_utils.rs @@ -23,25 +23,15 @@ pub fn get_precompiles(spec_id: SpecId) -> impl IntoIterator { Precompiles::new(spec).addresses().copied().map(Address::from) } -/// Caps the configured [`TxEnv`] `gas_limit` with the allowance of the caller. -pub fn cap_tx_gas_limit_with_caller_allowance(db: &mut DB, env: &mut TxEnv) -> EthResult<()> -where - DB: Database, - EthApiError: From<::Error>, -{ - if let Ok(gas_limit) = caller_gas_allowance(db, env)?.try_into() { - env.gas_limit = gas_limit; - } - - Ok(()) -} - /// Calculates the caller gas allowance. /// /// `allowance = (account.balance - tx.value) / tx.gas_price` /// /// Returns an error if the caller has insufficient funds. /// Caution: This assumes non-zero `env.gas_price`. Otherwise, zero allowance will be returned. +/// +/// Note: this takes the mut [Database] trait because the loaded sender can be reused for the +/// following operation like `eth_call`. pub fn caller_gas_allowance(db: &mut DB, env: &TxEnv) -> EthResult where DB: Database, diff --git a/crates/rpc/rpc-eth-types/src/utils.rs b/crates/rpc/rpc-eth-types/src/utils.rs index bb7c3d64846ee..596acc74ce1ee 100644 --- a/crates/rpc/rpc-eth-types/src/utils.rs +++ b/crates/rpc/rpc-eth-types/src/utils.rs @@ -1,5 +1,6 @@ //! 
Commonly used code snippets +use alloy_eips::eip2718::Decodable2718; use alloy_primitives::Bytes; use reth_primitives::{PooledTransactionsElement, PooledTransactionsElementEcRecovered}; use std::future::Future; @@ -8,13 +9,13 @@ use super::{EthApiError, EthResult}; /// Recovers a [`PooledTransactionsElementEcRecovered`] from an enveloped encoded byte stream. /// -/// See [`PooledTransactionsElement::decode_enveloped`] +/// See [`Decodable2718::decode_2718`] pub fn recover_raw_transaction(data: Bytes) -> EthResult { if data.is_empty() { return Err(EthApiError::EmptyRawTransactionData) } - let transaction = PooledTransactionsElement::decode_enveloped(&mut data.as_ref()) + let transaction = PooledTransactionsElement::decode_2718(&mut data.as_ref()) .map_err(|_| EthApiError::FailedToDecodeSignedTransaction)?; transaction.try_into_ecrecovered().or(Err(EthApiError::InvalidTransactionSignature)) diff --git a/crates/rpc/rpc-layer/src/auth_layer.rs b/crates/rpc/rpc-layer/src/auth_layer.rs index 41ebce32dfb35..cdca181cbd061 100644 --- a/crates/rpc/rpc-layer/src/auth_layer.rs +++ b/crates/rpc/rpc-layer/src/auth_layer.rs @@ -176,7 +176,7 @@ mod tests { missing_jwt_error().await; wrong_jwt_signature_error().await; invalid_issuance_timestamp_error().await; - jwt_decode_error().await; + jwt_decode_error().await } async fn valid_jwt() { diff --git a/crates/rpc/rpc-testing-util/src/debug.rs b/crates/rpc/rpc-testing-util/src/debug.rs index cdcb454a2486e..f50064e80ce95 100644 --- a/crates/rpc/rpc-testing-util/src/debug.rs +++ b/crates/rpc/rpc-testing-util/src/debug.rs @@ -304,7 +304,7 @@ impl<'a> DebugTraceTransactionsStream<'a> { } } -impl<'a> Stream for DebugTraceTransactionsStream<'a> { +impl Stream for DebugTraceTransactionsStream<'_> { type Item = TraceTransactionResult; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -312,7 +312,7 @@ impl<'a> Stream for DebugTraceTransactionsStream<'a> { } } -impl<'a> std::fmt::Debug for 
DebugTraceTransactionsStream<'a> { +impl std::fmt::Debug for DebugTraceTransactionsStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugTraceTransactionsStream").finish_non_exhaustive() } @@ -336,7 +336,7 @@ impl<'a> DebugTraceBlockStream<'a> { } } -impl<'a> Stream for DebugTraceBlockStream<'a> { +impl Stream for DebugTraceBlockStream<'_> { type Item = DebugTraceBlockResult; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -344,7 +344,7 @@ impl<'a> Stream for DebugTraceBlockStream<'a> { } } -impl<'a> std::fmt::Debug for DebugTraceBlockStream<'a> { +impl std::fmt::Debug for DebugTraceBlockStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("DebugTraceBlockStream").finish_non_exhaustive() } diff --git a/crates/rpc/rpc-testing-util/src/trace.rs b/crates/rpc/rpc-testing-util/src/trace.rs index 13914a59eb3eb..c6dc16cf10635 100644 --- a/crates/rpc/rpc-testing-util/src/trace.rs +++ b/crates/rpc/rpc-testing-util/src/trace.rs @@ -114,7 +114,7 @@ pub struct TraceCallStream<'a> { stream: Pin + 'a>>, } -impl<'a> Stream for TraceCallStream<'a> { +impl Stream for TraceCallStream<'_> { type Item = TraceCallResult; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -122,7 +122,7 @@ impl<'a> Stream for TraceCallStream<'a> { } } -impl<'a> std::fmt::Debug for TraceCallStream<'a> { +impl std::fmt::Debug for TraceCallStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceCallStream").finish() } @@ -134,7 +134,7 @@ pub struct TraceFilterStream<'a> { stream: Pin + 'a>>, } -impl<'a> Stream for TraceFilterStream<'a> { +impl Stream for TraceFilterStream<'_> { type Item = TraceFilterResult; /// Attempts to pull out the next value of the stream. 
@@ -143,7 +143,7 @@ impl<'a> Stream for TraceFilterStream<'a> { } } -impl<'a> std::fmt::Debug for TraceFilterStream<'a> { +impl std::fmt::Debug for TraceFilterStream<'_> { /// Provides a debug representation of the `TraceFilterStream`. fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceFilterStream").finish_non_exhaustive() @@ -157,7 +157,7 @@ pub struct TraceGetStream<'a> { stream: Pin + 'a>>, } -impl<'a> Stream for TraceGetStream<'a> { +impl Stream for TraceGetStream<'_> { type Item = TraceGetResult; /// Attempts to pull out the next item of the stream @@ -166,7 +166,7 @@ impl<'a> Stream for TraceGetStream<'a> { } } -impl<'a> std::fmt::Debug for TraceGetStream<'a> { +impl std::fmt::Debug for TraceGetStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceGetStream").finish_non_exhaustive() } @@ -180,7 +180,7 @@ pub struct CallManyTraceStream<'a> { stream: Pin + 'a>>, } -impl<'a> Stream for CallManyTraceStream<'a> { +impl Stream for CallManyTraceStream<'_> { type Item = CallManyTraceResult; /// Polls for the next item from the stream. 
@@ -189,7 +189,7 @@ impl<'a> Stream for CallManyTraceStream<'a> { } } -impl<'a> std::fmt::Debug for CallManyTraceStream<'a> { +impl std::fmt::Debug for CallManyTraceStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("CallManyTraceStream").finish() } @@ -201,7 +201,7 @@ pub struct RawTransactionTraceStream<'a> { stream: RawTransactionTraceResult<'a>, } -impl<'a> Stream for RawTransactionTraceStream<'a> { +impl Stream for RawTransactionTraceStream<'_> { type Item = Result<(TraceResults, Bytes), (RpcError, Bytes)>; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -209,7 +209,7 @@ impl<'a> Stream for RawTransactionTraceStream<'a> { } } -impl<'a> std::fmt::Debug for RawTransactionTraceStream<'a> { +impl std::fmt::Debug for RawTransactionTraceStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("RawTransactionTraceStream").finish() } @@ -221,7 +221,7 @@ pub struct ReplayTransactionStream<'a> { stream: Pin + 'a>>, } -impl<'a> Stream for ReplayTransactionStream<'a> { +impl Stream for ReplayTransactionStream<'_> { type Item = ReplayTransactionResult; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -229,7 +229,7 @@ impl<'a> Stream for ReplayTransactionStream<'a> { } } -impl<'a> std::fmt::Debug for ReplayTransactionStream<'a> { +impl std::fmt::Debug for ReplayTransactionStream<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ReplayTransactionStream").finish() } @@ -393,7 +393,7 @@ impl<'a> TraceBlockStream<'a> { } } -impl<'a> Stream for TraceBlockStream<'a> { +impl Stream for TraceBlockStream<'_> { type Item = TraceBlockResult; fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { @@ -401,7 +401,7 @@ impl<'a> Stream for TraceBlockStream<'a> { } } -impl<'a> std::fmt::Debug for TraceBlockStream<'a> { +impl std::fmt::Debug for TraceBlockStream<'_> { fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("TraceBlockStream").finish_non_exhaustive() } diff --git a/crates/rpc/rpc-types-compat/Cargo.toml b/crates/rpc/rpc-types-compat/Cargo.toml index a9d82d95779c8..81b4def204f98 100644 --- a/crates/rpc/rpc-types-compat/Cargo.toml +++ b/crates/rpc/rpc-types-compat/Cargo.toml @@ -26,7 +26,4 @@ alloy-serde.workspace = true alloy-rpc-types-engine.workspace = true [dev-dependencies] -serde_json.workspace = true - -[features] -optimism = ["reth-primitives/optimism"] +serde_json.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 4665cd002ca4e..5399e50ce28a0 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -100,8 +100,6 @@ jsonrpsee = { workspace = true, features = ["client"] } js-tracer = ["revm-inspectors/js-tracer", "reth-rpc-eth-types/js-tracer"] optimism = [ "reth-primitives/optimism", - "reth-rpc-types-compat/optimism", "reth-provider/optimism", - "reth-rpc-eth-api/optimism", "reth-revm/optimism", ] diff --git a/crates/rpc/rpc/src/debug.rs b/crates/rpc/rpc/src/debug.rs index d7ee43720c199..fb04070397768 100644 --- a/crates/rpc/rpc/src/debug.rs +++ b/crates/rpc/rpc/src/debug.rs @@ -208,8 +208,8 @@ where .ok_or(EthApiError::HeaderNotFound(block_id))?; let ((cfg, block_env, _), block) = futures::try_join!( - self.inner.eth_api.evm_env_at(block_hash.into()), - self.inner.eth_api.block_with_senders(block_id), + self.eth_api().evm_env_at(block_hash.into()), + self.eth_api().block_with_senders(block_hash.into()), )?; let block = block.ok_or(EthApiError::HeaderNotFound(block_id))?; @@ -235,11 +235,11 @@ where tx_hash: B256, opts: GethDebugTracingOptions, ) -> Result { - let (transaction, block) = match self.inner.eth_api.transaction_and_block(tx_hash).await? { + let (transaction, block) = match self.eth_api().transaction_and_block(tx_hash).await? 
{ None => return Err(EthApiError::TransactionNotFound.into()), Some(res) => res, }; - let (cfg, block_env, _) = self.inner.eth_api.evm_env_at(block.hash().into()).await?; + let (cfg, block_env, _) = self.eth_api().evm_env_at(block.hash().into()).await?; // we need to get the state of the parent block because we're essentially replaying the // block the transaction is included in @@ -248,8 +248,7 @@ where let block_txs = block.into_transactions_ecrecovered(); let this = self.clone(); - self.inner - .eth_api + self.eth_api() .spawn_with_state_at_block(state_at, move |state| { // configure env for the target transaction let tx = transaction.into_recovered(); @@ -312,8 +311,7 @@ where GethDebugBuiltInTracerType::FourByteTracer => { let mut inspector = FourByteInspector::default(); let inspector = self - .inner - .eth_api + .eth_api() .spawn_with_call_at(call, at, overrides, move |db, env| { this.eth_api().inspect(db, env, &mut inspector)?; Ok(inspector) @@ -331,8 +329,7 @@ where ); let frame = self - .inner - .eth_api + .eth_api() .spawn_with_call_at(call, at, overrides, move |db, env| { let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; let frame = inspector @@ -353,8 +350,7 @@ where ); let frame = self - .inner - .eth_api + .eth_api() .spawn_with_call_at(call, at, overrides, move |db, env| { // wrapper is hack to get around 'higher-ranked lifetime error', // see @@ -434,11 +430,10 @@ where GethDebugTracerType::JsTracer(code) => { let config = tracer_config.into_json(); - let (_, _, at) = self.inner.eth_api.evm_env_at(at).await?; + let (_, _, at) = self.eth_api().evm_env_at(at).await?; let res = self - .inner - .eth_api + .eth_api() .spawn_with_call_at(call, at, overrides, move |db, env| { // wrapper is hack to get around 'higher-ranked lifetime error', see // @@ -464,8 +459,7 @@ where let mut inspector = TracingInspector::new(inspector_config); let (res, tx_gas_limit, inspector) = self - .inner - .eth_api + .eth_api() .spawn_with_call_at(call, at, 
overrides, move |db, env| { let (res, env) = this.eth_api().inspect(db, env, &mut inspector)?; Ok((res, env.tx.gas_limit, inspector)) @@ -499,14 +493,13 @@ where let target_block = block_number.unwrap_or_default(); let ((cfg, mut block_env, _), block) = futures::try_join!( - self.inner.eth_api.evm_env_at(target_block), - self.inner.eth_api.block_with_senders(target_block), + self.eth_api().evm_env_at(target_block), + self.eth_api().block_with_senders(target_block), )?; let opts = opts.unwrap_or_default(); let block = block.ok_or(EthApiError::HeaderNotFound(target_block))?; let GethDebugTracingCallOptions { tracing_options, mut state_overrides, .. } = opts; - let gas_limit = self.inner.eth_api.call_gas_limit(); // we're essentially replaying the transactions in the block here, hence we need the state // that points to the beginning of the block, which is the state at the parent block @@ -525,8 +518,7 @@ where let this = self.clone(); - self.inner - .eth_api + self.eth_api() .spawn_with_state_at_block(at.into(), move |state| { // the outer vec for the bundles let mut all_bundles = Vec::with_capacity(bundles.len()); @@ -547,7 +539,7 @@ where ), handler_cfg: cfg.handler_cfg, }; - let (res, _) = this.inner.eth_api.transact(&mut db, env)?; + let (res, _) = this.eth_api().transact(&mut db, env)?; db.commit(res.state); } } @@ -570,7 +562,6 @@ where cfg.clone(), block_env.clone(), tx, - gas_limit, &mut db, overrides, )?; @@ -603,28 +594,34 @@ where pub async fn debug_execution_witness( &self, block_id: BlockNumberOrTag, - include_preimages: bool, ) -> Result { let this = self.clone(); let block = this - .inner - .eth_api + .eth_api() .block_with_senders(block_id.into()) .await? 
.ok_or(EthApiError::HeaderNotFound(block_id.into()))?; - self.inner - .eth_api + self.eth_api() .spawn_with_state_at_block(block.parent_hash.into(), move |state_provider| { let db = StateProviderDatabase::new(&state_provider); let block_executor = this.inner.block_executor.executor(db); let mut hashed_state = HashedPostState::default(); let mut keys = HashMap::default(); + let mut codes = HashMap::default(); + let _ = block_executor .execute_with_state_witness( (&block.clone().unseal(), block.difficulty).into(), |statedb| { + codes = statedb + .cache + .contracts + .iter() + .map(|(hash, code)| (*hash, code.original_bytes())) + .collect(); + for (address, account) in &statedb.cache.accounts { let hashed_address = keccak256(address); hashed_state.accounts.insert( @@ -638,24 +635,14 @@ where ); if let Some(account) = &account.account { - if include_preimages { - keys.insert( - hashed_address, - alloy_rlp::encode(address).into(), - ); - } + keys.insert(hashed_address, address.to_vec().into()); for (slot, value) in &account.storage { let slot = B256::from(*slot); let hashed_slot = keccak256(slot); storage.storage.insert(hashed_slot, *value); - if include_preimages { - keys.insert( - hashed_slot, - alloy_rlp::encode(slot).into(), - ); - } + keys.insert(hashed_slot, slot.into()); } } } @@ -667,8 +654,8 @@ where state_provider.witness(Default::default(), hashed_state).map_err(Into::into)?; Ok(ExecutionWitness { state: HashMap::from_iter(state.into_iter()), - codes: Default::default(), - keys: include_preimages.then_some(keys), + codes, + keys: Some(keys), }) }) .await @@ -872,7 +859,7 @@ where /// /// Returns the bytes of the transaction for the given hash. 
async fn raw_transaction(&self, hash: B256) -> RpcResult> { - self.inner.eth_api.raw_transaction_by_hash(hash).await.map_err(Into::into) + self.eth_api().raw_transaction_by_hash(hash).await.map_err(Into::into) } /// Handler for `debug_getRawTransactions` @@ -966,10 +953,9 @@ where async fn debug_execution_witness( &self, block: BlockNumberOrTag, - include_preimages: bool, ) -> RpcResult { let _permit = self.acquire_trace_permit().await; - Self::debug_execution_witness(self, block, include_preimages).await.map_err(Into::into) + Self::debug_execution_witness(self, block).await.map_err(Into::into) } /// Handler for `debug_traceCall` diff --git a/crates/rpc/rpc/src/eth/bundle.rs b/crates/rpc/rpc/src/eth/bundle.rs index bede4599e1a1b..e97497786ede4 100644 --- a/crates/rpc/rpc/src/eth/bundle.rs +++ b/crates/rpc/rpc/src/eth/bundle.rs @@ -37,6 +37,11 @@ impl EthBundle { pub fn new(eth_api: Eth, blocking_task_guard: BlockingTaskGuard) -> Self { Self { inner: Arc::new(EthBundleInner { eth_api, blocking_task_guard }) } } + + /// Access the underlying `Eth` API. + pub fn eth_api(&self) -> &Eth { + &self.inner.eth_api + } } impl EthBundle @@ -103,7 +108,7 @@ where let block_id: alloy_rpc_types::BlockId = state_block_number.into(); // Note: the block number is considered the `parent` block: - let (cfg, mut block_env, at) = self.inner.eth_api.evm_env_at(block_id).await?; + let (cfg, mut block_env, at) = self.eth_api().evm_env_at(block_id).await?; // need to adjust the timestamp for the next block if let Some(timestamp) = timestamp { @@ -125,12 +130,12 @@ where } else if cfg.handler_cfg.spec_id.is_enabled_in(SpecId::LONDON) { let parent_block = block_env.number.saturating_to::(); // here we need to fetch the _next_ block's basefee based on the parent block - let parent = LoadPendingBlock::provider(&self.inner.eth_api) + let parent = LoadPendingBlock::provider(self.eth_api()) .header_by_number(parent_block) .map_err(Eth::Error::from_eth_err)? 
.ok_or(EthApiError::HeaderNotFound(parent_block.into()))?; if let Some(base_fee) = parent.next_block_base_fee( - LoadPendingBlock::provider(&self.inner.eth_api) + LoadPendingBlock::provider(self.eth_api()) .chain_spec() .base_fee_params_at_block(parent_block), ) { @@ -142,10 +147,9 @@ where // use the block number of the request block_env.number = U256::from(block_number); - let eth_api = self.inner.eth_api.clone(); + let eth_api = self.eth_api().clone(); - self.inner - .eth_api + self.eth_api() .spawn_with_state_at_block(at, move |state| { let coinbase = block_env.coinbase; let basefee = Some(block_env.basefee.to::()); diff --git a/crates/rpc/rpc/src/eth/filter.rs b/crates/rpc/rpc/src/eth/filter.rs index 274347404c93d..9efecf3dae7f3 100644 --- a/crates/rpc/rpc/src/eth/filter.rs +++ b/crates/rpc/rpc/src/eth/filter.rs @@ -19,14 +19,14 @@ use async_trait::async_trait; use jsonrpsee::{core::RpcResult, server::IdProvider}; use reth_chainspec::ChainInfo; use reth_node_api::EthApiTypes; -use reth_primitives::TransactionSignedEcRecovered; +use reth_primitives::{Receipt, SealedBlock, TransactionSignedEcRecovered}; use reth_provider::{BlockIdReader, BlockReader, EvmEnvProvider, ProviderError}; use reth_rpc_eth_api::{EthFilterApiServer, FullEthApiTypes, RpcTransaction, TransactionCompat}; use reth_rpc_eth_types::{ - logs_utils::{self, append_matching_block_logs}, - EthApiError, EthFilterConfig, EthFilterError, EthStateCache, EthSubscriptionIdProvider, + logs_utils::{self, append_matching_block_logs, ProviderOrBlock}, + EthApiError, EthFilterConfig, EthStateCache, EthSubscriptionIdProvider, }; -use reth_rpc_server_types::ToRpcResult; +use reth_rpc_server_types::{result::rpc_error_with_code, ToRpcResult}; use reth_rpc_types_compat::transaction::from_recovered; use reth_tasks::TaskSpawner; use reth_transaction_pool::{NewSubpoolTransactionStream, PoolTransaction, TransactionPool}; @@ -376,29 +376,34 @@ where FilterBlockOption::AtBlockHash(block_hash) => { // for all matching 
logs in the block // get the block header with the hash - let block = self + let header = self .provider .header_by_hash_or_number(block_hash.into())? .ok_or_else(|| ProviderError::HeaderNotFound(block_hash.into()))?; + let block_num_hash = BlockNumHash::new(header.number, block_hash); + // we also need to ensure that the receipts are available and return an error if // not, in case the block hash been reorged - let receipts = self - .eth_cache - .get_receipts(block_hash) + let (receipts, maybe_block) = self + .receipts_and_maybe_block( + &block_num_hash, + self.provider.chain_info()?.best_number, + ) .await? .ok_or(EthApiError::HeaderNotFound(block_hash.into()))?; let mut all_logs = Vec::new(); - let filter = FilteredParams::new(Some(filter)); - logs_utils::append_matching_block_logs( + append_matching_block_logs( &mut all_logs, - &self.provider, - &filter, - (block_hash, block.number).into(), + maybe_block + .map(|b| ProviderOrBlock::Block(b)) + .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), + &FilteredParams::new(Some(filter)), + block_num_hash, &receipts, false, - block.timestamp, + header.timestamp, )?; Ok(all_logs) @@ -454,7 +459,6 @@ where chain_info: ChainInfo, ) -> Result, EthFilterError> { trace!(target: "rpc::eth::filter", from=from_block, to=to_block, ?filter, "finding logs in range"); - let best_number = chain_info.best_number; if to_block < from_block { return Err(EthFilterError::InvalidBlockRangeParams) @@ -467,27 +471,6 @@ where let mut all_logs = Vec::new(); let filter_params = FilteredParams::new(Some(filter.clone())); - if (to_block == best_number) && (from_block == best_number) { - // only one block to check and it's the current best block which we can fetch directly - // Note: In case of a reorg, the best block's hash might have changed, hence we only - // return early of we were able to fetch the best block's receipts - // perf: we're fetching the best block here which is expected to be cached - if let Some((block, receipts)) = 
- self.eth_cache.get_block_and_receipts(chain_info.best_hash).await? - { - logs_utils::append_matching_block_logs( - &mut all_logs, - &self.provider, - &filter_params, - chain_info.into(), - &receipts, - false, - block.header.timestamp, - )?; - } - return Ok(all_logs) - } - // derive bloom filters from filter input, so we can check headers for matching logs let address_filter = FilteredParams::address_filter(&filter.address); let topics_filter = FilteredParams::topics_filter(&filter.topics); @@ -514,12 +497,17 @@ where .ok_or_else(|| ProviderError::HeaderNotFound(header.number.into()))?, }; - if let Some(receipts) = self.eth_cache.get_receipts(block_hash).await? { + let num_hash = BlockNumHash::new(header.number, block_hash); + if let Some((receipts, maybe_block)) = + self.receipts_and_maybe_block(&num_hash, chain_info.best_number).await? + { append_matching_block_logs( &mut all_logs, - &self.provider, + maybe_block + .map(|block| ProviderOrBlock::Block(block)) + .unwrap_or_else(|| ProviderOrBlock::Provider(&self.provider)), &filter_params, - BlockNumHash::new(header.number, block_hash), + num_hash, &receipts, false, header.timestamp, @@ -540,6 +528,25 @@ where Ok(all_logs) } + + /// Retrieves receipts and block from cache if near the tip (4 blocks), otherwise only receipts. + async fn receipts_and_maybe_block( + &self, + block_num_hash: &BlockNumHash, + best_number: u64, + ) -> Result>, Option)>, EthFilterError> { + // The last 4 blocks are most likely cached, so we can just fetch them + let cached_range = best_number.saturating_sub(4)..=best_number; + let receipts_block = if cached_range.contains(&block_num_hash.number) { + self.eth_cache + .get_block_and_receipts(block_num_hash.hash) + .await? + .map(|(b, r)| (r, Some(b))) + } else { + self.eth_cache.get_receipts(block_num_hash.hash).await?.map(|r| (r, None)) + }; + Ok(receipts_block) + } } /// All active filters @@ -611,7 +618,7 @@ where /// Returns all new pending transactions received since the last poll. 
async fn drain(&self) -> FilterChanges where - T: PoolTransaction, + T: PoolTransaction>, { let mut pending_txs = Vec::new(); let mut prepared_stream = self.txs_stream.lock().await; @@ -633,7 +640,7 @@ trait FullTransactionsFilter: fmt::Debug + Send + Sync + Unpin + 'static { impl FullTransactionsFilter for FullTransactionsReceiver where - T: PoolTransaction + 'static, + T: PoolTransaction> + 'static, TxCompat: TransactionCompat + 'static, { async fn drain(&self) -> FilterChanges { @@ -695,6 +702,55 @@ impl Iterator for BlockRangeInclusiveIter { } } +/// Errors that can occur in the handler implementation +#[derive(Debug, thiserror::Error)] +pub enum EthFilterError { + /// Filter not found. + #[error("filter not found")] + FilterNotFound(FilterId), + /// Invalid block range. + #[error("invalid block range params")] + InvalidBlockRangeParams, + /// Query scope is too broad. + #[error("query exceeds max block range {0}")] + QueryExceedsMaxBlocks(u64), + /// Query result is too large. + #[error("query exceeds max results {0}")] + QueryExceedsMaxResults(usize), + /// Error serving request in `eth_` namespace. + #[error(transparent)] + EthAPIError(#[from] EthApiError), + /// Error thrown when a spawned task failed to deliver a response. 
+ #[error("internal filter error")] + InternalError, +} + +impl From for jsonrpsee::types::error::ErrorObject<'static> { + fn from(err: EthFilterError) -> Self { + match err { + EthFilterError::FilterNotFound(_) => rpc_error_with_code( + jsonrpsee::types::error::INVALID_PARAMS_CODE, + "filter not found", + ), + err @ EthFilterError::InternalError => { + rpc_error_with_code(jsonrpsee::types::error::INTERNAL_ERROR_CODE, err.to_string()) + } + EthFilterError::EthAPIError(err) => err.into(), + err @ (EthFilterError::InvalidBlockRangeParams | + EthFilterError::QueryExceedsMaxBlocks(_) | + EthFilterError::QueryExceedsMaxResults(_)) => { + rpc_error_with_code(jsonrpsee::types::error::INVALID_PARAMS_CODE, err.to_string()) + } + } + } +} + +impl From for EthFilterError { + fn from(err: ProviderError) -> Self { + Self::EthAPIError(err.into()) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/rpc/rpc/src/eth/helpers/signer.rs b/crates/rpc/rpc/src/eth/helpers/signer.rs index b5109d09017a1..a5818aa494fd3 100644 --- a/crates/rpc/rpc/src/eth/helpers/signer.rs +++ b/crates/rpc/rpc/src/eth/helpers/signer.rs @@ -3,7 +3,6 @@ use std::collections::HashMap; use crate::EthApi; -use alloy_consensus::TxEnvelope; use alloy_dyn_abi::TypedData; use alloy_eips::eip2718::Decodable2718; use alloy_network::{eip2718::Encodable2718, EthereumWallet, TransactionBuilder}; @@ -91,7 +90,7 @@ impl EthSigner for DevSigner { let wallet = EthereumWallet::from(signer); // build and sign transaction with signer - let txn_envelope: TxEnvelope = + let txn_envelope = request.build(&wallet).await.map_err(|_| SignError::InvalidTransactionRequest)?; // decode transaction into signed transaction type diff --git a/crates/rpc/rpc/src/eth/helpers/state.rs b/crates/rpc/rpc/src/eth/helpers/state.rs index 006a0192f73a0..8a35842798bcc 100644 --- a/crates/rpc/rpc/src/eth/helpers/state.rs +++ b/crates/rpc/rpc/src/eth/helpers/state.rs @@ -48,7 +48,8 @@ mod tests { use alloy_primitives::{Address, StorageKey, 
StorageValue, U256}; use reth_chainspec::MAINNET; use reth_evm_ethereum::EthEvmConfig; - use reth_primitives::{constants::ETHEREUM_BLOCK_GAS_LIMIT, KECCAK_EMPTY}; + use reth_network_api::noop::NoopNetwork; + use reth_primitives::constants::ETHEREUM_BLOCK_GAS_LIMIT; use reth_provider::test_utils::{ExtendedAccount, MockEthProvider, NoopProvider}; use reth_rpc_eth_api::helpers::EthState; use reth_rpc_eth_types::{ @@ -61,7 +62,7 @@ mod tests { use reth_transaction_pool::test_utils::{testing_pool, TestPool}; use std::collections::HashMap; - fn noop_eth_api() -> EthApi { + fn noop_eth_api() -> EthApi { let pool = testing_pool(); let evm_config = EthEvmConfig::new(MAINNET.clone()); @@ -70,7 +71,7 @@ mod tests { EthApi::new( NoopProvider::default(), pool, - (), + NoopNetwork::default(), cache.clone(), GasPriceOracle::new(NoopProvider::default(), Default::default(), cache.clone()), ETHEREUM_BLOCK_GAS_LIMIT, @@ -102,7 +103,7 @@ mod tests { GasPriceOracle::new(mock_provider, Default::default(), cache.clone()), ETHEREUM_BLOCK_GAS_LIMIT, DEFAULT_MAX_SIMULATE_BLOCKS, - DEFAULT_ETH_PROOF_WINDOW, + DEFAULT_ETH_PROOF_WINDOW + 1, BlockingTaskPool::build().expect("failed to build tracing pool"), FeeHistoryCache::new(cache, FeeHistoryCacheConfig::default()), evm_config, @@ -139,16 +140,4 @@ mod tests { let account = eth_api.get_account(address, Default::default()).await.unwrap(); assert!(account.is_none()); } - - #[tokio::test] - async fn test_get_account_empty() { - let address = Address::random(); - let accounts = HashMap::from([(address, ExtendedAccount::new(0, U256::ZERO))]); - let eth_api = mock_eth_api(accounts); - - let account = eth_api.get_account(address, Default::default()).await.unwrap(); - let expected_account = - alloy_rpc_types::Account { code_hash: KECCAK_EMPTY, ..Default::default() }; - assert_eq!(Some(expected_account), account); - } } diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 35972d9136b97..31db343a104f8 100644 --- 
a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -106,11 +106,10 @@ where value: op.value, r#type: match op.kind { TransferKind::Call => OperationType::OpTransfer, - TransferKind::Create | TransferKind::EofCreate => { - OperationType::OpCreate - } + TransferKind::Create => OperationType::OpCreate, TransferKind::Create2 => OperationType::OpCreate2, TransferKind::SelfDestruct => OperationType::OpSelfDestruct, + TransferKind::EofCreate => OperationType::OpEofCreate, }, }) .collect::>() diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs index 185383d811b74..687762b74b5ed 100644 --- a/crates/rpc/rpc/src/trace.rs +++ b/crates/rpc/rpc/src/trace.rs @@ -120,7 +120,7 @@ where ) -> Result { let tx = recover_raw_transaction(tx)?; - let (cfg, block, at) = self.inner.eth_api.evm_env_at(block_id.unwrap_or_default()).await?; + let (cfg, block, at) = self.eth_api().evm_env_at(block_id.unwrap_or_default()).await?; let env = EnvWithHandlerCfg::new_with_cfg_env( cfg, @@ -130,8 +130,7 @@ where let config = TracingInspectorConfig::from_parity_config(&trace_types); - self.inner - .eth_api + self.eth_api() .spawn_trace_at_with_state(env, config, at, move |inspector, res, db| { inspector .into_parity_builder() @@ -151,9 +150,8 @@ where block_id: Option, ) -> Result, Eth::Error> { let at = block_id.unwrap_or(BlockId::pending()); - let (cfg, block_env, at) = self.inner.eth_api.evm_env_at(at).await?; + let (cfg, block_env, at) = self.eth_api().evm_env_at(at).await?; - let gas_limit = self.inner.eth_api.call_gas_limit(); let this = self.clone(); // execute all transactions on top of each other and record the traces self.eth_api() @@ -168,7 +166,6 @@ where cfg.clone(), block_env.clone(), call, - gas_limit, &mut db, Default::default(), )?; @@ -204,8 +201,7 @@ where trace_types: HashSet, ) -> Result { let config = TracingInspectorConfig::from_parity_config(&trace_types); - self.inner - .eth_api + self.eth_api() .spawn_trace_transaction_in_block(hash, 
config, move |_, inspector, res, db| { let trace_res = inspector .into_parity_builder() @@ -287,7 +283,7 @@ where let mut block_traces = Vec::with_capacity(blocks.len()); for block in &blocks { let matcher = matcher.clone(); - let traces = self.inner.eth_api.trace_block_until( + let traces = self.eth_api().trace_block_until( block.number.into(), None, TracingInspectorConfig::default_parity(), @@ -347,8 +343,7 @@ where &self, hash: B256, ) -> Result>, Eth::Error> { - self.inner - .eth_api + self.eth_api() .spawn_trace_transaction_in_block( hash, TracingInspectorConfig::default_parity(), @@ -366,7 +361,7 @@ where &self, block_id: BlockId, ) -> Result>, Eth::Error> { - let traces = self.inner.eth_api.trace_block_with( + let traces = self.eth_api().trace_block_with( block_id, TracingInspectorConfig::default_parity(), |tx_info, inspector, _, _, _| { @@ -376,7 +371,7 @@ where }, ); - let block = self.inner.eth_api.block(block_id); + let block = self.eth_api().block(block_id); let (maybe_traces, maybe_block) = futures::try_join!(traces, block)?; let mut maybe_traces = @@ -401,8 +396,7 @@ where block_id: BlockId, trace_types: HashSet, ) -> Result>, Eth::Error> { - self.inner - .eth_api + self.eth_api() .trace_block_with( block_id, TracingInspectorConfig::from_parity_config(&trace_types), @@ -433,8 +427,7 @@ where &self, tx_hash: B256, ) -> Result, Eth::Error> { - self.inner - .eth_api + self.eth_api() .spawn_trace_transaction_in_block_with_inspector( tx_hash, OpcodeGasInspector::default(), @@ -458,8 +451,7 @@ where block_id: BlockId, ) -> Result, Eth::Error> { let res = self - .inner - .eth_api + .eth_api() .trace_block_inspector( block_id, OpcodeGasInspector::default, @@ -475,7 +467,7 @@ where let Some(transactions) = res else { return Ok(None) }; - let Some(block) = self.inner.eth_api.block(block_id).await? else { return Ok(None) }; + let Some(block) = self.eth_api().block(block_id).await? 
else { return Ok(None) }; Ok(Some(BlockOpcodeGas { block_hash: block.hash(), diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index 4932a563409e6..5e26935ca1ba7 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -41,12 +41,12 @@ where tx: &Tx, content: &mut BTreeMap>, ) where - Tx: PoolTransaction, + Tx: PoolTransaction>, RpcTxB: TransactionCompat, { content.entry(tx.sender()).or_default().insert( tx.nonce().to_string(), - from_recovered::(tx.clone().into_consensus()), + from_recovered::(tx.clone().into_consensus().into()), ); } @@ -91,12 +91,12 @@ where trace!(target: "rpc::eth", "Serving txpool_inspect"); #[inline] - fn insert>( + fn insert>>( tx: &T, inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let tx = tx.clone().into_consensus(); + let tx: TransactionSignedEcRecovered = tx.clone().into_consensus().into(); entry.insert( tx.nonce().to_string(), TxpoolInspectSummary { diff --git a/crates/storage/db-api/src/cursor.rs b/crates/storage/db-api/src/cursor.rs index 134819a8e2cd8..585aa4947a282 100644 --- a/crates/storage/db-api/src/cursor.rs +++ b/crates/storage/db-api/src/cursor.rs @@ -149,7 +149,7 @@ where } } -impl<'cursor, T: Table, CURSOR: DbCursorRO> Iterator for Walker<'cursor, T, CURSOR> { +impl> Iterator for Walker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { let start = self.start.take(); @@ -174,7 +174,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> Walker<'cursor, T, CURSOR> { } } -impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> Walker<'cursor, T, CURSOR> { +impl + DbCursorRO> Walker<'_, T, CURSOR> { /// Delete current item that walker points to. 
pub fn delete_current(&mut self) -> Result<(), DatabaseError> { self.start.take(); @@ -217,7 +217,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> ReverseWalker<'cursor, T, CURSOR> } } -impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> ReverseWalker<'cursor, T, CURSOR> { +impl + DbCursorRO> ReverseWalker<'_, T, CURSOR> { /// Delete current item that walker points to. pub fn delete_current(&mut self) -> Result<(), DatabaseError> { self.start.take(); @@ -225,7 +225,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> ReverseWalker<'cu } } -impl<'cursor, T: Table, CURSOR: DbCursorRO> Iterator for ReverseWalker<'cursor, T, CURSOR> { +impl> Iterator for ReverseWalker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { @@ -266,7 +266,7 @@ where } } -impl<'cursor, T: Table, CURSOR: DbCursorRO> Iterator for RangeWalker<'cursor, T, CURSOR> { +impl> Iterator for RangeWalker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { @@ -316,7 +316,7 @@ impl<'cursor, T: Table, CURSOR: DbCursorRO> RangeWalker<'cursor, T, CURSOR> { } } -impl<'cursor, T: Table, CURSOR: DbCursorRW + DbCursorRO> RangeWalker<'cursor, T, CURSOR> { +impl + DbCursorRO> RangeWalker<'_, T, CURSOR> { /// Delete current item that walker points to. pub fn delete_current(&mut self) -> Result<(), DatabaseError> { self.start.take(); @@ -349,7 +349,7 @@ where } } -impl<'cursor, T: DupSort, CURSOR: DbCursorRW + DbDupCursorRO> DupWalker<'cursor, T, CURSOR> { +impl + DbDupCursorRO> DupWalker<'_, T, CURSOR> { /// Delete current item that walker points to. 
pub fn delete_current(&mut self) -> Result<(), DatabaseError> { self.start.take(); @@ -357,7 +357,7 @@ impl<'cursor, T: DupSort, CURSOR: DbCursorRW + DbDupCursorRO> DupWalker<'c } } -impl<'cursor, T: DupSort, CURSOR: DbDupCursorRO> Iterator for DupWalker<'cursor, T, CURSOR> { +impl> Iterator for DupWalker<'_, T, CURSOR> { type Item = Result, DatabaseError>; fn next(&mut self) -> Option { let start = self.start.take(); diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index ba012cf68af16..a075f77246378 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -50,6 +50,7 @@ derive_more.workspace = true paste.workspace = true rustc-hash = { workspace = true, optional = true } sysinfo = { version = "0.31", default-features = false, features = ["system"] } +parking_lot = { workspace = true, optional = true } # arbitrary utils strum = { workspace = true, features = ["derive"], optional = true } @@ -61,6 +62,7 @@ rand.workspace = true serde_json.workspace = true tempfile.workspace = true test-fuzz.workspace = true +parking_lot.workspace = true pprof = { workspace = true, features = [ "flamegraph", @@ -88,7 +90,7 @@ mdbx = [ "dep:strum", "dep:rustc-hash", ] -test-utils = ["dep:tempfile", "arbitrary"] +test-utils = ["dep:tempfile", "arbitrary", "parking_lot"] bench = [] arbitrary = ["reth-primitives/arbitrary", "reth-db-api/arbitrary"] optimism = [] diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 8feb6c90ab27e..2ff2789ea6982 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -180,7 +180,12 @@ struct MetricsHandler { /// If `true`, the backtrace of transaction has already been recorded and logged. /// See [`MetricsHandler::log_backtrace_on_long_read_transaction`]. backtrace_recorded: AtomicBool, + /// Shared database environment metrics. 
env_metrics: Arc, + /// Backtrace of the location where the transaction has been opened. Reported only with debug + /// assertions, because capturing the backtrace on every transaction opening is expensive. + #[cfg(debug_assertions)] + open_backtrace: Backtrace, _marker: PhantomData, } @@ -193,6 +198,8 @@ impl MetricsHandler { close_recorded: false, record_backtrace: true, backtrace_recorded: AtomicBool::new(false), + #[cfg(debug_assertions)] + open_backtrace: Backtrace::force_capture(), env_metrics, _marker: PhantomData, } @@ -232,11 +239,22 @@ impl MetricsHandler { let open_duration = self.start.elapsed(); if open_duration >= self.long_transaction_duration { self.backtrace_recorded.store(true, Ordering::Relaxed); + #[cfg(debug_assertions)] + let message = format!( + "The database read transaction has been open for too long. Open backtrace:\n{}\n\nCurrent backtrace:\n{}", + self.open_backtrace, + Backtrace::force_capture() + ); + #[cfg(not(debug_assertions))] + let message = format!( + "The database read transaction has been open for too long. Backtrace:\n{}", + Backtrace::force_capture() + ); warn!( target: "storage::db::mdbx", ?open_duration, %self.txn_id, - "The database read transaction has been open for too long. 
Backtrace:\n{}", Backtrace::force_capture() + "{message}" ); } } diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index a9f073d7b5462..7090b4262fd78 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -44,6 +44,7 @@ pub use reth_db_api::*; pub mod test_utils { use super::*; use crate::mdbx::DatabaseArguments; + use parking_lot::RwLock; use reth_db_api::{ database::Database, database_metrics::{DatabaseMetadata, DatabaseMetadataValue, DatabaseMetrics}, @@ -52,6 +53,7 @@ pub mod test_utils { use reth_fs_util; use reth_libmdbx::MaxReadTransactionDuration; use std::{ + fmt::Formatter, path::{Path, PathBuf}, sync::Arc, }; @@ -69,10 +71,19 @@ pub mod test_utils { pub const ERROR_TEMPDIR: &str = "Not able to create a temporary directory."; /// A database will delete the db dir when dropped. - #[derive(Debug)] pub struct TempDatabase { db: Option, path: PathBuf, + /// Executed right before a database transaction is created. + pre_tx_hook: RwLock>, + /// Executed right after a database transaction is created. + post_tx_hook: RwLock>, + } + + impl std::fmt::Debug for TempDatabase { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TempDatabase").field("db", &self.db).field("path", &self.path).finish() + } } impl Drop for TempDatabase { @@ -85,6 +96,16 @@ pub mod test_utils { } impl TempDatabase { + /// Create new [`TempDatabase`] instance. + pub fn new(db: DB, path: PathBuf) -> Self { + Self { + db: Some(db), + path, + pre_tx_hook: RwLock::new(Box::new(|| ())), + post_tx_hook: RwLock::new(Box::new(|| ())), + } + } + /// Returns the reference to inner db. pub fn db(&self) -> &DB { self.db.as_ref().unwrap() @@ -99,13 +120,28 @@ pub mod test_utils { pub fn into_inner_db(mut self) -> DB { self.db.take().unwrap() // take out db to avoid clean path in drop fn } + + /// Sets [`TempDatabase`] new pre transaction creation hook. 
+ pub fn set_pre_transaction_hook(&self, hook: Box) { + let mut db_hook = self.pre_tx_hook.write(); + *db_hook = hook; + } + + /// Sets [`TempDatabase`] new post transaction creation hook. + pub fn set_post_transaction_hook(&self, hook: Box) { + let mut db_hook = self.post_tx_hook.write(); + *db_hook = hook; + } } impl Database for TempDatabase { type TX = ::TX; type TXMut = ::TXMut; fn tx(&self) -> Result { - self.db().tx() + self.pre_tx_hook.read()(); + let tx = self.db().tx()?; + self.post_tx_hook.read()(); + Ok(tx) } fn tx_mut(&self) -> Result { @@ -150,7 +186,7 @@ pub mod test_utils { ) .expect(&emsg); - Arc::new(TempDatabase { db: Some(db), path }) + Arc::new(TempDatabase::new(db, path)) } /// Create read/write database for testing @@ -162,7 +198,7 @@ pub mod test_utils { .with_max_read_transaction_duration(Some(MaxReadTransactionDuration::Unbounded)), ) .expect(ERROR_DB_CREATION); - Arc::new(TempDatabase { db: Some(db), path }) + Arc::new(TempDatabase::new(db, path)) } /// Create read only database for testing @@ -175,7 +211,7 @@ pub mod test_utils { init_db(path.as_path(), args.clone()).expect(ERROR_DB_CREATION); } let db = open_db_read_only(path.as_path(), args).expect(ERROR_DB_OPEN); - Arc::new(TempDatabase { db: Some(db), path }) + Arc::new(TempDatabase::new(db, path)) } } diff --git a/crates/storage/db/src/static_file/cursor.rs b/crates/storage/db/src/static_file/cursor.rs index f14e023087739..a9eadd9c12a60 100644 --- a/crates/storage/db/src/static_file/cursor.rs +++ b/crates/storage/db/src/static_file/cursor.rs @@ -115,7 +115,7 @@ impl<'a> From<&'a B256> for KeyOrNumber<'a> { } } -impl<'a> From for KeyOrNumber<'a> { +impl From for KeyOrNumber<'_> { fn from(value: u64) -> Self { KeyOrNumber::Number(value) } diff --git a/crates/storage/libmdbx-rs/src/codec.rs b/crates/storage/libmdbx-rs/src/codec.rs index a97ea28ca2ae9..c78f79db9f944 100644 --- a/crates/storage/libmdbx-rs/src/codec.rs +++ b/crates/storage/libmdbx-rs/src/codec.rs @@ -22,7 +22,7 @@ pub 
trait TableObject: Sized { } } -impl<'tx> TableObject for Cow<'tx, [u8]> { +impl TableObject for Cow<'_, [u8]> { fn decode(_: &[u8]) -> Result { unreachable!() } diff --git a/crates/storage/libmdbx-rs/src/cursor.rs b/crates/storage/libmdbx-rs/src/cursor.rs index d007cc03e490f..3deff0c249bcd 100644 --- a/crates/storage/libmdbx-rs/src/cursor.rs +++ b/crates/storage/libmdbx-rs/src/cursor.rs @@ -539,7 +539,7 @@ where }, } -impl<'cur, K, Key, Value> IntoIter<'cur, K, Key, Value> +impl IntoIter<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, @@ -551,7 +551,7 @@ where } } -impl<'cur, K, Key, Value> Iterator for IntoIter<'cur, K, Key, Value> +impl Iterator for IntoIter<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, @@ -646,7 +646,7 @@ where } } -impl<'cur, K, Key, Value> Iterator for Iter<'cur, K, Key, Value> +impl Iterator for Iter<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, @@ -736,7 +736,7 @@ where } } -impl<'cur, K, Key, Value> fmt::Debug for IterDup<'cur, K, Key, Value> +impl fmt::Debug for IterDup<'_, K, Key, Value> where K: TransactionKind, Key: TableObject, diff --git a/crates/storage/nippy-jar/src/compression/zstd.rs b/crates/storage/nippy-jar/src/compression/zstd.rs index 494d79de52f6b..500247d176771 100644 --- a/crates/storage/nippy-jar/src/compression/zstd.rs +++ b/crates/storage/nippy-jar/src/compression/zstd.rs @@ -266,13 +266,13 @@ mod dictionaries_serde { #[derive(Serialize, Deserialize, Deref)] pub(crate) struct ZstdDictionaries<'a>(Vec>); -impl<'a> std::fmt::Debug for ZstdDictionaries<'a> { +impl std::fmt::Debug for ZstdDictionaries<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("ZstdDictionaries").field("num", &self.len()).finish_non_exhaustive() } } -impl<'a> ZstdDictionaries<'a> { +impl ZstdDictionaries<'_> { #[cfg(test)] /// Creates [`ZstdDictionaries`]. 
pub(crate) fn new(raw: Vec) -> Self { @@ -321,7 +321,7 @@ pub(crate) enum ZstdDictionary<'a> { Loaded(DecoderDictionary<'a>), } -impl<'a> ZstdDictionary<'a> { +impl ZstdDictionary<'_> { /// Returns a reference to the expected `RawDictionary` pub(crate) const fn raw(&self) -> Option<&RawDictionary> { match self { @@ -339,7 +339,7 @@ impl<'a> ZstdDictionary<'a> { } } -impl<'de, 'a> Deserialize<'de> for ZstdDictionary<'a> { +impl<'de> Deserialize<'de> for ZstdDictionary<'_> { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -349,7 +349,7 @@ impl<'de, 'a> Deserialize<'de> for ZstdDictionary<'a> { } } -impl<'a> Serialize for ZstdDictionary<'a> { +impl Serialize for ZstdDictionary<'_> { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -362,7 +362,7 @@ impl<'a> Serialize for ZstdDictionary<'a> { } #[cfg(test)] -impl<'a> PartialEq for ZstdDictionary<'a> { +impl PartialEq for ZstdDictionary<'_> { fn eq(&self, other: &Self) -> bool { if let (Self::Raw(a), Self::Raw(b)) = (self, &other) { return a == b diff --git a/crates/storage/nippy-jar/src/cursor.rs b/crates/storage/nippy-jar/src/cursor.rs index 7af55fd436e46..2677648272997 100644 --- a/crates/storage/nippy-jar/src/cursor.rs +++ b/crates/storage/nippy-jar/src/cursor.rs @@ -18,7 +18,7 @@ pub struct NippyJarCursor<'a, H = ()> { row: u64, } -impl<'a, H: NippyJarHeader> std::fmt::Debug for NippyJarCursor<'a, H> { +impl std::fmt::Debug for NippyJarCursor<'_, H> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("NippyJarCursor").field("config", &self.jar).finish_non_exhaustive() } diff --git a/crates/storage/provider/src/providers/blockchain_provider.rs b/crates/storage/provider/src/providers/blockchain_provider.rs index f5f838293eb41..54f28b77b9125 100644 --- a/crates/storage/provider/src/providers/blockchain_provider.rs +++ b/crates/storage/provider/src/providers/blockchain_provider.rs @@ -14,21 +14,27 @@ use reth_chain_state::{ BlockState, 
CanonicalInMemoryState, ForkChoiceNotifications, ForkChoiceSubscriptions, MemoryOverlayStateProvider, }; -use reth_chainspec::ChainInfo; +use reth_chainspec::{ChainInfo, EthereumHardforks}; +use reth_db::models::BlockNumberAddress; use reth_db_api::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_execution_types::ExecutionOutcome; +use reth_execution_types::{BundleStateInit, ExecutionOutcome, RevertsInit}; use reth_node_types::NodeTypesWithDB; use reth_primitives::{ - Account, Block, BlockWithSenders, EthereumHardforks, Header, Receipt, SealedBlock, - SealedBlockWithSenders, SealedHeader, TransactionMeta, TransactionSigned, - TransactionSignedNoHash, Withdrawal, Withdrawals, + Account, Block, BlockWithSenders, Header, Receipt, SealedBlock, SealedBlockWithSenders, + SealedHeader, StorageEntry, TransactionMeta, TransactionSigned, TransactionSignedNoHash, + Withdrawal, Withdrawals, }; use reth_prune_types::{PruneCheckpoint, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; +use reth_storage_api::StorageChangeSetReader; use reth_storage_errors::provider::ProviderResult; -use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; +use revm::{ + db::states::PlainStorageRevert, + primitives::{BlockEnv, CfgEnvWithHandlerCfg}, +}; use std::{ + collections::{hash_map, HashMap}, ops::{Add, Bound, RangeBounds, RangeInclusive, Sub}, sync::Arc, time::Instant, @@ -122,6 +128,145 @@ impl BlockchainProvider2 { (start, end) } + /// Return the last N blocks of state, recreating the [`ExecutionOutcome`]. + /// + /// If the range is empty, or there are no blocks for the given range, then this returns `None`. + pub fn get_state( + &self, + range: RangeInclusive, + ) -> ProviderResult> { + if range.is_empty() { + return Ok(None) + } + let start_block_number = *range.start(); + let end_block_number = *range.end(); + + // We are not removing block meta as it is used to get block changesets. 
+ let mut block_bodies = Vec::new(); + for block_num in range.clone() { + let block_body = self + .block_body_indices(block_num)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(block_num))?; + block_bodies.push((block_num, block_body)) + } + + // get transaction receipts + let Some(from_transaction_num) = block_bodies.first().map(|body| body.1.first_tx_num()) + else { + return Ok(None) + }; + let Some(to_transaction_num) = block_bodies.last().map(|body| body.1.last_tx_num()) else { + return Ok(None) + }; + + let mut account_changeset = Vec::new(); + for block_num in range.clone() { + let changeset = + self.account_block_changeset(block_num)?.into_iter().map(|elem| (block_num, elem)); + account_changeset.extend(changeset); + } + + let mut storage_changeset = Vec::new(); + for block_num in range { + let changeset = self.storage_changeset(block_num)?; + storage_changeset.extend(changeset); + } + + let (state, reverts) = + self.populate_bundle_state(account_changeset, storage_changeset, end_block_number)?; + + let mut receipt_iter = + self.receipts_by_tx_range(from_transaction_num..=to_transaction_num)?.into_iter(); + + let mut receipts = Vec::with_capacity(block_bodies.len()); + // loop break if we are at the end of the blocks. 
+ for (_, block_body) in block_bodies { + let mut block_receipts = Vec::with_capacity(block_body.tx_count as usize); + for tx_num in block_body.tx_num_range() { + let receipt = + receipt_iter.next().ok_or(ProviderError::ReceiptNotFound(tx_num.into()))?; + block_receipts.push(Some(receipt)); + } + receipts.push(block_receipts); + } + + Ok(Some(ExecutionOutcome::new_init( + state, + reverts, + // We skip new contracts since we never delete them from the database + Vec::new(), + receipts.into(), + start_block_number, + Vec::new(), + ))) + } + + /// Populate a [`BundleStateInit`] and [`RevertsInit`] using cursors over the + /// [`reth_db::PlainAccountState`] and [`reth_db::PlainStorageState`] tables, based on the given + /// storage and account changesets. + fn populate_bundle_state( + &self, + account_changeset: Vec<(u64, AccountBeforeTx)>, + storage_changeset: Vec<(BlockNumberAddress, StorageEntry)>, + block_range_end: BlockNumber, + ) -> ProviderResult<(BundleStateInit, RevertsInit)> { + let mut state: BundleStateInit = HashMap::new(); + let mut reverts: RevertsInit = HashMap::new(); + let state_provider = self.state_by_block_number_or_tag(block_range_end.into())?; + + // add account changeset changes + for (block_number, account_before) in account_changeset.into_iter().rev() { + let AccountBeforeTx { info: old_info, address } = account_before; + match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let new_info = state_provider.basic_account(address)?; + entry.insert((old_info, new_info, HashMap::new())); + } + hash_map::Entry::Occupied(mut entry) => { + // overwrite old account state. + entry.get_mut().0 = old_info; + } + } + // insert old info into reverts. 
+ reverts.entry(block_number).or_default().entry(address).or_default().0 = Some(old_info); + } + + // add storage changeset changes + for (block_and_address, old_storage) in storage_changeset.into_iter().rev() { + let BlockNumberAddress((block_number, address)) = block_and_address; + // get account state or insert from plain state. + let account_state = match state.entry(address) { + hash_map::Entry::Vacant(entry) => { + let present_info = state_provider.basic_account(address)?; + entry.insert((present_info, present_info, HashMap::new())) + } + hash_map::Entry::Occupied(entry) => entry.into_mut(), + }; + + // match storage. + match account_state.2.entry(old_storage.key) { + hash_map::Entry::Vacant(entry) => { + let new_storage_value = + state_provider.storage(address, old_storage.key)?.unwrap_or_default(); + entry.insert((old_storage.value, new_storage_value)); + } + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().0 = old_storage.value; + } + }; + + reverts + .entry(block_number) + .or_default() + .entry(address) + .or_default() + .1 + .push(old_storage); + } + + Ok((state, reverts)) + } + /// Fetches a range of data from both in-memory state and persistent storage while a predicate /// is met. /// @@ -268,12 +413,10 @@ impl BlockchainProvider2 { // Get the last block number stored in the storage which does NOT overlap with in-memory // chain. - let mut last_database_block_number = provider.last_block_number()?; - if let Some(lowest_in_mem_block) = in_mem_chain.last() { - if lowest_in_mem_block.number() <= last_database_block_number { - last_database_block_number = lowest_in_mem_block.number().saturating_sub(1); - } - } + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; // Get the next tx number for the last block stored in the storage, which marks the start of // the in-memory state. 
@@ -320,11 +463,18 @@ impl BlockchainProvider2 { let block_tx_count = block_state.block_ref().block().body.transactions.len(); let remaining = (tx_range.end() - tx_range.start() + 1) as usize; - // This should only be more than 0 in the first iteration, in case of a partial range + // If the transaction range start is equal or higher than the next block first + // transaction, advance + if *tx_range.start() >= in_memory_tx_num + block_tx_count as u64 { + in_memory_tx_num += block_tx_count as u64; + continue + } + + // This should only be more than 0 once, in case of a partial range inside a block. let skip = (tx_range.start() - in_memory_tx_num) as usize; items.extend(fetch_from_block_state( - skip..=(remaining.min(block_tx_count) - 1), + skip..=skip + (remaining.min(block_tx_count - skip) - 1), block_state, )?); @@ -361,12 +511,10 @@ impl BlockchainProvider2 { // Get the last block number stored in the database which does NOT overlap with in-memory // chain. - let mut last_database_block_number = provider.last_block_number()?; - if let Some(lowest_in_mem_block) = in_mem_chain.last() { - if lowest_in_mem_block.number() <= last_database_block_number { - last_database_block_number = lowest_in_mem_block.number().saturating_sub(1); - } - } + let last_database_block_number = in_mem_chain + .last() + .map(|b| Ok(b.anchor().number)) + .unwrap_or_else(|| provider.last_block_number())?; // Get the next tx number for the last block stored in the database and consider it the // first tx number of the in-memory state @@ -385,7 +533,7 @@ impl BlockchainProvider2 { // Iterate from the lowest block to the highest for block_state in in_mem_chain.into_iter().rev() { - let executed_block = block_state.block(); + let executed_block = block_state.block_ref(); let block = executed_block.block(); for tx_index in 0..block.body.transactions.len() { @@ -486,7 +634,7 @@ impl HeaderProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( (*block_hash).into(), |db_provider| 
db_provider.header(block_hash), - |block_state| Ok(Some(block_state.block().block().header.header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), ) } @@ -494,7 +642,7 @@ impl HeaderProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( num.into(), |db_provider| db_provider.header_by_number(num), - |block_state| Ok(Some(block_state.block().block().header.header().clone())), + |block_state| Ok(Some(block_state.block_ref().block().header.header().clone())), ) } @@ -532,7 +680,7 @@ impl HeaderProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.headers_range(range), - |block_state, _| Some(block_state.block().block().header.header().clone()), + |block_state, _| Some(block_state.block_ref().block().header.header().clone()), |_| true, ) } @@ -541,7 +689,7 @@ impl HeaderProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( number.into(), |db_provider| db_provider.sealed_header(number), - |block_state| Ok(Some(block_state.block().block().header.clone())), + |block_state| Ok(Some(block_state.block_ref().block().header.clone())), ) } @@ -552,7 +700,7 @@ impl HeaderProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.sealed_headers_range(range), - |block_state, _| Some(block_state.block().block().header.clone()), + |block_state, _| Some(block_state.block_ref().block().header.clone()), |_| true, ) } @@ -566,7 +714,8 @@ impl HeaderProvider for BlockchainProvider2 { range, |db_provider, range, predicate| db_provider.sealed_headers_while(range, predicate), |block_state, predicate| { - Some(block_state.block().block().header.clone()).filter(|header| predicate(header)) + let header = &block_state.block_ref().block().header; + predicate(header).then(|| header.clone()) }, predicate, ) @@ -644,7 +793,7 @@ impl BlockReader for BlockchainProvider2 
{ self.get_in_memory_or_storage_by_block( hash.into(), |db_provider| db_provider.find_block_by_hash(hash, source), - |block_state| Ok(Some(block_state.block().block().clone().unseal())), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), ) } BlockSource::Pending => { @@ -657,7 +806,7 @@ impl BlockReader for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.block(id), - |block_state| Ok(Some(block_state.block().block().clone().unseal())), + |block_state| Ok(Some(block_state.block_ref().block().clone().unseal())), ) } @@ -687,7 +836,7 @@ impl BlockReader for BlockchainProvider2 { return Ok(Some(Vec::new())) } - Ok(Some(block_state.block().block().body.ommers.clone())) + Ok(Some(block_state.block_ref().block().body.ommers.clone())) }, ) } @@ -696,34 +845,34 @@ impl BlockReader for BlockchainProvider2 { &self, number: BlockNumber, ) -> ProviderResult> { - if let Some(indices) = self.database.block_body_indices(number)? { - Ok(Some(indices)) - } else if let Some(state) = self.canonical_in_memory_state.state_by_number(number) { - // we have to construct the stored indices for the in memory blocks - // - // To calculate this we will fetch the anchor block and walk forward from all parents - let mut parent_chain = state.parent_state_chain(); - parent_chain.reverse(); - let anchor_num = state.anchor().number; - let mut stored_indices = self - .database - .block_body_indices(anchor_num)? 
- .ok_or(ProviderError::BlockBodyIndicesNotFound(anchor_num))?; - stored_indices.first_tx_num = stored_indices.next_tx_num(); - - for state in parent_chain { - let txs = state.block().block.body.transactions.len() as u64; - if state.block().block().number == number { - stored_indices.tx_count = txs; - } else { - stored_indices.first_tx_num += txs; + self.get_in_memory_or_storage_by_block( + number.into(), + |db_provider| db_provider.block_body_indices(number), + |block_state| { + // Find the last block indices on database + let last_storage_block_number = block_state.anchor().number; + let mut stored_indices = self + .database + .block_body_indices(last_storage_block_number)? + .ok_or(ProviderError::BlockBodyIndicesNotFound(last_storage_block_number))?; + + // Prepare our block indices + stored_indices.first_tx_num = stored_indices.next_tx_num(); + stored_indices.tx_count = 0; + + // Iterate from the lowest block in memory until our target block + for state in block_state.chain().into_iter().rev() { + let block_tx_count = state.block_ref().block.body.transactions.len() as u64; + if state.block_ref().block().number == number { + stored_indices.tx_count = block_tx_count; + } else { + stored_indices.first_tx_num += block_tx_count; + } } - } - Ok(Some(stored_indices)) - } else { - Ok(None) - } + Ok(Some(stored_indices)) + }, + ) } /// Returns the block with senders with matching number or hash from database. 
@@ -760,7 +909,7 @@ impl BlockReader for BlockchainProvider2 { self.get_in_memory_or_storage_by_block_range_while( range, |db_provider, range, _| db_provider.block_range(range), - |block_state, _| Some(block_state.block().block().clone().unseal()), + |block_state, _| Some(block_state.block_ref().block().clone().unseal()), |_| true, ) } @@ -804,7 +953,7 @@ impl TransactionsProvider for BlockchainProvider2 { id.into(), |provider| provider.transaction_by_id(id), |tx_index, _, block_state| { - Ok(block_state.block().block().body.transactions.get(tx_index).cloned()) + Ok(block_state.block_ref().block().body.transactions.get(tx_index).cloned()) }, ) } @@ -818,7 +967,7 @@ impl TransactionsProvider for BlockchainProvider2 { |provider| provider.transaction_by_id_no_hash(id), |tx_index, _, block_state| { Ok(block_state - .block() + .block_ref() .block() .body .transactions @@ -854,7 +1003,7 @@ impl TransactionsProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_block(id), - |_, _, block_state| Ok(Some(block_state.block().block().number)), + |_, _, block_state| Ok(Some(block_state.block_ref().block().number)), ) } @@ -865,7 +1014,7 @@ impl TransactionsProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( id, |provider| provider.transactions_by_block(id), - |block_state| Ok(Some(block_state.block().block().body.transactions.clone())), + |block_state| Ok(Some(block_state.block_ref().block().body.transactions.clone())), ) } @@ -873,35 +1022,12 @@ impl TransactionsProvider for BlockchainProvider2 { &self, range: impl RangeBounds, ) -> ProviderResult>> { - let (start, end) = self.convert_range_bounds(range, || { - self.canonical_in_memory_state.get_canonical_block_number() - }); - - let mut transactions = Vec::new(); - let mut last_in_memory_block = None; - - for number in start..=end { - if let Some(block_state) = self.canonical_in_memory_state.state_by_number(number) { - // TODO: there might be 
an update between loop iterations, we - // need to handle that situation. - transactions.push(block_state.block().block().body.transactions.clone()); - last_in_memory_block = Some(number); - } else { - break - } - } - - if let Some(last_block) = last_in_memory_block { - if last_block < end { - let mut db_transactions = - self.database.transactions_by_block_range((last_block + 1)..=end)?; - transactions.append(&mut db_transactions); - } - } else { - transactions = self.database.transactions_by_block_range(start..=end)?; - } - - Ok(transactions) + self.get_in_memory_or_storage_by_block_range_while( + range, + |db_provider, range, _| db_provider.transactions_by_block_range(range), + |block_state, _| Some(block_state.block_ref().block().body.transactions.clone()), + |_| true, + ) } fn transactions_by_tx_range( @@ -936,7 +1062,7 @@ impl TransactionsProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_tx( id.into(), |provider| provider.transaction_sender(id), - |tx_index, _, block_state| Ok(block_state.block().senders.get(tx_index).copied()), + |tx_index, _, block_state| Ok(block_state.block_ref().senders.get(tx_index).copied()), ) } } @@ -954,7 +1080,7 @@ impl ReceiptProvider for BlockchainProvider2 { fn receipt_by_hash(&self, hash: TxHash) -> ProviderResult> { for block_state in self.canonical_in_memory_state.canonical_chain() { - let executed_block = block_state.block(); + let executed_block = block_state.block_ref(); let block = executed_block.block(); let receipts = block_state.executed_block_receipts(); @@ -1041,7 +1167,7 @@ impl WithdrawalsProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.withdrawals_by_block(id, timestamp), - |block_state| Ok(block_state.block().block().body.withdrawals.clone()), + |block_state| Ok(block_state.block_ref().block().body.withdrawals.clone()), ) } @@ -1052,7 +1178,13 @@ impl WithdrawalsProvider for BlockchainProvider2 { best_block_num.into(), |db_provider| 
db_provider.latest_withdrawal(), |block_state| { - Ok(block_state.block().block().body.withdrawals.clone().and_then(|mut w| w.pop())) + Ok(block_state + .block_ref() + .block() + .body + .withdrawals + .clone() + .and_then(|mut w| w.pop())) }, ) } @@ -1071,7 +1203,7 @@ impl RequestsProvider for BlockchainProvider2 { self.get_in_memory_or_storage_by_block( id, |db_provider| db_provider.requests_by_block(id, timestamp), - |block_state| Ok(block_state.block().block().body.requests.clone()), + |block_state| Ok(block_state.block_ref().block().body.requests.clone()), ) } } @@ -1440,6 +1572,57 @@ impl ForkChoiceSubscriptions for BlockchainProvider2 { } } +impl StorageChangeSetReader for BlockchainProvider2 { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { + let changesets = state + .block() + .execution_output + .bundle + .reverts + .clone() + .into_plain_state_reverts() + .storage + .into_iter() + .flatten() + .flat_map(|revert: PlainStorageRevert| { + revert.storage_revert.into_iter().map(move |(key, value)| { + ( + BlockNumberAddress((block_number, revert.address)), + StorageEntry { key: key.into(), value: value.to_previous_value() }, + ) + }) + }) + .collect(); + Ok(changesets) + } else { + // Perform checks on whether or not changesets exist for the block. + let provider = self.database.provider()?; + + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let storage_history_exists = provider + .get_prune_checkpoint(PruneSegment::StorageHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. 
+ checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !storage_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + provider.storage_changeset(block_number) + } + } +} + impl ChangeSetReader for BlockchainProvider2 { fn account_block_changeset( &self, @@ -1447,7 +1630,7 @@ impl ChangeSetReader for BlockchainProvider2 { ) -> ProviderResult> { if let Some(state) = self.canonical_in_memory_state.state_by_number(block_number) { let changesets = state - .block() + .block_ref() .execution_output .bundle .reverts @@ -1460,7 +1643,25 @@ impl ChangeSetReader for BlockchainProvider2 { .collect(); Ok(changesets) } else { - self.database.provider()?.account_block_changeset(block_number) + // Perform checks on whether or not changesets exist for the block. + let provider = self.database.provider()?; + // No prune checkpoint means history should exist and we should `unwrap_or(true)` + let account_history_exists = provider + .get_prune_checkpoint(PruneSegment::AccountHistory)? + .and_then(|checkpoint| { + // return true if the block number is ahead of the prune checkpoint. + // + // The checkpoint stores the highest pruned block number, so we should make + // sure the block_number is strictly greater. + checkpoint.block_number.map(|checkpoint| block_number > checkpoint) + }) + .unwrap_or(true); + + if !account_history_exists { + return Err(ProviderError::StateAtBlockPruned(block_number)) + } + + provider.account_block_changeset(block_number) } } } @@ -1475,12 +1676,21 @@ impl AccountReader for BlockchainProvider2 { } impl StateReader for BlockchainProvider2 { + /// Re-constructs the [`ExecutionOutcome`] from in-memory and database state, if necessary. + /// + /// If data for the block does not exist, this will return [`None`]. + /// + /// NOTE: This cannot be called safely in a loop outside of the blockchain tree thread. 
This is + /// because the [`CanonicalInMemoryState`] could change during a reorg, causing results to be + /// inconsistent. Currently this can safely be called within the blockchain tree thread, + /// because the tree thread is responsible for modifying the [`CanonicalInMemoryState`] in the + /// first place. fn get_state(&self, block: BlockNumber) -> ProviderResult> { if let Some(state) = self.canonical_in_memory_state.state_by_number(block) { - let state = state.block().execution_outcome().clone(); + let state = state.block_ref().execution_outcome().clone(); Ok(Some(state)) } else { - self.database.provider()?.get_state(block..=block) + self.get_state(block..=block) } } } @@ -1500,24 +1710,29 @@ mod tests { MockNodeTypesWithDB, }, writer::UnifiedStorageWriter, - BlockWriter, CanonChainTracker, StaticFileProviderFactory, StaticFileWriter, + BlockWriter, CanonChainTracker, ProviderFactory, StaticFileProviderFactory, + StaticFileWriter, }; use alloy_eips::{BlockHashOrNumber, BlockNumHash, BlockNumberOrTag}; - use alloy_primitives::B256; + use alloy_primitives::{BlockNumber, TxNumber, B256}; use itertools::Itertools; use rand::Rng; use reth_chain_state::{ test_utils::TestBlockBuilder, CanonStateNotification, CanonStateSubscriptions, - ExecutedBlock, NewCanonicalChain, + CanonicalInMemoryState, ExecutedBlock, NewCanonicalChain, }; use reth_chainspec::{ ChainSpec, ChainSpecBuilder, ChainSpecProvider, EthereumHardfork, MAINNET, }; - use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; + use reth_db::{ + models::{AccountBeforeTx, StoredBlockBodyIndices}, + tables, + }; + use reth_db_api::{cursor::DbCursorRO, transaction::DbTx}; + use reth_errors::ProviderError; use reth_execution_types::{Chain, ExecutionOutcome}; use reth_primitives::{ - BlockWithSenders, Receipt, SealedBlock, SealedBlockWithSenders, StaticFileSegment, - TransactionMeta, TransactionSignedNoHash, Withdrawals, + Receipt, SealedBlock, StaticFileSegment, TransactionSignedNoHash, Withdrawals, }; 
use reth_storage_api::{ BlockHashReader, BlockIdReader, BlockNumReader, BlockReader, BlockReaderIdExt, BlockSource, @@ -1600,9 +1815,27 @@ mod tests { let factory = create_test_provider_factory_with_chain_spec(chain_spec); let provider_rw = factory.database_provider_rw()?; + let static_file_provider = factory.static_file_provider(); + + // Write transactions to static files with the right `tx_num`` + let mut bodies_cursor = provider_rw.tx_ref().cursor_read::()?; + let mut tx_num = bodies_cursor + .seek_exact(database_blocks.first().as_ref().unwrap().number.saturating_sub(1))? + .map(|(_, indices)| indices.next_tx_num()) + .unwrap_or_default(); // Insert blocks into the database for block in &database_blocks { + // TODO: this should be moved inside `insert_historical_block`: + let mut transactions_writer = + static_file_provider.latest_writer(StaticFileSegment::Transactions)?; + transactions_writer.increment_block(block.number)?; + for tx in block.body.transactions() { + let tx: TransactionSignedNoHash = tx.clone().into(); + transactions_writer.append_transaction(tx_num, &tx)?; + tx_num += 1; + } + provider_rw.insert_historical_block( block.clone().seal_with_senders().expect("failed to seal block with senders"), )?; @@ -1616,7 +1849,9 @@ mod tests { .append_receipts_from_blocks( // The initial block number is required database_blocks.first().map(|b| b.number).unwrap_or_default(), - receipts.iter().map(|vec| vec.clone().into_iter().map(Some).collect::>()), + receipts[..database_blocks.len()] + .iter() + .map(|vec| vec.clone().into_iter().map(Some).collect::>()), )?; // Commit to both storages: database and static files @@ -1682,6 +1917,42 @@ mod tests { ) } + /// This will persist the last block in-memory and delete it from + /// `canonical_in_memory_state` right after a database read transaction is created. + /// + /// This simulates a RPC method having a different view than when its database transaction was + /// created. 
+ fn persist_block_after_db_tx_creation( + provider: Arc>, + block_number: BlockNumber, + ) { + let hook_provider = provider.clone(); + provider.database.db_ref().set_post_transaction_hook(Box::new(move || { + if let Some(state) = hook_provider.canonical_in_memory_state.head_state() { + if state.anchor().number + 1 == block_number { + let mut lowest_memory_block = + state.parent_state_chain().last().expect("qed").block(); + let num_hash = lowest_memory_block.block().num_hash(); + + let mut execution_output = (*lowest_memory_block.execution_output).clone(); + execution_output.first_block = lowest_memory_block.block().number; + lowest_memory_block.execution_output = Arc::new(execution_output); + + // Push to disk + let provider_rw = hook_provider.database_provider_rw().unwrap(); + UnifiedStorageWriter::from(&provider_rw, &hook_provider.static_file_provider()) + .save_blocks(&[lowest_memory_block]) + .unwrap(); + UnifiedStorageWriter::commit(provider_rw, hook_provider.static_file_provider()) + .unwrap(); + + // Remove from memory + hook_provider.canonical_in_memory_state.remove_persisted_blocks(num_hash); + } + } + })); + } + #[test] fn test_block_reader_find_block_by_hash() -> eyre::Result<()> { // Initialize random number generator and provider factory @@ -1987,883 +2258,119 @@ mod tests { } #[test] - fn test_block_with_senders_by_hash_in_memory() -> eyre::Result<()> { + fn test_block_hash_reader() -> eyre::Result<()> { let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( + let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( &mut rng, TEST_BLOCKS_COUNT, TEST_BLOCKS_COUNT, BlockRangeParams::default(), )?; - // Get the first in-memory block - let first_in_mem_block = in_memory_blocks.first().unwrap(); - let block_hash = first_in_mem_block.hash(); - - // Get the block with senders by hash and check if it matches the first in-memory block - let block_with_senders = provider - 
.block_with_senders(BlockHashOrNumber::Hash(block_hash), TransactionVariant::WithHash)? - .unwrap(); - assert_eq!(block_with_senders.block.seal(block_hash), first_in_mem_block.clone()); - assert_eq!(block_with_senders.senders, first_in_mem_block.senders().unwrap()); - - Ok(()) - } + let database_block = database_blocks.first().unwrap().clone(); + let in_memory_block = in_memory_blocks.last().unwrap().clone(); - #[test] - fn test_block_with_senders_by_number_in_memory() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; + assert_eq!(provider.block_hash(database_block.number)?, Some(database_block.hash())); + assert_eq!(provider.block_hash(in_memory_block.number)?, Some(in_memory_block.hash())); - // Get the first in-memory block - let first_in_mem_block = in_memory_blocks.first().unwrap(); - let block_number = first_in_mem_block.number; - - // Get the block with senders by number and check if it matches the first in-memory block - let block_with_senders = provider - .block_with_senders( - BlockHashOrNumber::Number(block_number), - TransactionVariant::WithHash, - )? 
- .unwrap(); assert_eq!( - block_with_senders.block.seal(first_in_mem_block.hash()), - first_in_mem_block.clone() + provider.canonical_hashes_range(0, 10)?, + [database_blocks, in_memory_blocks] + .concat() + .iter() + .map(|block| block.hash()) + .collect::>() ); - assert_eq!(block_with_senders.senders, first_in_mem_block.senders().unwrap()); Ok(()) } #[test] - fn test_block_with_senders_by_hash_in_database() -> eyre::Result<()> { + fn test_header_provider() -> eyre::Result<()> { let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( + let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( &mut rng, TEST_BLOCKS_COUNT, TEST_BLOCKS_COUNT, BlockRangeParams::default(), )?; - // Get the first database block - let first_db_block = database_blocks.first().unwrap(); - let block_hash = first_db_block.hash(); + let database_block = database_blocks.first().unwrap().clone(); + let in_memory_block = in_memory_blocks.last().unwrap().clone(); + // make sure that the finalized block is on db + let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); + provider.set_finalized(finalized_block.header.clone()); - // Get the block with senders by hash and check if it matches the first database block - let block_with_senders = provider - .block_with_senders(BlockHashOrNumber::Hash(block_hash), TransactionVariant::WithHash)? 
- .unwrap(); - assert_eq!(block_with_senders.block.seal(block_hash), first_db_block.clone()); - assert_eq!(block_with_senders.senders, first_db_block.senders().unwrap()); + let blocks = [database_blocks, in_memory_blocks].concat(); - Ok(()) - } + assert_eq!( + provider.header_td_by_number(database_block.number)?, + Some(database_block.difficulty) + ); - #[test] - fn test_block_with_senders_by_number_in_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; + assert_eq!( + provider.header_td_by_number(in_memory_block.number)?, + Some(in_memory_block.difficulty) + ); - // Get the first database block - let first_db_block = database_blocks.first().unwrap(); - let block_number = first_db_block.number; - - // Get the block with senders by number and check if it matches the first database block - let block_with_senders = provider - .block_with_senders( - BlockHashOrNumber::Number(block_number), - TransactionVariant::WithHash, - )? 
- .unwrap(); - assert_eq!(block_with_senders.block.seal(first_db_block.hash()), first_db_block.clone()); - assert_eq!(block_with_senders.senders, first_db_block.senders().unwrap()); + assert_eq!( + provider.sealed_headers_while(0..=10, |header| header.number <= 8)?, + blocks + .iter() + .take_while(|header| header.number <= 8) + .map(|b| b.header.clone()) + .collect::>() + ); Ok(()) } - #[test] - fn test_block_with_senders_non_existent_block() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Generate a random hash (non-existent block) - let non_existent_hash = B256::random(); - let result = provider.block_with_senders( - BlockHashOrNumber::Hash(non_existent_hash), - TransactionVariant::WithHash, - )?; - // The block should not be found - assert!(result.is_none()); - - // Generate a random number (non-existent block) - let non_existent_number = 9999; - let result = provider.block_with_senders( - BlockHashOrNumber::Number(non_existent_number), - TransactionVariant::WithHash, - )?; - // The block should not be found - assert!(result.is_none()); + #[tokio::test] + async fn test_canon_state_subscriptions() -> eyre::Result<()> { + let factory = create_test_provider_factory(); - Ok(()) - } + // Generate a random block to initialise the blockchain provider. + let mut test_block_builder = TestBlockBuilder::default(); + let block_1 = test_block_builder.generate_random_block(0, B256::ZERO); + let block_hash_1 = block_1.hash(); - #[test] - fn test_sealed_block_with_senders_by_hash_in_memory() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; + // Insert and commit the block. 
+ let provider_rw = factory.provider_rw()?; + provider_rw.insert_historical_block(block_1)?; + provider_rw.commit()?; - // Get the first in-memory block - let first_in_mem_block = in_memory_blocks.first().unwrap(); - let block_hash = first_in_mem_block.hash(); - - // Get the sealed block with senders by hash and check if it matches the first in-memory - // block - let sealed_block_with_senders = provider - .sealed_block_with_senders( - BlockHashOrNumber::Hash(block_hash), - TransactionVariant::WithHash, - )? - .unwrap(); - assert_eq!(sealed_block_with_senders.block, first_in_mem_block.clone()); - assert_eq!(sealed_block_with_senders.senders, first_in_mem_block.senders().unwrap()); + let provider = BlockchainProvider2::new(factory)?; - Ok(()) - } + // Subscribe twice for canonical state updates. + let in_memory_state = provider.canonical_in_memory_state(); + let mut rx_1 = provider.subscribe_to_canonical_state(); + let mut rx_2 = provider.subscribe_to_canonical_state(); - #[test] - fn test_sealed_block_with_senders_by_number_in_memory() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; + // Send and receive commit notifications. 
+ let block_2 = test_block_builder.generate_random_block(1, block_hash_1); + let chain = Chain::new(vec![block_2], ExecutionOutcome::default(), None); + let commit = CanonStateNotification::Commit { new: Arc::new(chain.clone()) }; + in_memory_state.notify_canon_state(commit.clone()); + let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); + assert_eq!(notification_1, Ok(commit.clone())); + assert_eq!(notification_2, Ok(commit.clone())); - // Get the first in-memory block - let first_in_mem_block = in_memory_blocks.first().unwrap(); - let block_number = first_in_mem_block.number; - - // Get the sealed block with senders by number and check if it matches the first in-memory - let sealed_block_with_senders = provider - .sealed_block_with_senders( - BlockHashOrNumber::Number(block_number), - TransactionVariant::WithHash, - )? - .unwrap(); - assert_eq!(sealed_block_with_senders.block, first_in_mem_block.clone()); - assert_eq!(sealed_block_with_senders.senders, first_in_mem_block.senders().unwrap()); + // Send and receive re-org notifications. 
+ let block_3 = test_block_builder.generate_random_block(1, block_hash_1); + let block_4 = test_block_builder.generate_random_block(2, block_3.hash()); + let new_chain = Chain::new(vec![block_3, block_4], ExecutionOutcome::default(), None); + let re_org = + CanonStateNotification::Reorg { old: Arc::new(chain), new: Arc::new(new_chain) }; + in_memory_state.notify_canon_state(re_org.clone()); + let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); + assert_eq!(notification_1, Ok(re_org.clone())); + assert_eq!(notification_2, Ok(re_org.clone())); Ok(()) } #[test] - fn test_sealed_block_with_senders_by_hash_in_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Get the first database block - let first_db_block = database_blocks.first().unwrap(); - let block_hash = first_db_block.hash(); - - // Get the sealed block with senders by hash and check if it matches the first database - // block - let sealed_block_with_senders = provider - .sealed_block_with_senders( - BlockHashOrNumber::Hash(block_hash), - TransactionVariant::WithHash, - )? 
- .unwrap(); - assert_eq!(sealed_block_with_senders.block, first_db_block.clone()); - assert_eq!(sealed_block_with_senders.senders, first_db_block.senders().unwrap()); - - Ok(()) - } - - #[test] - fn test_sealed_block_with_senders_by_number_in_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Get the first database block - let first_db_block = database_blocks.first().unwrap(); - let block_number = first_db_block.number; - - // Get the sealed block with senders by number and check if it matches the first database - // block - let sealed_block_with_senders = provider - .sealed_block_with_senders( - BlockHashOrNumber::Number(block_number), - TransactionVariant::WithHash, - )? - .unwrap(); - assert_eq!(sealed_block_with_senders.block, first_db_block.clone()); - assert_eq!(sealed_block_with_senders.senders, first_db_block.senders().unwrap()); - - Ok(()) - } - - #[test] - fn test_sealed_block_with_senders_non_existent_block() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Generate a random hash (non-existent block) - let non_existent_hash = B256::random(); - let result = provider.sealed_block_with_senders( - BlockHashOrNumber::Hash(non_existent_hash), - TransactionVariant::WithHash, - )?; - // The block should not be found - assert!(result.is_none()); - - // Generate a random number (non-existent block) - let non_existent_number = 9999; - let result = provider.sealed_block_with_senders( - BlockHashOrNumber::Number(non_existent_number), - TransactionVariant::WithHash, - )?; - // The block should not be found - assert!(result.is_none()); - - Ok(()) - } - - #[test] - fn test_block_range_in_memory_only() -> eyre::Result<()> { - 
let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Get the range of in-memory blocks - let start_block_number = in_memory_blocks.first().unwrap().number; - let end_block_number = in_memory_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks = provider.block_range(range)?; - - // Check if the retrieved blocks match the in-memory blocks - assert_eq!(blocks.len(), in_memory_blocks.len()); - // Check if the blocks are equal - for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter()) { - assert_eq!(retrieved_block, &expected_block.clone().unseal()); - } - - // Check for partial in-memory ranges - let blocks = provider.block_range(start_block_number + 1..=end_block_number)?; - assert_eq!(blocks.len(), in_memory_blocks.len() - 1); - for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter().skip(1)) - { - assert_eq!(retrieved_block, &expected_block.clone().unseal()); - } - - let blocks = provider.block_range(start_block_number + 1..=end_block_number - 1)?; - assert_eq!(blocks.len(), in_memory_blocks.len() - 2); - for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter().skip(1)) - { - assert_eq!(retrieved_block, &expected_block.clone().unseal()); - } - - let blocks = provider.block_range(start_block_number + 1..=end_block_number + 1)?; - assert_eq!(blocks.len(), in_memory_blocks.len() - 1); - for (retrieved_block, expected_block) in blocks.iter().zip(in_memory_blocks.iter().skip(1)) - { - assert_eq!(retrieved_block, &expected_block.clone().unseal()); - } - - Ok(()) - } - - #[test] - fn test_block_range_in_database_only() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - 0, // No blocks in 
memory - BlockRangeParams::default(), - )?; - - // Get the range of database blocks - let start_block_number = database_blocks.first().unwrap().number; - let end_block_number = database_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks = provider.block_range(range)?; - - // Check if the retrieved blocks match the database blocks - assert_eq!(blocks.len(), database_blocks.len()); - // Check if the blocks are equal - for (retrieved_block, expected_block) in blocks.iter().zip(database_blocks.iter()) { - assert_eq!(retrieved_block, &expected_block.clone().unseal()); - } - - Ok(()) - } - - #[test] - fn test_block_range_across_memory_and_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let mid_point = TEST_BLOCKS_COUNT / 2; - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - mid_point, - TEST_BLOCKS_COUNT - mid_point, - BlockRangeParams::default(), - )?; - - // Get the range of blocks across memory and database - let start_block_number = database_blocks.first().unwrap().number; - let end_block_number = in_memory_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks = provider.block_range(range)?; - - // Check if the retrieved blocks match the database and in-memory blocks - assert_eq!(blocks.len(), TEST_BLOCKS_COUNT); - let all_expected_blocks = - database_blocks.iter().chain(in_memory_blocks.iter()).collect::>(); - // Check if the blocks are equal - for (retrieved_block, expected_block) in blocks.iter().zip(all_expected_blocks.iter()) { - assert_eq!(retrieved_block.clone(), (*expected_block).clone().unseal()); - } - - Ok(()) - } - - #[test] - fn test_block_range_non_existent_range() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Generate a 
non-existent range - let non_existent_range = 9999..=10000; - let blocks = provider.block_range(non_existent_range)?; - - // The range is non-existent, so the blocks should be empty - assert!(blocks.is_empty()); - - Ok(()) - } - - #[test] - fn test_block_range_partial_overlap() -> eyre::Result<()> { - let mut rng = generators::rng(); - let mid_point = TEST_BLOCKS_COUNT / 2; - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - mid_point, - mid_point, - BlockRangeParams::default(), - )?; - - // Get the range of blocks across memory and database - let start_block_number = database_blocks.last().unwrap().number; - let end_block_number = in_memory_blocks.first().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks = provider.block_range(range)?; - - assert_eq!(blocks.len(), 2); // Only one block from each side of the overlap - assert_eq!(blocks[0], database_blocks.last().unwrap().clone().unseal()); - assert_eq!(blocks[1], in_memory_blocks.first().unwrap().clone().unseal()); - - Ok(()) - } - - #[test] - fn test_block_with_senders_range_across_memory_and_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let mid_point = TEST_BLOCKS_COUNT / 2; - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - mid_point, - TEST_BLOCKS_COUNT - mid_point, - BlockRangeParams::default(), - )?; - - // Get the range of blocks across memory and database - let start_block_number = database_blocks.first().unwrap().number; - let end_block_number = in_memory_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks_with_senders = provider.block_with_senders_range(range)?; - - // Check if the retrieved blocks match the database and in-memory blocks - assert_eq!(blocks_with_senders.len(), TEST_BLOCKS_COUNT); - - let all_expected_blocks_with_senders = database_blocks - .iter() - .chain(in_memory_blocks.iter()) - 
.map(|sealed_block| BlockWithSenders { - block: sealed_block.clone().unseal(), - senders: sealed_block.senders().unwrap(), - }) - .collect::>(); - - // Check if the blocks are equal - for (retrieved_block_with_senders, expected_block_with_senders) in - blocks_with_senders.iter().zip(all_expected_blocks_with_senders.iter()) - { - assert_eq!(retrieved_block_with_senders.block, expected_block_with_senders.block); - assert_eq!(retrieved_block_with_senders.senders, expected_block_with_senders.senders); - } - - Ok(()) - } - - #[test] - fn test_block_with_senders_range_only_in_memory() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Get the range of in-memory blocks - let start_block_number = in_memory_blocks.first().unwrap().number; - let end_block_number = in_memory_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks_with_senders = provider.block_with_senders_range(range)?; - - // Check if the retrieved blocks match the in-memory blocks - assert_eq!(blocks_with_senders.len(), TEST_BLOCKS_COUNT); - - let expected_blocks_with_senders = in_memory_blocks - .iter() - .map(|sealed_block| BlockWithSenders { - block: sealed_block.clone().unseal(), - senders: sealed_block.senders().unwrap(), - }) - .collect::>(); - - // Check if the blocks are equal - for (retrieved_block_with_senders, expected_block_with_senders) in - blocks_with_senders.iter().zip(expected_blocks_with_senders.iter()) - { - assert_eq!(retrieved_block_with_senders.block, expected_block_with_senders.block); - assert_eq!(retrieved_block_with_senders.senders, expected_block_with_senders.senders); - } - - Ok(()) - } - - #[test] - fn test_block_with_senders_range_only_in_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = 
provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - 0, - BlockRangeParams::default(), - )?; - - // Get the range of database blocks - let start_block_number = database_blocks.first().unwrap().number; - let end_block_number = database_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let blocks_with_senders = provider.block_with_senders_range(range)?; - - // Check if the retrieved blocks match the database blocks - assert_eq!(blocks_with_senders.len(), TEST_BLOCKS_COUNT); - - let expected_blocks_with_senders = database_blocks - .iter() - .map(|sealed_block| BlockWithSenders { - block: sealed_block.clone().unseal(), - senders: sealed_block.senders().unwrap(), - }) - .collect::>(); - - // Check if the blocks are equal - for (retrieved_block_with_senders, expected_block_with_senders) in - blocks_with_senders.iter().zip(expected_blocks_with_senders.iter()) - { - assert_eq!(retrieved_block_with_senders.block, expected_block_with_senders.block); - assert_eq!(retrieved_block_with_senders.senders, expected_block_with_senders.senders); - } - - Ok(()) - } - - #[test] - fn test_block_with_senders_range_non_existent_range() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Assuming this range does not exist - let start_block_number = 1000; - let end_block_number = 2000; - - let range = start_block_number..=end_block_number; - let blocks_with_senders = provider.block_with_senders_range(range)?; - - // The range is non-existent, so the blocks should be empty - assert!(blocks_with_senders.is_empty()); - - Ok(()) - } - - #[test] - fn test_sealed_block_with_senders_range_across_memory_and_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let mid_point = TEST_BLOCKS_COUNT / 2; - let (provider, database_blocks, in_memory_blocks, _) = 
provider_with_random_blocks( - &mut rng, - mid_point, - TEST_BLOCKS_COUNT - mid_point, - BlockRangeParams::default(), - )?; - - // Get the range of blocks across memory and database - let start_block_number = database_blocks.first().unwrap().number; - let end_block_number = in_memory_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let sealed_blocks_with_senders = provider.sealed_block_with_senders_range(range)?; - - // Check if the retrieved blocks match the database and in-memory blocks - assert_eq!(sealed_blocks_with_senders.len(), TEST_BLOCKS_COUNT); - - let all_expected_sealed_blocks_with_senders = database_blocks - .iter() - .chain(in_memory_blocks.iter()) - .map(|sealed_block| SealedBlockWithSenders { - block: sealed_block.clone(), - senders: sealed_block.senders().unwrap(), - }) - .collect::>(); - - // Check if the blocks are equal - for (retrieved_sealed_block_with_senders, expected_sealed_block_with_senders) in - sealed_blocks_with_senders.iter().zip(all_expected_sealed_blocks_with_senders.iter()) - { - assert_eq!( - retrieved_sealed_block_with_senders.block, - expected_sealed_block_with_senders.block - ); - assert_eq!( - retrieved_sealed_block_with_senders.senders, - expected_sealed_block_with_senders.senders - ); - } - - Ok(()) - } - - #[test] - fn test_sealed_block_with_senders_range_only_in_memory() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Get the range of in-memory blocks - let start_block_number = in_memory_blocks.first().unwrap().number; - let end_block_number = in_memory_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let sealed_blocks_with_senders = provider.sealed_block_with_senders_range(range)?; - - // Check if the retrieved blocks match the in-memory blocks - 
assert_eq!(sealed_blocks_with_senders.len(), TEST_BLOCKS_COUNT); - - let expected_sealed_blocks_with_senders = in_memory_blocks - .iter() - .map(|sealed_block| SealedBlockWithSenders { - block: sealed_block.clone(), - senders: sealed_block.senders().unwrap(), - }) - .collect::>(); - - // Check if the blocks are equal - for (retrieved_sealed_block_with_senders, expected_sealed_block_with_senders) in - sealed_blocks_with_senders.iter().zip(expected_sealed_blocks_with_senders.iter()) - { - assert_eq!( - retrieved_sealed_block_with_senders.block, - expected_sealed_block_with_senders.block - ); - assert_eq!( - retrieved_sealed_block_with_senders.senders, - expected_sealed_block_with_senders.senders - ); - } - - Ok(()) - } - - #[test] - fn test_sealed_block_with_senders_range_only_in_database() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - 0, - BlockRangeParams::default(), - )?; - - // Get the range of database blocks - let start_block_number = database_blocks.first().unwrap().number; - let end_block_number = database_blocks.last().unwrap().number; - - let range = start_block_number..=end_block_number; - let sealed_blocks_with_senders = provider.sealed_block_with_senders_range(range)?; - - // Check if the retrieved blocks match the database blocks - assert_eq!(sealed_blocks_with_senders.len(), TEST_BLOCKS_COUNT); - - let expected_sealed_blocks_with_senders = database_blocks - .iter() - .map(|sealed_block| SealedBlockWithSenders { - block: sealed_block.clone(), - senders: sealed_block.senders().unwrap(), - }) - .collect::>(); - - // Check if the blocks are equal - for (retrieved_sealed_block_with_senders, expected_sealed_block_with_senders) in - sealed_blocks_with_senders.iter().zip(expected_sealed_blocks_with_senders.iter()) - { - assert_eq!( - retrieved_sealed_block_with_senders.block, - expected_sealed_block_with_senders.block - ); - assert_eq!( - 
retrieved_sealed_block_with_senders.senders, - expected_sealed_block_with_senders.senders - ); - } - - Ok(()) - } - - #[test] - fn test_sealed_block_with_senders_range_non_existent_range() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Assuming this range does not exist - let start_block_number = 1000; - let end_block_number = 2000; - - let range = start_block_number..=end_block_number; - let sealed_blocks_with_senders = provider.sealed_block_with_senders_range(range)?; - - // The range is non-existent, so the blocks should be empty - assert!(sealed_blocks_with_senders.is_empty()); - - Ok(()) - } - - #[test] - fn test_block_hash_reader() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); - - assert_eq!(provider.block_hash(database_block.number)?, Some(database_block.hash())); - assert_eq!(provider.block_hash(in_memory_block.number)?, Some(in_memory_block.hash())); - - assert_eq!( - provider.canonical_hashes_range(0, 10)?, - [database_blocks, in_memory_blocks] - .concat() - .iter() - .map(|block| block.hash()) - .collect::>() - ); - - Ok(()) - } - - #[test] - fn test_header_provider() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - let database_block = database_blocks.first().unwrap().clone(); - let in_memory_block = in_memory_blocks.last().unwrap().clone(); - // make sure that the finalized block is on db - 
let finalized_block = database_blocks.get(database_blocks.len() - 3).unwrap(); - provider.set_finalized(finalized_block.header.clone()); - - let blocks = [database_blocks, in_memory_blocks].concat(); - - assert_eq!(provider.header(&database_block.hash())?, Some(database_block.header().clone())); - assert_eq!( - provider.header(&in_memory_block.hash())?, - Some(in_memory_block.header().clone()) - ); - - assert_eq!( - provider.header_by_number(database_block.number)?, - Some(database_block.header().clone()) - ); - assert_eq!( - provider.header_by_number(in_memory_block.number)?, - Some(in_memory_block.header().clone()) - ); - - assert_eq!( - provider.header_td_by_number(database_block.number)?, - Some(database_block.difficulty) - ); - assert_eq!( - provider.header_td_by_number(in_memory_block.number)?, - Some(in_memory_block.difficulty) - ); - - assert_eq!( - provider.headers_range(0..=10)?, - blocks.iter().map(|b| b.header().clone()).collect::>() - ); - - assert_eq!( - provider.sealed_header(database_block.number)?, - Some(database_block.header.clone()) - ); - assert_eq!( - provider.sealed_header(in_memory_block.number)?, - Some(in_memory_block.header.clone()) - ); - - assert_eq!( - provider.sealed_headers_range(0..=10)?, - blocks.iter().map(|b| b.header.clone()).collect::>() - ); - - assert_eq!( - provider.sealed_headers_while(0..=10, |header| header.number <= 8)?, - blocks - .iter() - .take_while(|header| header.number <= 8) - .map(|b| b.header.clone()) - .collect::>() - ); - - Ok(()) - } - - #[tokio::test] - async fn test_canon_state_subscriptions() -> eyre::Result<()> { - let factory = create_test_provider_factory(); - - // Generate a random block to initialise the blockchain provider. - let mut test_block_builder = TestBlockBuilder::default(); - let block_1 = test_block_builder.generate_random_block(0, B256::ZERO); - let block_hash_1 = block_1.hash(); - - // Insert and commit the block. 
- let provider_rw = factory.provider_rw()?; - provider_rw.insert_historical_block(block_1)?; - provider_rw.commit()?; - - let provider = BlockchainProvider2::new(factory)?; - - // Subscribe twice for canonical state updates. - let in_memory_state = provider.canonical_in_memory_state(); - let mut rx_1 = provider.subscribe_to_canonical_state(); - let mut rx_2 = provider.subscribe_to_canonical_state(); - - // Send and receive commit notifications. - let block_2 = test_block_builder.generate_random_block(1, block_hash_1); - let chain = Chain::new(vec![block_2], ExecutionOutcome::default(), None); - let commit = CanonStateNotification::Commit { new: Arc::new(chain.clone()) }; - in_memory_state.notify_canon_state(commit.clone()); - let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); - assert_eq!(notification_1, Ok(commit.clone())); - assert_eq!(notification_2, Ok(commit.clone())); - - // Send and receive re-org notifications. - let block_3 = test_block_builder.generate_random_block(1, block_hash_1); - let block_4 = test_block_builder.generate_random_block(2, block_3.hash()); - let new_chain = Chain::new(vec![block_3, block_4], ExecutionOutcome::default(), None); - let re_org = - CanonStateNotification::Reorg { old: Arc::new(chain), new: Arc::new(new_chain) }; - in_memory_state.notify_canon_state(re_org.clone()); - let (notification_1, notification_2) = tokio::join!(rx_1.recv(), rx_2.recv()); - assert_eq!(notification_1, Ok(re_org.clone())); - assert_eq!(notification_2, Ok(re_org.clone())); - - Ok(()) - } - - #[test] - fn test_withdrawals_provider() -> eyre::Result<()> { + fn test_withdrawals_provider() -> eyre::Result<()> { let mut rng = generators::rng(); let chain_spec = Arc::new(ChainSpecBuilder::mainnet().shanghai_activated().build()); let (provider, database_blocks, in_memory_blocks, _) = @@ -3127,37 +2634,6 @@ mod tests { Ok(()) } - #[test] - fn test_receipt_provider() -> eyre::Result<()> { - let mut rng = generators::rng(); - let 
(provider, database_blocks, in_memory_blocks, receipts) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { tx_count: 1..3, ..Default::default() }, - )?; - - let blocks = [database_blocks, in_memory_blocks].concat(); - - for block in blocks { - let block_number = block.number as usize; - for (txn_number, _) in block.body.transactions.iter().enumerate() { - let txn_hash = block.body.transactions.get(txn_number).unwrap().hash(); - let txn_id = provider.transaction_id(txn_hash)?.unwrap(); - assert_eq!( - provider.receipt(txn_id)?.unwrap(), - receipts.get(block_number).unwrap().clone().get(txn_number).unwrap().clone() - ); - assert_eq!( - provider.receipt_by_hash(txn_hash)?.unwrap(), - receipts.get(block_number).unwrap().clone().get(txn_number).unwrap().clone() - ); - } - } - - Ok(()) - } - #[test] fn test_receipt_provider_id_ext_receipts_by_block_id() -> eyre::Result<()> { let mut rng = generators::rng(); @@ -3539,766 +3015,725 @@ mod tests { ); // test state by block tag for finalized block let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - in_memory_provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); - assert_eq!( - finalized_block.hash(), - in_memory_provider - .state_by_block_number_or_tag(BlockNumberOrTag::Finalized)? - .block_hash(finalized_block.number)? - .unwrap() - ); - // test state by block tag for earliest block - let earliest_block = blocks.first().unwrap().clone(); - assert_eq!( - earliest_block.hash(), - only_database_provider - .state_by_block_number_or_tag(BlockNumberOrTag::Earliest)? - .block_hash(earliest_block.number)? 
- .unwrap() - ); - - Ok(()) - } - - #[test] - fn test_canon_state_tracker() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, _, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - let before = Instant::now(); - provider.on_forkchoice_update_received(&Default::default()); - let last_update_ts = provider.last_received_update_timestamp().unwrap(); - let after = Instant::now(); - - // Ensure the timestamp is updated and between the before and after timestamps - assert!(before < last_update_ts && last_update_ts < after); - - let before = Instant::now(); - provider.on_transition_configuration_exchanged(); - let last_update_ts = provider.last_exchanged_transition_configuration_timestamp().unwrap(); - let after = Instant::now(); - - // Ensure the timestamp is updated and between the before and after timestamps - assert!(before < last_update_ts && last_update_ts < after); - - Ok(()) - } - - #[test] - fn test_block_id_reader() -> eyre::Result<()> { - // Create a new provider - let mut rng = generators::rng(); - let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams::default(), - )?; - - // Set the pending block in memory - let pending_block = in_memory_blocks.last().unwrap(); - provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { - block: Arc::new(pending_block.clone()), - senders: Default::default(), - execution_output: Default::default(), - hashed_state: Default::default(), - trie: Default::default(), - }); - - // Set the safe block in memory - let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); - provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); - - // Set the finalized block in memory - let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); - 
provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); - - // Verify the pending block number and hash - assert_eq!( - provider.pending_block_num_hash()?, - Some(BlockNumHash { number: pending_block.number, hash: pending_block.hash() }) - ); - - // Verify the safe block number and hash - assert_eq!( - provider.safe_block_num_hash()?, - Some(BlockNumHash { number: safe_block.number, hash: safe_block.hash() }) - ); - - // Verify the finalized block number and hash - assert_eq!( - provider.finalized_block_num_hash()?, - Some(BlockNumHash { number: finalized_block.number, hash: finalized_block.hash() }) - ); - - Ok(()) - } - - #[test] - fn test_transaction_id() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; - - // Database - // Choose a random transaction from the database blocks - let tx = &database_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Ensure the transaction ID can be found in the database - let result = provider.transaction_id(tx_hash)?; - assert_eq!(result, Some(0)); - - // In memory - // Choose a random transaction from the in-memory blocks - let tx = &in_memory_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Ensure the transaction ID can be found in the in-memory state - let result = provider.transaction_id(tx_hash)?; - assert!(result.is_some(), "Transaction ID should be found in the in-memory state"); - - // Check that the transaction ID is greater than the last database transaction ID - let last_db_tx_id = provider.database.last_block_number()?; - let last_db_tx_id = - provider.database.block_body_indices(last_db_tx_id)?.unwrap().last_tx_num(); - - assert!( - result.unwrap() > last_db_tx_id, - "In-memory transaction ID should 
be greater than the last database transaction ID" - ); - assert_eq!(result, Some(last_db_tx_id + 1)); - - // Generate a random hash not present in any transaction - let random_tx_hash = B256::random(); - - // Ensure the transaction ID is not found - let result = provider.transaction_id(random_tx_hash)?; - assert!(result.is_none(), "Transaction ID should not be found"); - - Ok(()) - } - - #[test] - fn test_transaction_by_id() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; - - // In memory - // Choose a random transaction ID from in-memory blocks - let tx = &in_memory_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Fetch the transaction ID - let tx_id = provider.transaction_id(tx_hash)?.unwrap(); - - // Ensure the transaction can be retrieved by its ID - let result = provider.transaction_by_id(tx_id)?; - assert_eq!( - result.unwrap(), - *tx, - "The retrieved transaction should match the expected transaction" - ); - - // Database - // Choose a random transaction ID from the database blocks - let tx = &database_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Fetch the transaction ID - let tx_id = provider.transaction_id(tx_hash)?.unwrap(); - - // Ensure the transaction can be retrieved by its ID - let result = provider.transaction_by_id(tx_id)?; - assert!(result.is_some(), "Transaction should be found in the database"); - assert_eq!( - result.unwrap(), - *tx, - "The retrieved transaction should match the expected transaction" - ); - - // Generate a random transaction ID not present in any block - let random_tx_id = 999999; - - // Ensure the transaction is not found - let result = provider.transaction_by_id(random_tx_id)?; - assert!(result.is_none(), "Transaction should 
not be found"); - - Ok(()) - } - - #[test] - fn test_transaction_by_id_no_hash() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; - - // In memory - // Choose a random transaction ID from in-memory blocks - let tx = &in_memory_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Fetch the transaction ID - let tx_id = provider.transaction_id(tx_hash)?.unwrap(); - - // Ensure the transaction can be retrieved by its ID without hash - let result = provider.transaction_by_id_no_hash(tx_id)?; - let expected_tx: TransactionSignedNoHash = tx.clone().into(); - assert_eq!( - result.unwrap(), - expected_tx, - "The retrieved transaction without hash should match the expected transaction" - ); - - // Database - // Choose a random transaction ID from the database blocks - let tx = &database_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Fetch the transaction ID - let tx_id = provider.transaction_id(tx_hash)?.unwrap(); - - // Ensure the transaction can be retrieved by its ID without hash - let result = provider.transaction_by_id_no_hash(tx_id)?; - let expected_tx: TransactionSignedNoHash = tx.clone().into(); - assert_eq!( - result.unwrap(), - expected_tx, - "The retrieved transaction without hash should match the expected transaction" - ); - - // Generate a random transaction ID not present in any block - let random_tx_id = 7656898; - - // Ensure the transaction is not found without hash - let result = provider.transaction_by_id_no_hash(random_tx_id)?; - assert!(result.is_none(), "Transaction should not be found without hash"); - - Ok(()) - } - - #[test] - fn test_transaction_by_hash() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, 
in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; - - // In memory - // Choose a random transaction hash from the in-memory blocks - let tx = &in_memory_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Ensure the transaction can be retrieved by its hash from the in-memory state - let result = provider.transaction_by_hash(tx_hash)?; - assert_eq!( - result.unwrap(), - *tx, - "The retrieved transaction should match the expected transaction" - ); - - // Database - // Choose a random transaction hash from the database blocks - let tx = &database_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Ensure the transaction can be retrieved by its hash from the database - let result = provider.transaction_by_hash(tx_hash)?; - assert_eq!( - result.unwrap(), - *tx, - "The retrieved transaction should match the expected transaction" - ); - - // Generate a random hash not present in any transaction - let random_tx_hash = B256::random(); - - // Ensure the transaction is not found by the random hash - let result = provider.transaction_by_hash(random_tx_hash)?; - assert!(result.is_none(), "Transaction should not be found"); - - Ok(()) - } - - #[test] - fn test_transaction_by_hash_with_meta() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; - - // In memory - // Choose a random transaction from the in-memory block - let tx = &in_memory_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Create the expected metadata for this transaction - let meta = TransactionMeta { - tx_hash, - index: 0, - block_hash: 
in_memory_blocks[0].header.hash(), - block_number: in_memory_blocks[0].header.number, - base_fee: in_memory_blocks[0].header.base_fee_per_gas, - excess_blob_gas: None, - timestamp: in_memory_blocks[0].header.timestamp, - }; - - // Ensure the transaction and its metadata can be retrieved from the in-memory state - let result = provider.transaction_by_hash_with_meta(tx_hash)?; - let (retrieved_tx, retrieved_meta) = result.unwrap(); - assert_eq!( - retrieved_tx, *tx, - "The retrieved transaction should match the expected transaction" - ); - assert_eq!( - retrieved_meta, meta, - "The retrieved metadata should match the expected metadata" - ); - - // Database - // Choose a random transaction from the database blocks - let tx = &database_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Create the expected metadata for this transaction - let meta = TransactionMeta { - tx_hash, - index: 0, - block_hash: database_blocks[0].header.hash(), - block_number: database_blocks[0].header.number, - base_fee: database_blocks[0].header.base_fee_per_gas, - excess_blob_gas: None, - timestamp: database_blocks[0].header.timestamp, - }; - - // Ensure the transaction and its metadata can be retrieved from the database - let result = provider.transaction_by_hash_with_meta(tx_hash)?; - let (retrieved_tx, retrieved_meta) = result.unwrap(); + in_memory_provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); assert_eq!( - retrieved_tx, *tx, - "The retrieved transaction should match the expected transaction" + finalized_block.hash(), + in_memory_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Finalized)? + .block_hash(finalized_block.number)? 
+ .unwrap() ); + // test state by block tag for earliest block + let earliest_block = blocks.first().unwrap().clone(); assert_eq!( - retrieved_meta, meta, - "The retrieved metadata should match the expected metadata" + earliest_block.hash(), + only_database_provider + .state_by_block_number_or_tag(BlockNumberOrTag::Earliest)? + .block_hash(earliest_block.number)? + .unwrap() ); - // Generate a random hash not present in any transaction - let random_tx_hash = B256::random(); - - // Ensure the transaction with metadata is not found by the random hash - let result = provider.transaction_by_hash_with_meta(random_tx_hash)?; - assert!(result.is_none(), "Transaction with metadata should not be found"); - Ok(()) } #[test] - fn test_transaction_block() -> eyre::Result<()> { + fn test_canon_state_tracker() -> eyre::Result<()> { let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( + let (provider, _, _, _) = provider_with_random_blocks( &mut rng, TEST_BLOCKS_COUNT, TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, + BlockRangeParams::default(), )?; - // In memory - // Choose a random transaction ID from in-memory blocks - let tx = &in_memory_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Fetch the transaction ID - let tx_id = provider.transaction_id(tx_hash)?.unwrap(); - - // Retrieve the block number for this transaction - let result = provider.transaction_block(tx_id)?; - let block_number = result.unwrap(); - - // Ensure the block number matches the expected block number - assert_eq!( - block_number, in_memory_blocks[0].header.number, - "The block number should match the in-memory block number" - ); - - // Database - // Choose a random transaction from the database block - let tx = &database_blocks[0].body.transactions[0]; - let tx_hash = tx.hash(); - - // Fetch the transaction ID - let tx_id = 
provider.transaction_id(tx_hash)?.unwrap(); + let before = Instant::now(); + provider.on_forkchoice_update_received(&Default::default()); + let last_update_ts = provider.last_received_update_timestamp().unwrap(); + let after = Instant::now(); - // Retrieve the block number for this transaction - let result = provider.transaction_block(tx_id)?; - assert_eq!(Some(0), result, "The block number should match the database block number"); + // Ensure the timestamp is updated and between the before and after timestamps + assert!(before < last_update_ts && last_update_ts < after); - // Ensure that invalid transaction ID returns None - let result = provider.transaction_block(67675657)?; + let before = Instant::now(); + provider.on_transition_configuration_exchanged(); + let last_update_ts = provider.last_exchanged_transition_configuration_timestamp().unwrap(); + let after = Instant::now(); - assert!(result.is_none(), "Block number should not be found for an invalid transaction ID"); + // Ensure the timestamp is updated and between the before and after timestamps + assert!(before < last_update_ts && last_update_ts < after); Ok(()) } #[test] - fn transactions_found_by_block_hash() -> eyre::Result<()> { + fn test_block_id_reader() -> eyre::Result<()> { + // Create a new provider let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( + let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( &mut rng, TEST_BLOCKS_COUNT, TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, + BlockRangeParams::default(), )?; - // In memory - // Choose a random block hash from in-memory blocks - let block_hash = in_memory_blocks[0].header.hash(); + // Set the pending block in memory + let pending_block = in_memory_blocks.last().unwrap(); + provider.canonical_in_memory_state.set_pending_block(ExecutedBlock { + block: 
Arc::new(pending_block.clone()), + senders: Default::default(), + execution_output: Default::default(), + hashed_state: Default::default(), + trie: Default::default(), + }); + + // Set the safe block in memory + let safe_block = in_memory_blocks[in_memory_blocks.len() - 2].clone(); + provider.canonical_in_memory_state.set_safe(safe_block.header.clone()); - // Retrieve the transactions for this block using the block hash - let result = provider.transactions_by_block(BlockHashOrNumber::Hash(block_hash))?; - let transactions = result.unwrap(); + // Set the finalized block in memory + let finalized_block = in_memory_blocks[in_memory_blocks.len() - 3].clone(); + provider.canonical_in_memory_state.set_finalized(finalized_block.header.clone()); - // Ensure the transactions match the expected transactions in the block + // Verify the pending block number and hash assert_eq!( - transactions, in_memory_blocks[0].body.transactions, - "The transactions should match the in-memory block transactions" + provider.pending_block_num_hash()?, + Some(BlockNumHash { number: pending_block.number, hash: pending_block.hash() }) ); - // Database - // Choose a random block hash from the database blocks - let block_hash = database_blocks[0].header.hash(); - - // Retrieve the transactions for this block using the block hash - let result = provider.transactions_by_block(BlockHashOrNumber::Hash(block_hash))?; - let transactions = result.unwrap(); + // Verify the safe block number and hash + assert_eq!( + provider.safe_block_num_hash()?, + Some(BlockNumHash { number: safe_block.number, hash: safe_block.hash() }) + ); - // Ensure the transactions match the expected transactions in the block + // Verify the finalized block number and hash assert_eq!( - transactions, database_blocks[0].body.transactions, - "The transactions should match the database block transactions" + provider.finalized_block_num_hash()?, + Some(BlockNumHash { number: finalized_block.number, hash: finalized_block.hash() }) ); - 
// Generate a random block hash that does not exist - let random_block_hash = B256::random(); + Ok(()) + } - // Try to retrieve transactions for a non-existent block hash - let result = provider.transactions_by_block(BlockHashOrNumber::Hash(random_block_hash))?; + macro_rules! test_by_tx_range { + ([$(($method:ident, $data_extractor:expr)),* $(,)?]) => {{ - // Ensure no transactions are found - assert!(result.is_none(), "No transactions should be found for a non-existent block hash"); + // Get the number methods being tested. + // Since each method tested will move a block from memory to storage, this ensures we have enough. + let extra_blocks = [$(stringify!($method)),*].len(); - Ok(()) - } + let mut rng = generators::rng(); + let (provider, mut database_blocks, mut in_memory_blocks, receipts) = provider_with_random_blocks( + &mut rng, + TEST_BLOCKS_COUNT, + TEST_BLOCKS_COUNT + extra_blocks, + BlockRangeParams { + tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, + ..Default::default() + }, + )?; + let provider = Arc::new(provider); - #[test] - fn transactions_found_by_block_number() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; + $( + // Since data moves for each tried method, need to recalculate everything + let db_tx_count = + database_blocks.iter().map(|b| b.body.transactions.len()).sum::() as u64; + let in_mem_tx_count = + in_memory_blocks.iter().map(|b| b.body.transactions.len()).sum::() as u64; - // In memory - // Choose a random block number from in-memory blocks - let block_number = in_memory_blocks[0].header.number; + let db_range = 0..=(db_tx_count - 1); + let in_mem_range = db_tx_count..=(in_mem_tx_count + db_range.end()); - // Retrieve the transactions for this block using the 
block number - let result = provider.transactions_by_block(BlockHashOrNumber::Number(block_number))?; - let transactions = result.unwrap(); + // Retrieve the expected database data + let database_data = + database_blocks.iter().flat_map(|b| $data_extractor(b, &receipts)).collect::>(); + assert_eq!(provider.$method(db_range.clone())?, database_data, "full db data"); - // Ensure the transactions match the expected transactions in the block - assert_eq!( - transactions, in_memory_blocks[0].body.transactions, - "The transactions should match the in-memory block transactions" - ); + // Retrieve the expected in-memory data + let in_memory_data = + in_memory_blocks.iter().flat_map(|b| $data_extractor(b, &receipts)).collect::>(); + assert_eq!(provider.$method(in_mem_range.clone())?, in_memory_data, "full mem data"); - // Database - // Choose a random block number from the database blocks - let block_number = database_blocks[0].header.number; + // Test partial in-memory range + assert_eq!( + &provider.$method(in_mem_range.start() + 1..=in_mem_range.end() - 1)?, + &in_memory_data[1..in_memory_data.len() - 1], + "partial mem data" + ); - // Retrieve the transactions for this block using the block number - let result = provider.transactions_by_block(BlockHashOrNumber::Number(block_number))?; - let transactions = result.unwrap(); + // Test range in in-memory to unbounded end + assert_eq!(provider.$method(in_mem_range.start() + 1..)?, &in_memory_data[1..], "unbounded mem data"); - // Ensure the transactions match the expected transactions in the block - assert_eq!( - transactions, database_blocks[0].body.transactions, - "The transactions should match the database block transactions" - ); + // Test last element in-memory + assert_eq!(provider.$method(in_mem_range.end()..)?, &in_memory_data[in_memory_data.len() -1 ..], "last mem data"); - // Generate a block number that is out of range (non-existent) - let non_existent_block_number = u64::MAX; + // Test range that spans database 
and in-memory with unbounded end + assert_eq!( + provider.$method(in_mem_range.start() - 2..)?, + database_data[database_data.len() - 2..] + .iter() + .chain(&in_memory_data[..]) + .cloned() + .collect::>(), + "unbounded span data" + ); - // Try to retrieve transactions for a non-existent block number - let result = - provider.transactions_by_block(BlockHashOrNumber::Number(non_existent_block_number))?; + // Test range that spans database and in-memory + #[allow(unused_assignments)] + { + // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. + persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); + + assert_eq!( + provider.$method(in_mem_range.start() - 2..=in_mem_range.end() - 1)?, + database_data[database_data.len() - 2..] + .iter() + .chain(&in_memory_data[..in_memory_data.len() - 1]) + .cloned() + .collect::>(), + "span data" + ); + + // Adjust our blocks accordingly + database_blocks.push(in_memory_blocks.remove(0)); + } - // Ensure no transactions are found - assert!( - result.is_none(), - "No transactions should be found for a non-existent block number" - ); + // Test invalid range + let start_tx_num = u64::MAX; + let end_tx_num = u64::MAX; + let result = provider.$method(start_tx_num..end_tx_num)?; + assert!(result.is_empty(), "No data should be found for an invalid transaction range"); - Ok(()) + // Test empty range + let result = provider.$method(in_mem_range.end()+10..in_mem_range.end()+20)?; + assert!(result.is_empty(), "No data should be found for an empty transaction range"); + )* + }}; } #[test] - fn transactions_found_entirely_in_memory() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, - BlockRangeParams { - tx_count: 
TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; + fn test_methods_by_tx_range() -> eyre::Result<()> { + test_by_tx_range!([ + (senders_by_tx_range, |block: &SealedBlock, _: &Vec>| block + .senders() + .unwrap()), + (transactions_by_tx_range, |block: &SealedBlock, _: &Vec>| block + .body + .transactions + .iter() + .map(|tx| Into::::into(tx.clone())) + .collect::>()), + (receipts_by_tx_range, |block: &SealedBlock, receipts: &Vec>| receipts + [block.number as usize] + .clone()) + ]); + + Ok(()) + } - // In memory - // Define a block range entirely within in-memory blocks - let start_block = in_memory_blocks[0].header.number; - let end_block = in_memory_blocks[1].header.number; + macro_rules! test_by_block_range { + ([$(($method:ident, $data_extractor:expr)),* $(,)?]) => {{ + // Get the number methods being tested. + // Since each method tested will move a block from memory to storage, this ensures we have enough. + let extra_blocks = [$(stringify!($method)),*].len(); - // Retrieve the transactions for this block range - let result = provider.transactions_by_block_range(start_block..=end_block)?; + let mut rng = generators::rng(); + let (provider, mut database_blocks, mut in_memory_blocks, _) = provider_with_random_blocks( + &mut rng, + TEST_BLOCKS_COUNT, + TEST_BLOCKS_COUNT + extra_blocks, + BlockRangeParams { + tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, + ..Default::default() + }, + )?; + let provider = Arc::new(provider); - // Ensure the transactions match the expected transactions in the in-memory blocks - assert_eq!(result.len(), 2); - assert_eq!(result[0], in_memory_blocks[0].body.transactions); - assert_eq!(result[1], in_memory_blocks[1].body.transactions); + $( + // Since data moves for each tried method, need to recalculate everything + let db_block_count = database_blocks.len() as u64; + let in_mem_block_count = in_memory_blocks.len() as u64; - // Database - // Define a block range entirely within database 
blocks - let start_block = database_blocks[0].header.number; - let end_block = database_blocks[1].header.number; + let db_range = 0..=db_block_count - 1; + let in_mem_range = db_block_count..=(in_mem_block_count + db_range.end()); - // Retrieve the transactions for this block range - let result = provider.transactions_by_block_range(start_block..=end_block)?; + // Retrieve the expected database data + let database_data = + database_blocks.iter().map(|b| $data_extractor(b)).collect::>(); + assert_eq!(provider.$method(db_range.clone())?, database_data); - // Ensure the transactions match the expected transactions in the database blocks - assert_eq!(result.len(), 2); - assert_eq!(result[0], database_blocks[0].body.transactions); - assert_eq!(result[1], database_blocks[1].body.transactions); + // Retrieve the expected in-memory data + let in_memory_data = + in_memory_blocks.iter().map(|b| $data_extractor(b)).collect::>(); + assert_eq!(provider.$method(in_mem_range.clone())?, in_memory_data); - Ok(()) - } + // Test partial in-memory range + assert_eq!( + &provider.$method(in_mem_range.start() + 1..=in_mem_range.end() - 1)?, + &in_memory_data[1..in_memory_data.len() - 1] + ); - #[test] - fn test_transactions_by_tx_range() -> eyre::Result<()> { - let mut rng = generators::rng(); - let (provider, database_blocks, _, _) = provider_with_random_blocks( - &mut rng, - TEST_BLOCKS_COUNT, - 0, - BlockRangeParams { - tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, - ..Default::default() - }, - )?; + // Test range that spans database and in-memory + { - // Define a valid transaction range within the database - let start_tx_num = 0; - let end_tx_num = 1; + // This block will be persisted to disk and removed from memory AFTER the firsk database query. This ensures that we query the in-memory state before the database avoiding any race condition. 
+ persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); - // Retrieve the transactions for this transaction number range - let result = provider.transactions_by_tx_range(start_tx_num..=end_tx_num)?; + assert_eq!( + provider.$method(in_mem_range.start() - 2..=in_mem_range.end() - 1)?, + database_data[database_data.len() - 2..] + .iter() + .chain(&in_memory_data[..in_memory_data.len() - 1]) + .cloned() + .collect::>() + ); - // Ensure the transactions match the expected transactions in the database - assert_eq!(result.len(), 2); - assert_eq!(result[0], database_blocks[0].body.transactions[0].clone().into()); - assert_eq!(result[1], database_blocks[0].body.transactions[1].clone().into()); + // Adjust our blocks accordingly + database_blocks.push(in_memory_blocks.remove(0)); + } - // Define an empty range that should return no transactions - let start_tx_num = u64::MAX; - let end_tx_num = u64::MAX; + // Test invalid range + let start_block_num = u64::MAX; + let end_block_num = u64::MAX; + let result = provider.$method(start_block_num..=end_block_num-1)?; + assert!(result.is_empty(), "No data should be found for an invalid block range"); - // Retrieve the transactions for this range - let result = provider.transactions_by_tx_range(start_tx_num..end_tx_num)?; + // Test valid range with empty results + let result = provider.$method(in_mem_range.end() + 10..=in_mem_range.end() + 20)?; + assert!(result.is_empty(), "No data should be found for an empty block range"); + )* + }}; + } - // Ensure no transactions are returned - assert!( - result.is_empty(), - "No transactions should be found for an empty transaction range" - ); + #[test] + fn test_methods_by_block_range() -> eyre::Result<()> { + // todo(joshie) add canonical_hashes_range below after changing its interface into range + // instead start end + test_by_block_range!([ + (headers_range, |block: &SealedBlock| block.header().clone()), + (sealed_headers_range, |block: &SealedBlock| 
block.header.clone()), + (block_range, |block: &SealedBlock| block.clone().unseal()), + (block_with_senders_range, |block: &SealedBlock| block + .clone() + .unseal() + .with_senders_unchecked(vec![])), + (sealed_block_with_senders_range, |block: &SealedBlock| block + .clone() + .with_senders_unchecked(vec![])), + (transactions_by_block_range, |block: &SealedBlock| block.body.transactions.clone()), + ]); Ok(()) } - #[test] - fn test_senders_by_tx_range() -> eyre::Result<()> { + /// Helper macro to call a provider method based on argument count and check its result + macro_rules! call_method { + ($provider:expr, $method:ident, ($($args:expr),*), $expected_item:expr) => {{ + let result = $provider.$method($($args),*)?; + assert_eq!( + result, + $expected_item, + "{}: item does not match the expected item for arguments {:?}", + stringify!($method), + ($($args),*) + ); + }}; + + // Handle valid or invalid arguments for one argument + (ONE, $provider:expr, $method:ident, $item_extractor:expr, $txnum:expr, $txhash:expr, $block:expr, $receipts:expr) => {{ + let (arg, expected_item) = $item_extractor($block, $txnum($block), $txhash($block), $receipts); + call_method!($provider, $method, (arg), expected_item); + }}; + + // Handle valid or invalid arguments for two arguments + (TWO, $provider:expr, $method:ident, $item_extractor:expr, $txnum:expr, $txhash:expr, $block:expr, $receipts:expr) => {{ + let ((arg1, arg2), expected_item) = $item_extractor($block, $txnum($block), $txhash($block), $receipts); + call_method!($provider, $method, (arg1, arg2), expected_item); + }}; + } + + /// Macro to test non-range methods. + /// + /// ( `NUMBER_ARGUMENTS`, METHOD, FN -> ((`METHOD_ARGUMENT(s)`,...), `EXPECTED_RESULT`), + /// `INVALID_ARGUMENTS`) + macro_rules! test_non_range { + ([$(($arg_count:ident, $method:ident, $item_extractor:expr, $invalid_args:expr)),* $(,)?]) => {{ + + // Get the number of methods being tested. 
+ // Since each method tested will move a block from memory to storage, this ensures we have enough. + let extra_blocks = [$(stringify!($arg_count)),*].len(); + let mut rng = generators::rng(); - let (provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( + let (provider, mut database_blocks, in_memory_blocks, receipts) = provider_with_random_blocks( &mut rng, TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, + TEST_BLOCKS_COUNT + extra_blocks, BlockRangeParams { tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, ..Default::default() }, )?; + let provider = Arc::new(provider); - let db_tx_count = - database_blocks.iter().map(|b| b.body.transactions.len()).sum::() as u64; - let in_mem_tx_count = - in_memory_blocks.iter().map(|b| b.body.transactions.len()).sum::() as u64; + let mut in_memory_blocks: std::collections::VecDeque<_> = in_memory_blocks.into(); - let db_range = 0..=(db_tx_count - 1); - let in_mem_range = db_tx_count..=(in_mem_tx_count + db_range.end()); + $( + let tx_hash = |block: &SealedBlock| block.body.transactions[0].hash(); + let tx_num = |block: &SealedBlock| { + database_blocks + .iter() + .chain(in_memory_blocks.iter()) + .take_while(|b| b.number < block.number) + .map(|b| b.body.transactions.len()) + .sum::() as u64 + }; - // Retrieve the senders for the whole database range - let database_senders = - database_blocks.iter().flat_map(|b| b.senders().unwrap()).collect::>(); - assert_eq!(provider.senders_by_tx_range(db_range)?, database_senders); + // Ensure that the first generated in-memory block exists + { + // This block will be persisted to disk and removed from memory AFTER the first database query. This ensures that we query the in-memory state before the database avoiding any race condition. 
+ persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); - // Retrieve the senders for the whole in-memory range - let in_memory_senders = - in_memory_blocks.iter().flat_map(|b| b.senders().unwrap()).collect::>(); - assert_eq!(provider.senders_by_tx_range(in_mem_range.clone())?, in_memory_senders); + call_method!($arg_count, provider, $method, $item_extractor, tx_num, tx_hash, &in_memory_blocks[0], &receipts); - // Retrieve the senders for a partial in-memory range - assert_eq!( - &provider.senders_by_tx_range(in_mem_range.start() + 1..=in_mem_range.end() - 1)?, - &in_memory_senders[1..in_memory_senders.len() - 1] - ); + // Move the block as well in our own structures + database_blocks.push(in_memory_blocks.pop_front().unwrap()); + } - // Retrieve the senders for a range that spans database and in-memory - assert_eq!( - provider.senders_by_tx_range(in_mem_range.start() - 2..=in_mem_range.end() - 1)?, - database_senders[database_senders.len() - 2..] - .iter() - .chain(&in_memory_senders[..in_memory_senders.len() - 1]) - .copied() - .collect::>() - ); + // database_blocks is changed above + let tx_num = |block: &SealedBlock| { + database_blocks + .iter() + .chain(in_memory_blocks.iter()) + .take_while(|b| b.number < block.number) + .map(|b| b.body.transactions.len()) + .sum::() as u64 + }; + + // Invalid/Non-existent argument should return `None` + { + call_method!($arg_count, provider, $method, |_,_,_,_| ( ($invalid_args, None)), tx_num, tx_hash, &in_memory_blocks[0], &receipts); + } - // Define an empty range that should return no sender addresses - let start_tx_num = u64::MAX; - let end_tx_num = u64::MAX; + // Check that the item is only in memory and not in database + { + let last_mem_block = &in_memory_blocks[in_memory_blocks.len() - 1]; - // Retrieve the senders for this range - let result = provider.senders_by_tx_range(start_tx_num..end_tx_num)?; + let (args, expected_item) = $item_extractor(last_mem_block, tx_num(last_mem_block), 
tx_hash(last_mem_block), &receipts); + call_method!($arg_count, provider, $method, |_,_,_,_| (args.clone(), expected_item), tx_num, tx_hash, last_mem_block, &receipts); - // Ensure no sender addresses are returned - assert!( - result.is_empty(), - "No sender addresses should be found for an empty transaction range" - ); + // Ensure the item is not in storage + call_method!($arg_count, provider.database, $method, |_,_,_,_| ( (args, None)), tx_num, tx_hash, last_mem_block, &receipts); + } + )* + }}; +} + + #[test] + fn test_non_range_methods() -> eyre::Result<()> { + let test_tx_index = 0; + + test_non_range!([ + // TODO: header should use B256 like others instead of &B256 + // ( + // ONE, + // header, + // |block: &SealedBlock, tx_num: TxNumber, tx_hash: B256, receipts: &Vec>| (&block.hash(), Some(block.header.header().clone())), + // (&B256::random()) + // ), + ( + ONE, + header_by_number, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + block.number, + Some(block.header.header().clone()) + ), + u64::MAX + ), + ( + ONE, + sealed_header, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + block.number, + Some(block.header.clone()) + ), + u64::MAX + ), + ( + ONE, + block_hash, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + block.number, + Some(block.hash()) + ), + u64::MAX + ), + ( + ONE, + block_number, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + block.hash(), + Some(block.number) + ), + B256::random() + ), + ( + ONE, + block, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + BlockHashOrNumber::Hash(block.hash()), + Some(block.clone().unseal()) + ), + BlockHashOrNumber::Hash(B256::random()) + ), + ( + ONE, + block, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + BlockHashOrNumber::Number(block.number), + Some(block.clone().unseal()) + ), + BlockHashOrNumber::Number(u64::MAX) + ), + ( + ONE, + block_body_indices, + |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( + block.number, + 
Some(StoredBlockBodyIndices { + first_tx_num: tx_num, + tx_count: block.body.transactions.len() as u64 + }) + ), + u64::MAX + ), + ( + TWO, + block_with_senders, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), + block.clone().unseal().with_recovered_senders() + ), + (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) + ), + ( + TWO, + block_with_senders, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), + block.clone().unseal().with_recovered_senders() + ), + (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) + ), + ( + TWO, + sealed_block_with_senders, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + (BlockHashOrNumber::Number(block.number), TransactionVariant::WithHash), + Some( + block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + ) + ), + (BlockHashOrNumber::Number(u64::MAX), TransactionVariant::WithHash) + ), + ( + TWO, + sealed_block_with_senders, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + (BlockHashOrNumber::Hash(block.hash()), TransactionVariant::WithHash), + Some( + block.clone().unseal().with_recovered_senders().unwrap().seal(block.hash()) + ) + ), + (BlockHashOrNumber::Hash(B256::random()), TransactionVariant::WithHash) + ), + ( + ONE, + transaction_id, + |_: &SealedBlock, tx_num: TxNumber, tx_hash: B256, _: &Vec>| ( + tx_hash, + Some(tx_num) + ), + B256::random() + ), + ( + ONE, + transaction_by_id, + |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( + tx_num, + Some(block.body.transactions[test_tx_index].clone()) + ), + u64::MAX + ), + ( + ONE, + transaction_by_id_no_hash, + |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( + tx_num, + Some(Into::::into( + block.body.transactions[test_tx_index].clone() + )) + ), + u64::MAX + ), + ( + ONE, + transaction_by_hash, + |block: 
&SealedBlock, _: TxNumber, tx_hash: B256, _: &Vec>| ( + tx_hash, + Some(block.body.transactions[test_tx_index].clone()) + ), + B256::random() + ), + ( + ONE, + transaction_block, + |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( + tx_num, + Some(block.number) + ), + u64::MAX + ), + ( + ONE, + transactions_by_block, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + BlockHashOrNumber::Number(block.number), + Some(block.body.transactions.clone()) + ), + BlockHashOrNumber::Number(u64::MAX) + ), + ( + ONE, + transactions_by_block, + |block: &SealedBlock, _: TxNumber, _: B256, _: &Vec>| ( + BlockHashOrNumber::Hash(block.hash()), + Some(block.body.transactions.clone()) + ), + BlockHashOrNumber::Number(u64::MAX) + ), + ( + ONE, + transaction_sender, + |block: &SealedBlock, tx_num: TxNumber, _: B256, _: &Vec>| ( + tx_num, + block.body.transactions[test_tx_index].recover_signer() + ), + u64::MAX + ), + ( + ONE, + receipt, + |block: &SealedBlock, tx_num: TxNumber, _: B256, receipts: &Vec>| ( + tx_num, + Some(receipts[block.number as usize][test_tx_index].clone()) + ), + u64::MAX + ), + ( + ONE, + receipt_by_hash, + |block: &SealedBlock, _: TxNumber, tx_hash: B256, receipts: &Vec>| ( + tx_hash, + Some(receipts[block.number as usize][test_tx_index].clone()) + ), + B256::random() + ), + ( + ONE, + receipts_by_block, + |block: &SealedBlock, _: TxNumber, _: B256, receipts: &Vec>| ( + BlockHashOrNumber::Number(block.number), + Some(receipts[block.number as usize].clone()) + ), + BlockHashOrNumber::Number(u64::MAX) + ), + ( + ONE, + receipts_by_block, + |block: &SealedBlock, _: TxNumber, _: B256, receipts: &Vec>| ( + BlockHashOrNumber::Hash(block.hash()), + Some(receipts[block.number as usize].clone()) + ), + BlockHashOrNumber::Hash(B256::random()) + ), + // TODO: withdrawals, requests, ommers + ]); Ok(()) } #[test] - fn transaction_sender_found_in_memory() -> eyre::Result<()> { + fn test_race() -> eyre::Result<()> { let mut rng = generators::rng(); - let 
(provider, database_blocks, in_memory_blocks, _) = provider_with_random_blocks( + let (provider, _, in_memory_blocks, _) = provider_with_random_blocks( &mut rng, - TEST_BLOCKS_COUNT, - TEST_BLOCKS_COUNT, + TEST_BLOCKS_COUNT - 1, + TEST_BLOCKS_COUNT + 1, BlockRangeParams { tx_count: TEST_TRANSACTIONS_COUNT..TEST_TRANSACTIONS_COUNT, ..Default::default() }, )?; - // In memory - // Choose a random transaction from the in-memory block - let tx = &in_memory_blocks[0].body.transactions[0]; - - // Retrieve the transaction ID - let tx_id = provider.transaction_id(tx.hash())?.unwrap(); - - // Retrieve the sender address for this transaction - let result = provider.transaction_sender(tx_id)?; - - // Ensure the sender address matches the expected sender address - let expected_sender = tx.recover_signer().unwrap(); - assert_eq!( - result, - Some(expected_sender), - "The sender address should match the expected sender address" - ); - - // Database - // Choose a random transaction from the database block - let tx = &database_blocks[0].body.transactions[0]; - - // Retrieve the transaction ID - let tx_id = provider.transaction_id(tx.hash())?.unwrap(); - - // Retrieve the sender address for this transaction - let result = provider.transaction_sender(tx_id)?; + let provider = Arc::new(provider); + + // Old implementation was querying the database first. This is problematic, if there are + // changes AFTER the database transaction is created. 
+ let old_transaction_hash_fn = + |hash: B256, + canonical_in_memory_state: CanonicalInMemoryState, + factory: ProviderFactory| { + assert!(factory.transaction_by_hash(hash)?.is_none(), "should not be in database"); + Ok::<_, ProviderError>(canonical_in_memory_state.transaction_by_hash(hash)) + }; + + // Correct implementation queries in-memory first + let correct_transaction_hash_fn = + |hash: B256, + canonical_in_memory_state: CanonicalInMemoryState, + _factory: ProviderFactory| { + if let Some(tx) = canonical_in_memory_state.transaction_by_hash(hash) { + return Ok::<_, ProviderError>(Some(tx)) + } + panic!("should not be in database"); + // _factory.transaction_by_hash(hash) + }; - // Ensure the sender address matches the expected sender address - let expected_sender = tx.recover_signer().unwrap(); - assert_eq!( - result, - Some(expected_sender), - "The sender address should match the expected sender address" - ); + // OLD BEHAVIOUR + { + // This will persist block 1 AFTER a database is created. Moving it from memory to + // storage. + persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[0].number); + let to_be_persisted_tx = in_memory_blocks[0].body.transactions[0].clone(); - // Generate a random transaction ID that does not exist - let invalid_tx_id = u64::MAX; + // Even though the block exists, given the order of provider queries done in the method + // above, we do not see it. + assert_eq!( + old_transaction_hash_fn( + to_be_persisted_tx.hash(), + provider.canonical_in_memory_state(), + provider.database.clone() + ), + Ok(None) + ); + } - // Attempt to retrieve the sender address for this invalid transaction ID - let result = provider.transaction_sender(invalid_tx_id)?; + // CORRECT BEHAVIOUR + { + // This will persist block 1 AFTER a database is created. Moving it from memory to + // storage. 
+ persist_block_after_db_tx_creation(provider.clone(), in_memory_blocks[1].number); + let to_be_persisted_tx = in_memory_blocks[1].body.transactions[0].clone(); - // Ensure no sender address is found - assert!( - result.is_none(), - "No sender address should be found for an invalid transaction ID" - ); + assert_eq!( + correct_transaction_hash_fn( + to_be_persisted_tx.hash(), + provider.canonical_in_memory_state(), + provider.database.clone() + ), + Ok(Some(to_be_persisted_tx)) + ); + } Ok(()) } diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index 6fe3fa85cb895..296dae8c6ab7e 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -31,6 +31,20 @@ impl BundleStateProvider pub const fn new(state_provider: SP, block_execution_data_provider: EDP) -> Self { Self { state_provider, block_execution_data_provider } } + + /// Retrieve hashed storage for target address. 
+ fn get_hashed_storage(&self, address: Address) -> HashedStorage { + let bundle_state = self.block_execution_data_provider.execution_outcome().state(); + bundle_state + .account(&address) + .map(|account| { + HashedStorage::from_plain_storage( + account.status, + account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), + ) + }) + .unwrap_or_else(|| HashedStorage::new(false)) + } } /* Implement StateProvider traits */ @@ -109,19 +123,21 @@ impl StorageRootProvider address: Address, hashed_storage: HashedStorage, ) -> ProviderResult { - let bundle_state = self.block_execution_data_provider.execution_outcome().state(); - let mut storage = bundle_state - .account(&address) - .map(|account| { - HashedStorage::from_plain_storage( - account.status, - account.storage.iter().map(|(slot, value)| (slot, &value.present_value)), - ) - }) - .unwrap_or_else(|| HashedStorage::new(false)); + let mut storage = self.get_hashed_storage(address); storage.extend(&hashed_storage); self.state_provider.storage_root(address, storage) } + + fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut storage = self.get_hashed_storage(address); + storage.extend(&hashed_storage); + self.state_provider.storage_proof(address, slot, storage) + } } impl StateProofProvider diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 8627cacabb4ca..1afd4da3fa8cc 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -46,7 +46,7 @@ use reth_primitives::{ }; use reth_prune_types::{PruneCheckpoint, PruneModes, PruneSegment}; use reth_stages_types::{StageCheckpoint, StageId}; -use reth_storage_api::TryIntoHistoricalStateProvider; +use reth_storage_api::{StorageChangeSetReader, TryIntoHistoricalStateProvider}; use reth_storage_errors::provider::{ProviderResult, 
RootMismatch}; use reth_trie::{ prefix_set::{PrefixSet, PrefixSetMut, TriePrefixSets}, @@ -1414,6 +1414,21 @@ impl AccountExtReader for DatabaseProvider StorageChangeSetReader for DatabaseProvider { + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult> { + let range = block_number..=block_number; + let storage_range = BlockNumberAddress::range(range); + self.tx + .cursor_dup_read::()? + .walk_range(storage_range)? + .map(|result| -> ProviderResult<_> { Ok(result?) }) + .collect() + } +} + impl ChangeSetReader for DatabaseProvider { fn account_block_changeset( &self, diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index de30f89c98ee5..781a11f6decac 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -17,12 +17,14 @@ use reth_primitives::{constants::EPOCH_SLOTS, Account, Bytecode, StaticFileSegme use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - proof::Proof, updates::TrieUpdates, witness::TrieWitness, AccountProof, HashedPostState, - HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + proof::{Proof, StorageProof}, + updates::TrieUpdates, + witness::TrieWitness, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, }; use reth_trie_db::{ DatabaseHashedPostState, DatabaseHashedStorage, DatabaseProof, DatabaseStateRoot, - DatabaseStorageRoot, DatabaseTrieWitness, + DatabaseStorageProof, DatabaseStorageRoot, DatabaseTrieWitness, }; use std::fmt::Debug; @@ -227,7 +229,7 @@ impl<'b, TX: DbTx> HistoricalStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> AccountReader for HistoricalStateProviderRef<'b, TX> { +impl AccountReader for HistoricalStateProviderRef<'_, TX> { /// Get basic account information. 
fn basic_account(&self, address: Address) -> ProviderResult> { match self.account_history_lookup(address)? { @@ -249,7 +251,7 @@ impl<'b, TX: DbTx> AccountReader for HistoricalStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> BlockHashReader for HistoricalStateProviderRef<'b, TX> { +impl BlockHashReader for HistoricalStateProviderRef<'_, TX> { /// Get block hash by number. fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -285,7 +287,7 @@ impl<'b, TX: DbTx> BlockHashReader for HistoricalStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { +impl StateRootProvider for HistoricalStateProviderRef<'_, TX> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { let mut revert_state = self.revert_state()?; revert_state.extend(hashed_state); @@ -319,7 +321,7 @@ impl<'b, TX: DbTx> StateRootProvider for HistoricalStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> StorageRootProvider for HistoricalStateProviderRef<'b, TX> { +impl StorageRootProvider for HistoricalStateProviderRef<'_, TX> { fn storage_root( &self, address: Address, @@ -330,9 +332,21 @@ impl<'b, TX: DbTx> StorageRootProvider for HistoricalStateProviderRef<'b, TX> { StorageRoot::overlay_root(self.tx, address, revert_storage) .map_err(|err| ProviderError::Database(err.into())) } + + fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult { + let mut revert_storage = self.revert_storage(address)?; + revert_storage.extend(&hashed_storage); + StorageProof::overlay_storage_proof(self.tx, address, slot, revert_storage) + .map_err(Into::::into) + } } -impl<'b, TX: DbTx> StateProofProvider for HistoricalStateProviderRef<'b, TX> { +impl StateProofProvider for HistoricalStateProviderRef<'_, TX> { /// Get account and storage proofs. 
fn proof( &self, @@ -364,7 +378,7 @@ impl<'b, TX: DbTx> StateProofProvider for HistoricalStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> StateProvider for HistoricalStateProviderRef<'b, TX> { +impl StateProvider for HistoricalStateProviderRef<'_, TX> { /// Get storage. fn storage( &self, diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index f63eaee23862c..fdcbfc4937fec 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -15,10 +15,15 @@ use reth_primitives::{Account, Bytecode, StaticFileSegment}; use reth_storage_api::{StateProofProvider, StorageRootProvider}; use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{ - proof::Proof, updates::TrieUpdates, witness::TrieWitness, AccountProof, HashedPostState, - HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, + proof::{Proof, StorageProof}, + updates::TrieUpdates, + witness::TrieWitness, + AccountProof, HashedPostState, HashedStorage, MultiProof, StateRoot, StorageRoot, TrieInput, +}; +use reth_trie_db::{ + DatabaseProof, DatabaseStateRoot, DatabaseStorageProof, DatabaseStorageRoot, + DatabaseTrieWitness, }; -use reth_trie_db::{DatabaseProof, DatabaseStateRoot, DatabaseStorageRoot, DatabaseTrieWitness}; /// State provider over latest state that takes tx reference. #[derive(Debug)] @@ -36,14 +41,14 @@ impl<'b, TX: DbTx> LatestStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> AccountReader for LatestStateProviderRef<'b, TX> { +impl AccountReader for LatestStateProviderRef<'_, TX> { /// Get basic account information. fn basic_account(&self, address: Address) -> ProviderResult> { self.tx.get::(address).map_err(Into::into) } } -impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { +impl BlockHashReader for LatestStateProviderRef<'_, TX> { /// Get block hash by number. 
fn block_hash(&self, number: u64) -> ProviderResult> { self.static_file_provider.get_with_static_file_or_database( @@ -79,7 +84,7 @@ impl<'b, TX: DbTx> BlockHashReader for LatestStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { +impl StateRootProvider for LatestStateProviderRef<'_, TX> { fn state_root(&self, hashed_state: HashedPostState) -> ProviderResult { StateRoot::overlay_root(self.tx, hashed_state) .map_err(|err| ProviderError::Database(err.into())) @@ -107,7 +112,7 @@ impl<'b, TX: DbTx> StateRootProvider for LatestStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> StorageRootProvider for LatestStateProviderRef<'b, TX> { +impl StorageRootProvider for LatestStateProviderRef<'_, TX> { fn storage_root( &self, address: Address, @@ -116,9 +121,19 @@ impl<'b, TX: DbTx> StorageRootProvider for LatestStateProviderRef<'b, TX> { StorageRoot::overlay_root(self.tx, address, hashed_storage) .map_err(|err| ProviderError::Database(err.into())) } + + fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult { + StorageProof::overlay_storage_proof(self.tx, address, slot, hashed_storage) + .map_err(Into::::into) + } } -impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { +impl StateProofProvider for LatestStateProviderRef<'_, TX> { fn proof( &self, input: TrieInput, @@ -146,7 +161,7 @@ impl<'b, TX: DbTx> StateProofProvider for LatestStateProviderRef<'b, TX> { } } -impl<'b, TX: DbTx> StateProvider for LatestStateProviderRef<'b, TX> { +impl StateProvider for LatestStateProviderRef<'_, TX> { /// Get storage. fn storage( &self, diff --git a/crates/storage/provider/src/providers/state/macros.rs b/crates/storage/provider/src/providers/state/macros.rs index 388b59ab0a1e1..b90924354c434 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -48,7 +48,8 @@ macro_rules! 
delegate_provider_impls { fn state_root_from_nodes_with_updates(&self, input: reth_trie::TrieInput) -> reth_storage_errors::provider::ProviderResult<(alloy_primitives::B256, reth_trie::updates::TrieUpdates)>; } StorageRootProvider $(where [$($generics)*])? { - fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_root(&self, address: alloy_primitives::Address, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; + fn storage_proof(&self, address: alloy_primitives::Address, slot: alloy_primitives::B256, storage: reth_trie::HashedStorage) -> reth_storage_errors::provider::ProviderResult; } StateProofProvider $(where [$($generics)*])? { fn proof(&self, input: reth_trie::TrieInput, address: alloy_primitives::Address, slots: &[alloy_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 6372bad244244..8d1dbd117cfb9 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -75,7 +75,7 @@ impl<'a> StaticFileJarProvider<'a> { } } -impl<'a> HeaderProvider for StaticFileJarProvider<'a> { +impl HeaderProvider for StaticFileJarProvider<'_> { fn header(&self, block_hash: &BlockHash) -> ProviderResult> { Ok(self .cursor()? 
@@ -147,7 +147,7 @@ impl<'a> HeaderProvider for StaticFileJarProvider<'a> { } } -impl<'a> BlockHashReader for StaticFileJarProvider<'a> { +impl BlockHashReader for StaticFileJarProvider<'_> { fn block_hash(&self, number: u64) -> ProviderResult> { self.cursor()?.get_one::>(number.into()) } @@ -169,7 +169,7 @@ impl<'a> BlockHashReader for StaticFileJarProvider<'a> { } } -impl<'a> BlockNumReader for StaticFileJarProvider<'a> { +impl BlockNumReader for StaticFileJarProvider<'_> { fn chain_info(&self) -> ProviderResult { // Information on live database Err(ProviderError::UnsupportedProvider) @@ -194,7 +194,7 @@ impl<'a> BlockNumReader for StaticFileJarProvider<'a> { } } -impl<'a> TransactionsProvider for StaticFileJarProvider<'a> { +impl TransactionsProvider for StaticFileJarProvider<'_> { fn transaction_id(&self, hash: TxHash) -> ProviderResult> { let mut cursor = self.cursor()?; @@ -290,7 +290,7 @@ impl<'a> TransactionsProvider for StaticFileJarProvider<'a> { } } -impl<'a> ReceiptProvider for StaticFileJarProvider<'a> { +impl ReceiptProvider for StaticFileJarProvider<'_> { fn receipt(&self, num: TxNumber) -> ProviderResult> { self.cursor()?.get_one::>(num.into()) } diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index d086c5693ca52..3858f1b140233 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -67,14 +67,14 @@ pub struct StaticFileProviderRWRefMut<'a>( pub(crate) RwLockWriteGuard<'a, RawRwLock, Option>, ); -impl<'a> std::ops::DerefMut for StaticFileProviderRWRefMut<'a> { +impl std::ops::DerefMut for StaticFileProviderRWRefMut<'_> { fn deref_mut(&mut self) -> &mut Self::Target { // This is always created by [`StaticFileWriters::get_or_create`] self.0.as_mut().expect("static file writer provider should be init") } } -impl<'a> std::ops::Deref for StaticFileProviderRWRefMut<'a> { +impl 
std::ops::Deref for StaticFileProviderRWRefMut<'_> { type Target = StaticFileProviderRW; fn deref(&self) -> &Self::Target { diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 4f2faad8abe4f..3325d3ae9edba 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -31,7 +31,8 @@ use reth_storage_api::{ }; use reth_storage_errors::provider::{ConsistentViewError, ProviderError, ProviderResult}; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, + TrieInput, }; use revm::primitives::{BlockEnv, CfgEnvWithHandlerCfg}; use std::{ @@ -639,6 +640,15 @@ impl StorageRootProvider for MockEthProvider { ) -> ProviderResult { Ok(EMPTY_ROOT_HASH) } + + fn storage_proof( + &self, + _address: Address, + slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(StorageProof::new(slot)) + } } impl StateProofProvider for MockEthProvider { diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index e8b7760b880b7..0a205389c9b61 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -356,6 +356,15 @@ impl StorageRootProvider for NoopProvider { ) -> ProviderResult { Ok(B256::default()) } + + fn storage_proof( + &self, + _address: Address, + slot: B256, + _hashed_storage: HashedStorage, + ) -> ProviderResult { + Ok(reth_trie::StorageProof::new(slot)) + } } impl StateProofProvider for NoopProvider { diff --git a/crates/storage/provider/src/writer/database.rs b/crates/storage/provider/src/writer/database.rs index 3ae42b4bf1cb4..1436fb8a6ab95 100644 --- a/crates/storage/provider/src/writer/database.rs +++ b/crates/storage/provider/src/writer/database.rs @@ -9,7 +9,7 @@ use 
reth_storage_api::ReceiptWriter; pub(crate) struct DatabaseWriter<'a, W>(pub(crate) &'a mut W); -impl<'a, W> ReceiptWriter for DatabaseWriter<'a, W> +impl ReceiptWriter for DatabaseWriter<'_, W> where W: DbCursorRO + DbCursorRW, { diff --git a/crates/storage/provider/src/writer/mod.rs b/crates/storage/provider/src/writer/mod.rs index ecb1de335559f..5b16b2da4e5a2 100644 --- a/crates/storage/provider/src/writer/mod.rs +++ b/crates/storage/provider/src/writer/mod.rs @@ -147,7 +147,7 @@ impl UnifiedStorageWriter<'_, (), ()> { } } -impl<'a, 'b, ProviderDB> UnifiedStorageWriter<'a, ProviderDB, &'b StaticFileProvider> +impl UnifiedStorageWriter<'_, ProviderDB, &StaticFileProvider> where ProviderDB: DBProvider + BlockWriter @@ -318,7 +318,7 @@ where } } -impl<'a, 'b, ProviderDB> UnifiedStorageWriter<'a, ProviderDB, StaticFileProviderRWRefMut<'b>> +impl UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> where ProviderDB: DBProvider + HeaderProvider, { @@ -429,7 +429,7 @@ where } } -impl<'a, 'b, ProviderDB> UnifiedStorageWriter<'a, ProviderDB, StaticFileProviderRWRefMut<'b>> +impl UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> where ProviderDB: DBProvider + HeaderProvider, { @@ -510,8 +510,8 @@ where } } -impl<'a, 'b, ProviderDB> StateWriter - for UnifiedStorageWriter<'a, ProviderDB, StaticFileProviderRWRefMut<'b>> +impl StateWriter + for UnifiedStorageWriter<'_, ProviderDB, StaticFileProviderRWRefMut<'_>> where ProviderDB: DBProvider + StateChangeWriter + HeaderProvider, { diff --git a/crates/storage/provider/src/writer/static_file.rs b/crates/storage/provider/src/writer/static_file.rs index aca226ca9b752..5514e211e58f9 100644 --- a/crates/storage/provider/src/writer/static_file.rs +++ b/crates/storage/provider/src/writer/static_file.rs @@ -6,7 +6,7 @@ use reth_storage_api::ReceiptWriter; pub(crate) struct StaticFileWriter<'a, W>(pub(crate) &'a mut W); -impl<'a> ReceiptWriter for StaticFileWriter<'a, StaticFileProviderRWRefMut<'_>> { 
+impl ReceiptWriter for StaticFileWriter<'_, StaticFileProviderRWRefMut<'_>> { fn append_block_receipts( &mut self, first_tx_index: TxNumber, diff --git a/crates/storage/storage-api/src/storage.rs b/crates/storage/storage-api/src/storage.rs index 91d0bc8c73535..e1443347e4bb8 100644 --- a/crates/storage/storage-api/src/storage.rs +++ b/crates/storage/storage-api/src/storage.rs @@ -1,4 +1,5 @@ use alloy_primitives::{Address, BlockNumber, B256}; +use reth_db_api::models::BlockNumberAddress; use reth_primitives::StorageEntry; use reth_storage_errors::provider::ProviderResult; use std::{ @@ -30,3 +31,13 @@ pub trait StorageReader: Send + Sync { range: RangeInclusive, ) -> ProviderResult>>; } + +/// Storage ChangeSet reader +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait StorageChangeSetReader: Send + Sync { + /// Iterate over storage changesets and return the storage state from before this block. + fn storage_changeset( + &self, + block_number: BlockNumber, + ) -> ProviderResult>; +} diff --git a/crates/storage/storage-api/src/trie.rs b/crates/storage/storage-api/src/trie.rs index d989def8bb0d0..f7d41066d0699 100644 --- a/crates/storage/storage-api/src/trie.rs +++ b/crates/storage/storage-api/src/trie.rs @@ -4,7 +4,8 @@ use alloy_primitives::{ }; use reth_storage_errors::provider::ProviderResult; use reth_trie::{ - updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, TrieInput, + updates::TrieUpdates, AccountProof, HashedPostState, HashedStorage, MultiProof, StorageProof, + TrieInput, }; /// A type that can compute the state root of a given post state. @@ -46,6 +47,15 @@ pub trait StorageRootProvider: Send + Sync { /// state. fn storage_root(&self, address: Address, hashed_storage: HashedStorage) -> ProviderResult; + + /// Returns the storage proof of the `HashedStorage` for target slot on top of the current + /// state. 
+ fn storage_proof( + &self, + address: Address, + slot: B256, + hashed_storage: HashedStorage, + ) -> ProviderResult; } /// A type that can generate state proof on top of a given post state. diff --git a/crates/transaction-pool/src/config.rs b/crates/transaction-pool/src/config.rs index 623493e6c9d43..1b4b010a8e14f 100644 --- a/crates/transaction-pool/src/config.rs +++ b/crates/transaction-pool/src/config.rs @@ -7,7 +7,8 @@ use reth_primitives::{ constants::{ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE}, EIP4844_TX_TYPE_ID, }; -use std::collections::HashSet; +use std::{collections::HashSet, ops::Mul}; + /// Guarantees max transactions for one sender, compatible with geth/erigon pub const TXPOOL_MAX_ACCOUNT_SLOTS_PER_SENDER: usize = 16; @@ -107,6 +108,15 @@ impl SubPoolLimit { } } +impl Mul for SubPoolLimit { + type Output = Self; + + fn mul(self, rhs: usize) -> Self::Output { + let Self { max_txs, max_size } = self; + Self { max_txs: max_txs * rhs, max_size: max_size * rhs } + } +} + impl Default for SubPoolLimit { fn default() -> Self { // either 10k transactions or 20MB @@ -157,7 +167,7 @@ pub struct LocalTransactionConfig { /// - no price exemptions /// - no eviction exemptions pub no_exemptions: bool, - /// Addresses that will be considered as local . Above exemptions apply + /// Addresses that will be considered as local. Above exemptions apply. pub local_addresses: HashSet
, /// Flag indicating whether local transactions should be propagated. pub propagate_local_transactions: bool, @@ -318,4 +328,14 @@ mod tests { let new_config = config.set_propagate_local_transactions(false); assert!(!new_config.propagate_local_transactions); } + + #[test] + fn scale_pool_limit() { + let limit = SubPoolLimit::default(); + let double = limit * 2; + assert_eq!( + double, + SubPoolLimit { max_txs: limit.max_txs * 2, max_size: limit.max_size * 2 } + ) + } } diff --git a/crates/transaction-pool/src/lib.rs b/crates/transaction-pool/src/lib.rs index 4aec8ab408581..a5acd6edba5a8 100644 --- a/crates/transaction-pool/src/lib.rs +++ b/crates/transaction-pool/src/lib.rs @@ -324,6 +324,7 @@ where impl TransactionPool for Pool where V: TransactionValidator, + ::Transaction: EthPoolTransaction, T: TransactionOrdering::Transaction>, S: BlobStore, { @@ -488,6 +489,13 @@ where self.pool.get_transactions_by_sender(sender) } + fn get_highest_transaction_by_sender( + &self, + sender: Address, + ) -> Option>> { + self.pool.get_highest_transaction_by_sender(sender) + } + fn get_transaction_by_sender_and_nonce( &self, sender: Address, @@ -546,6 +554,7 @@ where impl TransactionPoolExt for Pool where V: TransactionValidator, + ::Transaction: EthPoolTransaction, T: TransactionOrdering::Transaction>, S: BlobStore, { diff --git a/crates/transaction-pool/src/maintain.rs b/crates/transaction-pool/src/maintain.rs index da416fd2d43fb..66b98614737e0 100644 --- a/crates/transaction-pool/src/maintain.rs +++ b/crates/transaction-pool/src/maintain.rs @@ -18,6 +18,7 @@ use reth_execution_types::ChangedAccount; use reth_fs_util::FsPathError; use reth_primitives::{ BlockNumberOrTag, PooledTransactionsElementEcRecovered, SealedHeader, TransactionSigned, + TransactionSignedEcRecovered, }; use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskSpawner; @@ -334,11 +335,10 @@ pub async fn maintain_transaction_pool( .ok() }) 
.map(|tx| { - <

::Transaction as PoolTransaction>::from_pooled(tx) +

::Transaction::from_pooled(tx.into()) }) } else { - - ::try_from_consensus(tx).ok() +

::Transaction::try_from_consensus(tx.into()).ok() } }) .collect::>(); @@ -583,7 +583,7 @@ where .filter_map(|tx| tx.try_ecrecovered()) .filter_map(|tx| { // Filter out errors - ::try_from_consensus(tx).ok() + ::try_from_consensus(tx.into()).ok() }) .collect::>(); @@ -606,7 +606,11 @@ where let local_transactions = local_transactions .into_iter() - .map(|tx| tx.to_recovered_transaction().into_signed()) + .map(|tx| { + let recovered: TransactionSignedEcRecovered = + tx.transaction.clone().into_consensus().into(); + recovered.into_signed() + }) .collect::>(); let num_txs = local_transactions.len(); @@ -672,6 +676,7 @@ mod tests { blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder, CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionOrigin, }; + use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; use reth_chainspec::MAINNET; use reth_fs_util as fs; @@ -695,7 +700,7 @@ mod tests { let temp_dir = tempfile::tempdir().unwrap(); let transactions_path = temp_dir.path().join(FILENAME).with_extension(EXTENSION); let tx_bytes = hex!("02f87201830655c2808505ef61f08482565f94388c818ca8b9251b393131c08a736a67ccb192978801049e39c4b5b1f580c001a01764ace353514e8abdfb92446de356b260e3c1225b73fc4c8876a6258d12a129a04f02294aa61ca7676061cd99f29275491218b4754b46a0248e5e42bc5091f507"); - let tx = PooledTransactionsElement::decode_enveloped(&mut &tx_bytes[..]).unwrap(); + let tx = PooledTransactionsElement::decode_2718(&mut &tx_bytes[..]).unwrap(); let provider = MockEthProvider::default(); let transaction: EthPooledTransaction = tx.try_into_ecrecovered().unwrap().into(); let tx_to_cmp = transaction.clone(); diff --git a/crates/transaction-pool/src/metrics.rs b/crates/transaction-pool/src/metrics.rs index d323d47e459ac..f5d269b361fc0 100644 --- a/crates/transaction-pool/src/metrics.rs +++ b/crates/transaction-pool/src/metrics.rs @@ -36,8 +36,18 @@ pub struct TxPoolMetrics { /// Total amount of memory used by the transactions in the blob sub-pool 
in bytes pub(crate) blob_pool_size_bytes: Gauge, - /// Number of all transactions of all sub-pools: pending + basefee + queued + /// Number of all transactions of all sub-pools: pending + basefee + queued + blob pub(crate) total_transactions: Gauge, + /// Number of all legacy transactions in the pool + pub(crate) total_legacy_transactions: Gauge, + /// Number of all EIP-2930 transactions in the pool + pub(crate) total_eip2930_transactions: Gauge, + /// Number of all EIP-1559 transactions in the pool + pub(crate) total_eip1559_transactions: Gauge, + /// Number of all EIP-4844 transactions in the pool + pub(crate) total_eip4844_transactions: Gauge, + /// Number of all EIP-7702 transactions in the pool + pub(crate) total_eip7702_transactions: Gauge, /// How often the pool was updated after the canonical state changed pub(crate) performed_state_updates: Counter, diff --git a/crates/transaction-pool/src/noop.rs b/crates/transaction-pool/src/noop.rs index 0c4caa5731402..ddab4f6227415 100644 --- a/crates/transaction-pool/src/noop.rs +++ b/crates/transaction-pool/src/noop.rs @@ -206,6 +206,13 @@ impl TransactionPool for NoopTransactionPool { vec![] } + fn get_highest_transaction_by_sender( + &self, + _sender: Address, + ) -> Option>> { + None + } + fn get_transaction_by_sender_and_nonce( &self, _sender: Address, diff --git a/crates/transaction-pool/src/ordering.rs b/crates/transaction-pool/src/ordering.rs index 3381bb0279472..0ee0c1004a131 100644 --- a/crates/transaction-pool/src/ordering.rs +++ b/crates/transaction-pool/src/ordering.rs @@ -1,6 +1,5 @@ use crate::traits::PoolTransaction; use alloy_primitives::U256; -use reth_primitives::{PooledTransactionsElementEcRecovered, TransactionSignedEcRecovered}; use std::{fmt, marker::PhantomData}; /// Priority of the transaction that can be missing. 
@@ -32,10 +31,7 @@ pub trait TransactionOrdering: Send + Sync + 'static { type PriorityValue: Ord + Clone + Default + fmt::Debug + Send + Sync; /// The transaction type to determine the priority of. - type Transaction: PoolTransaction< - Pooled = PooledTransactionsElementEcRecovered, - Consensus = TransactionSignedEcRecovered, - >; + type Transaction: PoolTransaction; /// Returns the priority score for the given transaction. fn priority( @@ -55,10 +51,7 @@ pub struct CoinbaseTipOrdering(PhantomData); impl TransactionOrdering for CoinbaseTipOrdering where - T: PoolTransaction< - Pooled = PooledTransactionsElementEcRecovered, - Consensus = TransactionSignedEcRecovered, - > + 'static, + T: PoolTransaction + 'static, { type PriorityValue = U256; type Transaction = T; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index 4c1a7f2c29bbc..090b92fb65949 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -88,6 +88,7 @@ use reth_execution_types::ChangedAccount; use reth_primitives::{ BlobTransaction, BlobTransactionSidecar, PooledTransactionsElement, TransactionSigned, + TransactionSignedEcRecovered, }; use std::{ collections::{HashMap, HashSet}, @@ -318,13 +319,19 @@ where &self, tx_hashes: Vec, limit: GetPooledTransactionLimit, - ) -> Vec { + ) -> Vec + where + ::Transaction: + PoolTransaction>, + { let transactions = self.get_all(tx_hashes); let mut elements = Vec::with_capacity(transactions.len()); let mut size = 0; for transaction in transactions { let encoded_len = transaction.encoded_length(); - let tx = transaction.to_recovered_transaction().into_signed(); + let recovered: TransactionSignedEcRecovered = + transaction.transaction.clone().into_consensus().into(); + let tx = recovered.into_signed(); let pooled = if tx.is_eip4844() { // for EIP-4844 transactions, we need to fetch the blob sidecar from the blob store if let Some(blob) = self.get_blob_transaction(tx) { @@ 
-360,9 +367,15 @@ where pub(crate) fn get_pooled_transaction_element( &self, tx_hash: TxHash, - ) -> Option { + ) -> Option + where + ::Transaction: + PoolTransaction>, + { self.get(&tx_hash).and_then(|transaction| { - let tx = transaction.to_recovered_transaction().into_signed(); + let recovered: TransactionSignedEcRecovered = + transaction.transaction.clone().into_consensus().into(); + let tx = recovered.into_signed(); if tx.is_eip4844() { self.get_blob_transaction(tx).map(PooledTransactionsElement::BlobTransaction) } else { @@ -731,6 +744,15 @@ where self.get_pool_data().get_transactions_by_sender(sender_id) } + /// Returns the highest transaction of the address + pub(crate) fn get_highest_transaction_by_sender( + &self, + sender: Address, + ) -> Option>> { + let sender_id = self.get_sender_id(sender); + self.get_pool_data().get_highest_transaction_by_sender(sender_id) + } + /// Returns all transactions that where submitted with the given [`TransactionOrigin`] pub(crate) fn get_transactions_by_origin( &self, diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index 912e04506a19a..10605565c85d3 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -19,8 +19,12 @@ use crate::{ ValidPoolTransaction, U256, }; use alloy_primitives::{Address, TxHash, B256}; -use reth_primitives::constants::{ - eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, +use reth_primitives::{ + constants::{ + eip4844::BLOB_TX_MIN_BLOB_GASPRICE, ETHEREUM_BLOCK_GAS_LIMIT, MIN_PROTOCOL_BASE_FEE, + }, + EIP1559_TX_TYPE_ID, EIP2930_TX_TYPE_ID, EIP4844_TX_TYPE_ID, EIP7702_TX_TYPE_ID, + LEGACY_TX_TYPE_ID, }; use rustc_hash::FxHashMap; use smallvec::SmallVec; @@ -429,6 +433,7 @@ impl TxPool { let UpdateOutcome { promoted, discarded } = self.update_accounts(changed_senders); + self.update_transaction_type_metrics(); self.metrics.performed_state_updates.increment(1); 
OnNewCanonicalStateOutcome { block_hash, mined: mined_transactions, promoted, discarded } @@ -448,6 +453,32 @@ impl TxPool { self.metrics.total_transactions.set(stats.total as f64); } + /// Updates transaction type metrics for the entire pool. + pub(crate) fn update_transaction_type_metrics(&self) { + let mut legacy_count = 0; + let mut eip2930_count = 0; + let mut eip1559_count = 0; + let mut eip4844_count = 0; + let mut eip7702_count = 0; + + for tx in self.all_transactions.transactions_iter() { + match tx.transaction.tx_type() { + LEGACY_TX_TYPE_ID => legacy_count += 1, + EIP2930_TX_TYPE_ID => eip2930_count += 1, + EIP1559_TX_TYPE_ID => eip1559_count += 1, + EIP4844_TX_TYPE_ID => eip4844_count += 1, + EIP7702_TX_TYPE_ID => eip7702_count += 1, + _ => {} // Ignore other types + } + } + + self.metrics.total_legacy_transactions.set(legacy_count as f64); + self.metrics.total_eip2930_transactions.set(eip2930_count as f64); + self.metrics.total_eip1559_transactions.set(eip1559_count as f64); + self.metrics.total_eip4844_transactions.set(eip4844_count as f64); + self.metrics.total_eip7702_transactions.set(eip7702_count as f64); + } + /// Adds the transaction into the pool. /// /// This pool consists of four sub-pools: `Queued`, `Pending`, `BaseFee`, and `Blob`. diff --git a/crates/transaction-pool/src/traits.rs b/crates/transaction-pool/src/traits.rs index d4eabc73bbcea..adae238e46b3c 100644 --- a/crates/transaction-pool/src/traits.rs +++ b/crates/transaction-pool/src/traits.rs @@ -44,10 +44,7 @@ pub type PeerId = alloy_primitives::B512; #[auto_impl::auto_impl(&, Arc)] pub trait TransactionPool: Send + Sync + Clone { /// The transaction type of the pool - type Transaction: PoolTransaction< - Pooled = PooledTransactionsElementEcRecovered, - Consensus = TransactionSignedEcRecovered, - >; + type Transaction: EthPoolTransaction; /// Returns stats about the pool and all sub-pools. 
fn pool_size(&self) -> PoolSize; @@ -337,6 +334,12 @@ pub trait TransactionPool: Send + Sync + Clone { sender: Address, ) -> Vec>>; + /// Returns the highest transaction sent by a given user + fn get_highest_transaction_by_sender( + &self, + sender: Address, + ) -> Option>>; + /// Returns a transaction sent by a given user and a nonce fn get_transaction_by_sender_and_nonce( &self, @@ -496,12 +499,12 @@ pub struct AllPoolTransactions { impl AllPoolTransactions { /// Returns an iterator over all pending [`TransactionSignedEcRecovered`] transactions. pub fn pending_recovered(&self) -> impl Iterator + '_ { - self.pending.iter().map(|tx| tx.transaction.clone().into_consensus()) + self.pending.iter().map(|tx| tx.transaction.clone().into()) } /// Returns an iterator over all queued [`TransactionSignedEcRecovered`] transactions. pub fn queued_recovered(&self) -> impl Iterator + '_ { - self.queued.iter().map(|tx| tx.transaction.clone().into_consensus()) + self.queued.iter().map(|tx| tx.transaction.clone().into()) } } @@ -647,7 +650,7 @@ pub struct CanonicalStateUpdate<'a> { pub mined_transactions: Vec, } -impl<'a> CanonicalStateUpdate<'a> { +impl CanonicalStateUpdate<'_> { /// Returns the number of the tip block. pub fn number(&self) -> u64 { self.new_tip.number @@ -813,19 +816,25 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { type TryFromConsensusError; /// Associated type representing the raw consensus variant of the transaction. - type Consensus: From + TryInto; + type Consensus: From + TryInto; /// Associated type representing the recovered pooled variant of the transaction. 
type Pooled: Into; /// Define a method to convert from the `Consensus` type to `Self` - fn try_from_consensus(tx: Self::Consensus) -> Result; + fn try_from_consensus(tx: Self::Consensus) -> Result { + tx.try_into() + } /// Define a method to convert from the `Self` type to `Consensus` - fn into_consensus(self) -> Self::Consensus; + fn into_consensus(self) -> Self::Consensus { + self.into() + } /// Define a method to convert from the `Pooled` type to `Self` - fn from_pooled(pooled: Self::Pooled) -> Self; + fn from_pooled(pooled: Self::Pooled) -> Self { + pooled.into() + } /// Hash of the transaction. fn hash(&self) -> &TxHash; @@ -921,12 +930,11 @@ pub trait PoolTransaction: fmt::Debug + Send + Sync + Clone { fn chain_id(&self) -> Option; } -/// An extension trait that provides additional interfaces for the -/// [`EthTransactionValidator`](crate::EthTransactionValidator). +/// Super trait for transactions that can be converted to and from Eth transactions pub trait EthPoolTransaction: PoolTransaction< - Pooled = PooledTransactionsElementEcRecovered, - Consensus = TransactionSignedEcRecovered, + Consensus: From + Into, + Pooled: From + Into, > { /// Extracts the blob sidecar from the transaction. @@ -1043,7 +1051,7 @@ impl EthPooledTransaction { /// Conversion from the network transaction type to the pool transaction type. 
impl From for EthPooledTransaction { fn from(tx: PooledTransactionsElementEcRecovered) -> Self { - let encoded_length = tx.length_without_header(); + let encoded_length = tx.encode_2718_len(); let (tx, signer) = tx.into_components(); match tx { PooledTransactionsElement::BlobTransaction(tx) => { @@ -1069,18 +1077,6 @@ impl PoolTransaction for EthPooledTransaction { type Pooled = PooledTransactionsElementEcRecovered; - fn try_from_consensus(tx: Self::Consensus) -> Result { - tx.try_into() - } - - fn into_consensus(self) -> Self::Consensus { - self.into() - } - - fn from_pooled(pooled: Self::Pooled) -> Self { - pooled.into() - } - /// Returns hash of the transaction. fn hash(&self) -> &TxHash { self.transaction.hash_ref() diff --git a/crates/transaction-pool/src/validate/eth.rs b/crates/transaction-pool/src/validate/eth.rs index acfe46d6e88a0..49165f189a057 100644 --- a/crates/transaction-pool/src/validate/eth.rs +++ b/crates/transaction-pool/src/validate/eth.rs @@ -833,6 +833,7 @@ mod tests { blobstore::InMemoryBlobStore, error::PoolErrorKind, CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionPool, }; + use alloy_eips::eip2718::Decodable2718; use alloy_primitives::{hex, U256}; use reth_chainspec::MAINNET; use reth_primitives::PooledTransactionsElement; @@ -842,7 +843,7 @@ mod tests { let raw = 
"0x02f914950181ad84b2d05e0085117553845b830f7df88080b9143a6040608081523462000414576200133a803803806200001e8162000419565b9283398101608082820312620004145781516001600160401b03908181116200041457826200004f9185016200043f565b92602092838201519083821162000414576200006d9183016200043f565b8186015190946001600160a01b03821692909183900362000414576060015190805193808511620003145760038054956001938488811c9816801562000409575b89891014620003f3578190601f988981116200039d575b50899089831160011462000336576000926200032a575b505060001982841b1c191690841b1781555b8751918211620003145760049788548481811c9116801562000309575b89821014620002f457878111620002a9575b5087908784116001146200023e5793839491849260009562000232575b50501b92600019911b1c19161785555b6005556007805460ff60a01b19169055600880546001600160a01b0319169190911790553015620001f3575060025469d3c21bcecceda100000092838201809211620001de57506000917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9160025530835282815284832084815401905584519384523093a351610e889081620004b28239f35b601190634e487b7160e01b6000525260246000fd5b90606493519262461bcd60e51b845283015260248201527f45524332303a206d696e7420746f20746865207a65726f2061646472657373006044820152fd5b0151935038806200013a565b9190601f198416928a600052848a6000209460005b8c8983831062000291575050501062000276575b50505050811b0185556200014a565b01519060f884600019921b161c191690553880808062000267565b86860151895590970196948501948893500162000253565b89600052886000208880860160051c8201928b8710620002ea575b0160051c019085905b828110620002dd5750506200011d565b60008155018590620002cd565b92508192620002c4565b60228a634e487b7160e01b6000525260246000fd5b90607f16906200010b565b634e487b7160e01b600052604160045260246000fd5b015190503880620000dc565b90869350601f19831691856000528b6000209260005b8d8282106200038657505084116200036d575b505050811b018155620000ee565b015160001983861b60f8161c191690553880806200035f565b8385015186558a979095019493840193016200034c565b90915083600052896000208980850160051c8201928c8610620003e9575b918891869594930160051c01915b828
110620003d9575050620000c5565b60008155859450889101620003c9565b92508192620003bb565b634e487b7160e01b600052602260045260246000fd5b97607f1697620000ae565b600080fd5b6040519190601f01601f191682016001600160401b038111838210176200031457604052565b919080601f84011215620004145782516001600160401b038111620003145760209062000475601f8201601f1916830162000419565b92818452828287010111620004145760005b8181106200049d57508260009394955001015290565b85810183015184820184015282016200048756fe608060408181526004918236101561001657600080fd5b600092833560e01c91826306fdde0314610a1c57508163095ea7b3146109f257816318160ddd146109d35781631b4c84d2146109ac57816323b872dd14610833578163313ce5671461081757816339509351146107c357816370a082311461078c578163715018a6146107685781638124f7ac146107495781638da5cb5b1461072057816395d89b411461061d578163a457c2d714610575578163a9059cbb146104e4578163c9567bf914610120575063dd62ed3e146100d557600080fd5b3461011c578060031936011261011c57806020926100f1610b5a565b6100f9610b75565b6001600160a01b0391821683526001865283832091168252845220549051908152f35b5080fd5b905082600319360112610338576008546001600160a01b039190821633036104975760079283549160ff8360a01c1661045557737a250d5630b4cf539739df2c5dacb4c659f2488d92836bffffffffffffffffffffffff60a01b8092161786553087526020938785528388205430156104065730895260018652848920828a52865280858a205584519081527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925863092a38554835163c45a015560e01b815290861685828581845afa9182156103dd57849187918b946103e7575b5086516315ab88c960e31b815292839182905afa9081156103dd576044879289928c916103c0575b508b83895196879586946364e329cb60e11b8652308c870152166024850152165af19081156103b6579086918991610389575b50169060065416176006558385541660604730895288865260c4858a20548860085416928751958694859363f305d71960e01b8552308a86015260248501528d60448501528d606485015260848401524260a48401525af1801561037f579084929161034c575b50604485600654169587541691888551978894859363095ea7b360e01b855284015260001960248401525af1908115610343575061030c575b5050805460ff60a01b191
6600160a01b17905580f35b81813d831161033c575b6103208183610b8b565b8101031261033857518015150361011c5738806102f6565b8280fd5b503d610316565b513d86823e3d90fd5b6060809293503d8111610378575b6103648183610b8b565b81010312610374578290386102bd565b8580fd5b503d61035a565b83513d89823e3d90fd5b6103a99150863d88116103af575b6103a18183610b8b565b810190610e33565b38610256565b503d610397565b84513d8a823e3d90fd5b6103d79150843d86116103af576103a18183610b8b565b38610223565b85513d8b823e3d90fd5b6103ff919450823d84116103af576103a18183610b8b565b92386101fb565b845162461bcd60e51b81528085018790526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fd5b6020606492519162461bcd60e51b8352820152601760248201527f74726164696e6720697320616c7265616479206f70656e0000000000000000006044820152fd5b608490602084519162461bcd60e51b8352820152602160248201527f4f6e6c79206f776e65722063616e2063616c6c20746869732066756e6374696f6044820152603760f91b6064820152fd5b9050346103385781600319360112610338576104fe610b5a565b9060243593303303610520575b602084610519878633610bc3565b5160018152f35b600594919454808302908382041483151715610562576127109004820391821161054f5750925080602061050b565b634e487b7160e01b815260118552602490fd5b634e487b7160e01b825260118652602482fd5b9050823461061a578260031936011261061a57610590610b5a565b918360243592338152600160205281812060018060a01b03861682526020522054908282106105c9576020856105198585038733610d31565b608490602086519162461bcd60e51b8352820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f77604482015264207a65726f60d81b6064820152fd5b80fd5b83833461011c578160031936011261011c57805191809380549160019083821c92828516948515610716575b6020958686108114610703578589529081156106df5750600114610687575b6106838787610679828c0383610b8b565b5191829182610b11565b0390f35b81529295507f8a35acfbc15ff81a39ae7d344fd709f28e8600b4aa8c65c6b64bfe7fe36bd19b5b8284106106cc57505050826106839461067992820101948680610668565b80548685018801529286019281016106ae565b60ff191688870152505
05050151560051b8301019250610679826106838680610668565b634e487b7160e01b845260228352602484fd5b93607f1693610649565b50503461011c578160031936011261011c5760085490516001600160a01b039091168152602090f35b50503461011c578160031936011261011c576020906005549051908152f35b833461061a578060031936011261061a57600880546001600160a01b031916905580f35b50503461011c57602036600319011261011c5760209181906001600160a01b036107b4610b5a565b16815280845220549051908152f35b82843461061a578160031936011261061a576107dd610b5a565b338252600160209081528383206001600160a01b038316845290528282205460243581019290831061054f57602084610519858533610d31565b50503461011c578160031936011261011c576020905160128152f35b83833461011c57606036600319011261011c5761084e610b5a565b610856610b75565b6044359160018060a01b0381169485815260209560018752858220338352875285822054976000198903610893575b505050906105199291610bc3565b85891061096957811561091a5733156108cc5750948481979861051997845260018a528284203385528a52039120558594938780610885565b865162461bcd60e51b8152908101889052602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608490fd5b865162461bcd60e51b81529081018890526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fd5b865162461bcd60e51b8152908101889052601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e63650000006044820152606490fd5b50503461011c578160031936011261011c5760209060ff60075460a01c1690519015158152f35b50503461011c578160031936011261011c576020906002549051908152f35b50503461011c578060031936011261011c57602090610519610a12610b5a565b6024359033610d31565b92915034610b0d5783600319360112610b0d57600354600181811c9186908281168015610b03575b6020958686108214610af05750848852908115610ace5750600114610a75575b6106838686610679828b0383610b8b565b929550600383527fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b5b828410610abb575050508261068394610679928201019438610a64565b8054868501880152928601928101610a9e565b60ff1
91687860152505050151560051b83010192506106798261068338610a64565b634e487b7160e01b845260229052602483fd5b93607f1693610a44565b8380fd5b6020808252825181830181905290939260005b828110610b4657505060409293506000838284010152601f8019910116010190565b818101860151848201604001528501610b24565b600435906001600160a01b0382168203610b7057565b600080fd5b602435906001600160a01b0382168203610b7057565b90601f8019910116810190811067ffffffffffffffff821117610bad57604052565b634e487b7160e01b600052604160045260246000fd5b6001600160a01b03908116918215610cde5716918215610c8d57600082815280602052604081205491808310610c3957604082827fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef958760209652828652038282205586815220818154019055604051908152a3565b60405162461bcd60e51b815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e7420657863656564732062604482015265616c616e636560d01b6064820152608490fd5b60405162461bcd60e51b815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201526265737360e81b6064820152608490fd5b60405162461bcd60e51b815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f206164604482015264647265737360d81b6064820152608490fd5b6001600160a01b03908116918215610de25716918215610d925760207f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925918360005260018252604060002085600052825280604060002055604051908152a3565b60405162461bcd60e51b815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f206164647265604482015261737360f01b6064820152608490fd5b60405162461bcd60e51b8152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f206164646044820152637265737360e01b6064820152608490fd5b90816020910312610b7057516001600160a01b0381168103610b70579056fea2646970667358221220285c200b3978b10818ff576bb83f2dc4a2a7c98dfb6a36ea01170de792aa652764736f6c63430008140033000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000
000000000000000000000000000c0000000000000000000000000d3fd4f95820a9aa848ce716d6c200eaefb9a2e4900000000000000000000000000000000000000000000000000000000000000640000000000000000000000000000000000000000000000000000000000000003543131000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000035431310000000000000000000000000000000000000000000000000000000000c001a04e551c75810ffdfe6caff57da9f5a8732449f42f0f4c57f935b05250a76db3b6a046cd47e6d01914270c1ec0d9ac7fae7dfb240ec9a8b6ec7898c4d6aa174388f2"; let data = hex::decode(raw).unwrap(); - let tx = PooledTransactionsElement::decode_enveloped(&mut data.as_ref()).unwrap(); + let tx = PooledTransactionsElement::decode_2718(&mut data.as_ref()).unwrap(); tx.try_into_ecrecovered().unwrap().into() } diff --git a/crates/transaction-pool/src/validate/mod.rs b/crates/transaction-pool/src/validate/mod.rs index b8fe7cbb1de02..4395cc97908b7 100644 --- a/crates/transaction-pool/src/validate/mod.rs +++ b/crates/transaction-pool/src/validate/mod.rs @@ -7,10 +7,7 @@ use crate::{ }; use alloy_primitives::{Address, TxHash, B256, U256}; use futures_util::future::Either; -use reth_primitives::{ - BlobTransactionSidecar, PooledTransactionsElementEcRecovered, SealedBlock, - TransactionSignedEcRecovered, -}; +use reth_primitives::{BlobTransactionSidecar, SealedBlock, TransactionSignedEcRecovered}; use std::{fmt, future::Future, time::Instant}; mod constants; @@ -154,10 +151,7 @@ impl ValidTransaction { /// Provides support for validating transaction at any given state of the chain pub trait TransactionValidator: Send + Sync { /// The transaction type to validate. - type Transaction: PoolTransaction< - Pooled = PooledTransactionsElementEcRecovered, - Consensus = TransactionSignedEcRecovered, - >; + type Transaction: PoolTransaction; /// Validates the transaction and returns a [`TransactionValidationOutcome`] describing the /// validity of the given transaction. 
@@ -380,12 +374,12 @@ impl ValidPoolTransaction { } } -impl> ValidPoolTransaction { +impl>> ValidPoolTransaction { /// Converts to this type into a [`TransactionSignedEcRecovered`]. /// /// Note: this takes `&self` since indented usage is via `Arc`. pub fn to_recovered_transaction(&self) -> TransactionSignedEcRecovered { - self.transaction.clone().into_consensus() + self.transaction.clone().into_consensus().into() } } diff --git a/crates/trie/db/src/hashed_cursor.rs b/crates/trie/db/src/hashed_cursor.rs index bf0341c8884dc..6d0b79e5a02b4 100644 --- a/crates/trie/db/src/hashed_cursor.rs +++ b/crates/trie/db/src/hashed_cursor.rs @@ -11,7 +11,7 @@ use reth_trie::hashed_cursor::{HashedCursor, HashedCursorFactory, HashedStorageC #[derive(Debug)] pub struct DatabaseHashedCursorFactory<'a, TX>(&'a TX); -impl<'a, TX> Clone for DatabaseHashedCursorFactory<'a, TX> { +impl Clone for DatabaseHashedCursorFactory<'_, TX> { fn clone(&self) -> Self { Self(self.0) } @@ -24,7 +24,7 @@ impl<'a, TX> DatabaseHashedCursorFactory<'a, TX> { } } -impl<'a, TX: DbTx> HashedCursorFactory for DatabaseHashedCursorFactory<'a, TX> { +impl HashedCursorFactory for DatabaseHashedCursorFactory<'_, TX> { type AccountCursor = DatabaseHashedAccountCursor<::Cursor>; type StorageCursor = DatabaseHashedStorageCursor<::DupCursor>; diff --git a/crates/trie/db/src/lib.rs b/crates/trie/db/src/lib.rs index aceea1da86602..3a9b1e328239e 100644 --- a/crates/trie/db/src/lib.rs +++ b/crates/trie/db/src/lib.rs @@ -12,7 +12,7 @@ pub use hashed_cursor::{ DatabaseHashedAccountCursor, DatabaseHashedCursorFactory, DatabaseHashedStorageCursor, }; pub use prefix_set::PrefixSetLoader; -pub use proof::DatabaseProof; +pub use proof::{DatabaseProof, DatabaseStorageProof}; pub use state::{DatabaseHashedPostState, DatabaseStateRoot}; pub use storage::{DatabaseHashedStorage, DatabaseStorageRoot}; pub use trie_cursor::{ diff --git a/crates/trie/db/src/prefix_set.rs b/crates/trie/db/src/prefix_set.rs index 
07b87016d2b4f..079fe393764d2 100644 --- a/crates/trie/db/src/prefix_set.rs +++ b/crates/trie/db/src/prefix_set.rs @@ -26,7 +26,7 @@ impl<'a, TX> PrefixSetLoader<'a, TX> { } } -impl<'a, TX: DbTx> PrefixSetLoader<'a, TX> { +impl PrefixSetLoader<'_, TX> { /// Load all account and storage changes for the given block range. pub fn load(self, range: RangeInclusive) -> Result { // Initialize prefix sets. diff --git a/crates/trie/db/src/proof.rs b/crates/trie/db/src/proof.rs index 1d5fda84cc5bc..9bf08fe136f74 100644 --- a/crates/trie/db/src/proof.rs +++ b/crates/trie/db/src/proof.rs @@ -1,13 +1,16 @@ use crate::{DatabaseHashedCursorFactory, DatabaseTrieCursorFactory}; use alloy_primitives::{ + keccak256, map::{HashMap, HashSet}, Address, B256, }; use reth_db_api::transaction::DbTx; use reth_execution_errors::StateProofError; use reth_trie::{ - hashed_cursor::HashedPostStateCursorFactory, proof::Proof, - trie_cursor::InMemoryTrieCursorFactory, MultiProof, TrieInput, + hashed_cursor::HashedPostStateCursorFactory, + proof::{Proof, StorageProof}, + trie_cursor::InMemoryTrieCursorFactory, + HashedPostStateSorted, HashedStorage, MultiProof, TrieInput, }; use reth_trie_common::AccountProof; @@ -81,3 +84,46 @@ impl<'a, TX: DbTx> DatabaseProof<'a, TX> .multiproof(targets) } } + +/// Extends [`StorageProof`] with operations specific for working with a database transaction. +pub trait DatabaseStorageProof<'a, TX> { + /// Create a new [`StorageProof`] from database transaction and account address. + fn from_tx(tx: &'a TX, address: Address) -> Self; + + /// Generates the storage proof for target slot based on [`TrieInput`]. 
+ fn overlay_storage_proof( + tx: &'a TX, + address: Address, + slot: B256, + storage: HashedStorage, + ) -> Result; +} + +impl<'a, TX: DbTx> DatabaseStorageProof<'a, TX> + for StorageProof, DatabaseHashedCursorFactory<'a, TX>> +{ + fn from_tx(tx: &'a TX, address: Address) -> Self { + Self::new(DatabaseTrieCursorFactory::new(tx), DatabaseHashedCursorFactory::new(tx), address) + } + + fn overlay_storage_proof( + tx: &'a TX, + address: Address, + slot: B256, + storage: HashedStorage, + ) -> Result { + let hashed_address = keccak256(address); + let prefix_set = storage.construct_prefix_set(); + let state_sorted = HashedPostStateSorted::new( + Default::default(), + HashMap::from([(hashed_address, storage.into_sorted())]), + ); + Self::from_tx(tx, address) + .with_hashed_cursor_factory(HashedPostStateCursorFactory::new( + DatabaseHashedCursorFactory::new(tx), + &state_sorted, + )) + .with_prefix_set_mut(prefix_set) + .storage_proof(slot) + } +} diff --git a/crates/trie/db/src/trie_cursor.rs b/crates/trie/db/src/trie_cursor.rs index 601100b3faee3..bfded342ba04d 100644 --- a/crates/trie/db/src/trie_cursor.rs +++ b/crates/trie/db/src/trie_cursor.rs @@ -19,7 +19,7 @@ use reth_trie_common::StorageTrieEntry; #[derive(Debug)] pub struct DatabaseTrieCursorFactory<'a, TX>(&'a TX); -impl<'a, TX> Clone for DatabaseTrieCursorFactory<'a, TX> { +impl Clone for DatabaseTrieCursorFactory<'_, TX> { fn clone(&self) -> Self { Self(self.0) } @@ -33,7 +33,7 @@ impl<'a, TX> DatabaseTrieCursorFactory<'a, TX> { } /// Implementation of the trie cursor factory for a database transaction. 
-impl<'a, TX: DbTx> TrieCursorFactory for DatabaseTrieCursorFactory<'a, TX> { +impl TrieCursorFactory for DatabaseTrieCursorFactory<'_, TX> { type AccountTrieCursor = DatabaseAccountTrieCursor<::Cursor>; type StorageTrieCursor = DatabaseStorageTrieCursor<::DupCursor>; diff --git a/crates/trie/trie/src/forward_cursor.rs b/crates/trie/trie/src/forward_cursor.rs index da71326cea905..6db214bb51a33 100644 --- a/crates/trie/trie/src/forward_cursor.rs +++ b/crates/trie/trie/src/forward_cursor.rs @@ -21,7 +21,7 @@ impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> { } } -impl<'a, K, V> ForwardInMemoryCursor<'a, K, V> +impl ForwardInMemoryCursor<'_, K, V> where K: PartialOrd + Clone, V: Clone, diff --git a/crates/trie/trie/src/hashed_cursor/post_state.rs b/crates/trie/trie/src/hashed_cursor/post_state.rs index 53a2cdb3bb645..678914191527e 100644 --- a/crates/trie/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/trie/src/hashed_cursor/post_state.rs @@ -132,7 +132,7 @@ where } } -impl<'a, C> HashedCursor for HashedPostStateAccountCursor<'a, C> +impl HashedCursor for HashedPostStateAccountCursor<'_, C> where C: HashedCursor, { @@ -276,7 +276,7 @@ where } } -impl<'a, C> HashedCursor for HashedPostStateStorageCursor<'a, C> +impl HashedCursor for HashedPostStateStorageCursor<'_, C> where C: HashedStorageCursor, { @@ -304,7 +304,7 @@ where } } -impl<'a, C> HashedStorageCursor for HashedPostStateStorageCursor<'a, C> +impl HashedStorageCursor for HashedPostStateStorageCursor<'_, C> where C: HashedStorageCursor, { diff --git a/crates/trie/trie/src/prefix_set.rs b/crates/trie/trie/src/prefix_set.rs index 4997228050a3a..af0fb173d98a5 100644 --- a/crates/trie/trie/src/prefix_set.rs +++ b/crates/trie/trie/src/prefix_set.rs @@ -20,9 +20,9 @@ pub struct TriePrefixSetsMut { impl TriePrefixSetsMut { /// Extends prefix sets with contents of another prefix set. 
pub fn extend(&mut self, other: Self) { - self.account_prefix_set.extend(other.account_prefix_set.keys); + self.account_prefix_set.extend(other.account_prefix_set); for (hashed_address, prefix_set) in other.storage_prefix_sets { - self.storage_prefix_sets.entry(hashed_address).or_default().extend(prefix_set.keys); + self.storage_prefix_sets.entry(hashed_address).or_default().extend(prefix_set); } self.destroyed_accounts.extend(other.destroyed_accounts); } @@ -115,12 +115,18 @@ impl PrefixSetMut { self.keys.push(nibbles); } + /// Extend prefix set with contents of another prefix set. + pub fn extend(&mut self, other: Self) { + self.all |= other.all; + self.keys.extend(other.keys); + } + /// Extend prefix set keys with contents of provided iterator. - pub fn extend(&mut self, nibbles_iter: I) + pub fn extend_keys(&mut self, keys: I) where I: IntoIterator, { - self.keys.extend(nibbles_iter); + self.keys.extend(keys); } /// Returns the number of elements in the set. @@ -270,4 +276,11 @@ mod tests { assert_eq!(prefix_set.keys.len(), 3); // Length should be 3 (excluding duplicate) assert_eq!(prefix_set.keys.capacity(), 3); // Capacity should be 3 after shrinking } + + #[test] + fn test_prefix_set_all_extend() { + let mut prefix_set_mut = PrefixSetMut::default(); + prefix_set_mut.extend(PrefixSetMut::all()); + assert!(prefix_set_mut.all); + } } diff --git a/crates/trie/trie/src/proof.rs b/crates/trie/trie/src/proof.rs index 95d9505218bf5..d31d63fd9a8be 100644 --- a/crates/trie/trie/src/proof.rs +++ b/crates/trie/trie/src/proof.rs @@ -96,7 +96,7 @@ where // Create the walker. let mut prefix_set = self.prefix_sets.account_prefix_set.clone(); - prefix_set.extend(targets.keys().map(Nibbles::unpack)); + prefix_set.extend_keys(targets.keys().map(Nibbles::unpack)); let walker = TrieWalker::new(trie_cursor, prefix_set.freeze()); // Create a hash builder to rebuild the root node since it is not available in the database. 
@@ -124,7 +124,7 @@ where hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .storage_proof(proof_targets)?; + .storage_multiproof(proof_targets)?; // Encode account account_rlp.clear(); @@ -170,6 +170,26 @@ impl StorageProof { } } + /// Set the trie cursor factory. + pub fn with_trie_cursor_factory(self, trie_cursor_factory: TF) -> StorageProof { + StorageProof { + trie_cursor_factory, + hashed_cursor_factory: self.hashed_cursor_factory, + hashed_address: self.hashed_address, + prefix_set: self.prefix_set, + } + } + + /// Set the hashed cursor factory. + pub fn with_hashed_cursor_factory(self, hashed_cursor_factory: HF) -> StorageProof { + StorageProof { + trie_cursor_factory: self.trie_cursor_factory, + hashed_cursor_factory, + hashed_address: self.hashed_address, + prefix_set: self.prefix_set, + } + } + /// Set the changed prefixes. pub fn with_prefix_set_mut(mut self, prefix_set: PrefixSetMut) -> Self { self.prefix_set = prefix_set; @@ -182,8 +202,17 @@ where T: TrieCursorFactory, H: HashedCursorFactory, { - /// Generate storage proof. + /// Generate an account proof from intermediate nodes. pub fn storage_proof( + self, + slot: B256, + ) -> Result { + let targets = HashSet::from_iter([keccak256(slot)]); + Ok(self.storage_multiproof(targets)?.storage_proof(slot)?) + } + + /// Generate storage proof. 
+ pub fn storage_multiproof( mut self, targets: HashSet, ) -> Result { @@ -196,7 +225,7 @@ where } let target_nibbles = targets.into_iter().map(Nibbles::unpack).collect::>(); - self.prefix_set.extend(target_nibbles.clone()); + self.prefix_set.extend_keys(target_nibbles.clone()); let trie_cursor = self.trie_cursor_factory.storage_trie_cursor(self.hashed_address)?; let walker = TrieWalker::new(trie_cursor, self.prefix_set.freeze()); diff --git a/crates/trie/trie/src/trie_cursor/in_memory.rs b/crates/trie/trie/src/trie_cursor/in_memory.rs index 82ffcfb428f91..0f00191378ba8 100644 --- a/crates/trie/trie/src/trie_cursor/in_memory.rs +++ b/crates/trie/trie/src/trie_cursor/in_memory.rs @@ -113,7 +113,7 @@ impl<'a, C: TrieCursor> InMemoryAccountTrieCursor<'a, C> { } } -impl<'a, C: TrieCursor> TrieCursor for InMemoryAccountTrieCursor<'a, C> { +impl TrieCursor for InMemoryAccountTrieCursor<'_, C> { fn seek_exact( &mut self, key: Nibbles, @@ -188,7 +188,7 @@ impl<'a, C> InMemoryStorageTrieCursor<'a, C> { } } -impl<'a, C: TrieCursor> InMemoryStorageTrieCursor<'a, C> { +impl InMemoryStorageTrieCursor<'_, C> { fn seek_inner( &mut self, key: Nibbles, @@ -237,7 +237,7 @@ impl<'a, C: TrieCursor> InMemoryStorageTrieCursor<'a, C> { } } -impl<'a, C: TrieCursor> TrieCursor for InMemoryStorageTrieCursor<'a, C> { +impl TrieCursor for InMemoryStorageTrieCursor<'_, C> { fn seek_exact( &mut self, key: Nibbles, diff --git a/crates/trie/trie/src/updates.rs b/crates/trie/trie/src/updates.rs index a2d3d67363ea6..03e80cf52e539 100644 --- a/crates/trie/trie/src/updates.rs +++ b/crates/trie/trie/src/updates.rs @@ -455,7 +455,7 @@ pub mod serde_bincode_compat { } } - impl<'a> SerializeAs for TrieUpdates<'a> { + impl SerializeAs for TrieUpdates<'_> { fn serialize_as(source: &super::TrieUpdates, serializer: S) -> Result where S: Serializer, @@ -515,7 +515,7 @@ pub mod serde_bincode_compat { } } - impl<'a> SerializeAs for StorageTrieUpdates<'a> { + impl SerializeAs for StorageTrieUpdates<'_> { fn 
serialize_as( source: &super::StorageTrieUpdates, serializer: S, diff --git a/crates/trie/trie/src/witness.rs b/crates/trie/trie/src/witness.rs index b0fcfb021ae1f..d668946e62381 100644 --- a/crates/trie/trie/src/witness.rs +++ b/crates/trie/trie/src/witness.rs @@ -178,7 +178,7 @@ where hashed_address, ) .with_prefix_set_mut(storage_prefix_set) - .storage_proof(HashSet::from_iter([target_key]))?; + .storage_multiproof(HashSet::from_iter([target_key]))?; // The subtree only contains the proof for a single target. let node = diff --git a/docs/repo/layout.md b/docs/repo/layout.md index 6ba14e652bf1c..6ed91e79656f9 100644 --- a/docs/repo/layout.md +++ b/docs/repo/layout.md @@ -132,7 +132,6 @@ The IPC transport lives in [`rpc/ipc`](../../crates/rpc/ipc). - [`rpc/rpc-api`](../../crates/rpc/rpc-api): RPC traits - Supported transports: HTTP, WS, IPC - Supported namespaces: `eth_`, `engine_`, `debug_` -- [`rpc/rpc-types`](../../crates/rpc/rpc-types): Types relevant for the RPC endpoints above, grouped by namespace - [`rpc/rpc-eth-api`](../../crates/rpc/rpc-eth-api/): Reth RPC 'eth' namespace API (including interface and implementation), this crate is re-exported by `rpc/rpc-api` - [`rpc/rpc-eth-types`](../../crates/rpc/rpc-eth-types/): Types `supporting implementation` of 'eth' namespace RPC server API - [`rpc/rpc-server-types`](../../crates/rpc/rpc-server-types/): RPC server types and constants @@ -169,7 +168,7 @@ Small utility crates. - [`tasks`](../../crates/tasks): An executor-agnostic task abstraction, used to spawn tasks on different async executors. Supports blocking tasks and handles panics gracefully. A tokio implementation is provided by default. - [`metrics/common`](../../crates/metrics/src/common): Common metrics types (e.g. 
metered channels) -- [`metrics/metrics-derive`](../../crates/metrics/metrics-derive): A derive-style API for creating metrics +- [`metrics/metrics-derive`](https://github.com/rkrasiuk/metrics-derive): A derive-style API for creating metrics - [`tracing`](../../crates/tracing): A small utility crate to install a uniform [`tracing`][tracing] subscriber [libmdbx-rs]: https://crates.io/crates/libmdbx diff --git a/etc/grafana/dashboards/overview.json b/etc/grafana/dashboards/overview.json index 39849f20f0902..15786764f4299 100644 --- a/etc/grafana/dashboards/overview.json +++ b/etc/grafana/dashboards/overview.json @@ -2,7 +2,7 @@ "__inputs": [ { "name": "DS_PROMETHEUS", - "label": "Prometheus", + "label": "prometheus", "description": "", "type": "datasource", "pluginId": "prometheus", @@ -5732,6 +5732,79 @@ "title": "Engine API getPayloadBodies Latency", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Counts the number of failed response deliveries due to client request termination.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "mode": "none" + } + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 198 + }, + "id": 213, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": 
"consensus_engine_beacon_failed_new_payload_response_deliveries{instance=~\"$instance\"}", + "legendFormat": "Failed NewPayload Deliveries", + "refId": "A" + }, + { + "expr": "consensus_engine_beacon_failed_forkchoice_updated_response_deliveries{instance=~\"$instance\"}", + "legendFormat": "Failed ForkchoiceUpdated Deliveries", + "refId": "B" + } + ], + "title": "Failed Engine API Response Deliveries", + "type": "timeseries" + }, { "collapsed": false, "gridPos": { @@ -7787,7 +7860,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "The total number of canonical state notifications sent to an ExEx.", + "description": "The total number of canonical state notifications sent to ExExes.", "fieldConfig": { "defaults": { "color": { @@ -7884,7 +7957,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "The total number of events an ExEx has sent to the manager.", + "description": "The total number of events ExExes have sent to the manager.", "fieldConfig": { "defaults": { "color": { @@ -7981,7 +8054,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Current and Max capacity of the internal state notifications buffer.", + "description": "Current and Maximum capacity of the internal state notifications buffer.", "fieldConfig": { "defaults": { "color": { @@ -8187,7 +8260,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "description": "Number of ExExs on the node", + "description": "Total number of ExExes installed in the node", "fieldConfig": { "defaults": { "color": { @@ -8250,7 +8323,7 @@ "refId": "A" } ], - "title": "Number of ExExs", + "title": "Number of ExExes", "type": "stat" }, { @@ -8261,6 +8334,343 @@ "x": 0, "y": 308 }, + "id": 241, + "panels": [], + "title": "Execution Extensions Write-Ahead Log", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" 
+ }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 309 + }, + "id": 243, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_lowest_committed_block_height{instance=~\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Lowest Block", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_highest_committed_block_height{instance=~\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Highest Block", + "range": true, + "refId": "C" + } + ], + "title": "Current Committed Block Heights", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": 
false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 309 + }, + "id": 244, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_committed_blocks_count{instance=~\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Committed Blocks", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_notifications_count{instance=~\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "Notifications", + "range": true, + "refId": "B" + } + ], + "title": "Number of entities", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 317 + }, + "id": 245, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "expr": "reth_exex_wal_size_bytes{instance=~\"$instance\"}", + "hide": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "C" + } + ], + "title": "Total size of all notifications", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 325 + }, "id": 226, "panels": [], "title": "Eth Requests", @@ -8357,7 +8767,7 @@ "h": 8, "w": 12, "x": 0, - "y": 309 + "y": 326 }, "id": 225, "options": { @@ -8486,7 +8896,7 @@ "h": 8, "w": 12, "x": 12, - "y": 309 + "y": 326 }, "id": 227, "options": { @@ -8615,7 +9025,7 @@ "h": 8, "w": 12, "x": 0, - "y": 317 + "y": 334 }, "id": 235, "options": { @@ -8744,7 +9154,7 @@ "h": 8, "w": 12, "x": 12, - "y": 317 + "y": 334 }, "id": 234, "options": { @@ -8821,6 +9231,6 @@ "timezone": "", "title": "Reth", "uid": "2k8BXz24x", - "version": 3, + 
"version": 8, "weekStart": "" } diff --git a/etc/grafana/dashboards/reth-mempool.json b/etc/grafana/dashboards/reth-mempool.json index 41be1dc411ebc..ebb693184a5a6 100644 --- a/etc/grafana/dashboards/reth-mempool.json +++ b/etc/grafana/dashboards/reth-mempool.json @@ -15,7 +15,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "11.1.3" + "version": "11.2.0" }, { "type": "panel", @@ -131,7 +131,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.1.3", + "pluginVersion": "11.2.0", "targets": [ { "datasource": { @@ -201,7 +201,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.1.3", + "pluginVersion": "11.2.0", "targets": [ { "datasource": { @@ -271,7 +271,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.1.3", + "pluginVersion": "11.2.0", "targets": [ { "datasource": { @@ -341,7 +341,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.1.3", + "pluginVersion": "11.2.0", "targets": [ { "datasource": { @@ -411,7 +411,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.1.3", + "pluginVersion": "11.2.0", "targets": [ { "datasource": { @@ -481,7 +481,7 @@ "textMode": "name", "wideLayout": true }, - "pluginVersion": "11.1.3", + "pluginVersion": "11.2.0", "targets": [ { "datasource": { @@ -534,6 +534,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -686,6 +687,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -817,6 +819,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -969,6 +972,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1099,6 
+1103,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1261,6 +1266,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1357,6 +1363,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1504,6 +1511,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1605,6 +1613,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1756,6 +1765,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1852,6 +1862,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1971,6 +1982,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -2101,6 +2113,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -2323,6 +2336,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -2442,6 +2456,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -2578,6 +2593,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", 
"fillOpacity": 0, "gradientMode": "none", @@ -2679,6 +2695,177 @@ "title": "Fetch Hashes Pending Fetch Duration", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "Number of all transactions of all sub-pools by type", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 69 + }, + "id": 218, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_legacy_transactions{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "Legacy", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": 
"code", + "expr": "reth_transaction_pool_total_eip2930_transactions{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-2930", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip1559_transactions{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-1559", + "range": true, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip4844_transactions{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-4844", + "range": true, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "reth_transaction_pool_total_eip7702_transactions{instance=\"$instance\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "EIP-7702", + "range": true, + "refId": "E", + "useBackend": false + } + ], + "title": "Transactions by Type in Pool", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -2697,6 +2884,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -2865,6 +3053,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -2960,6 +3149,7 @@ "axisLabel": "", 
"axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -3102,6 +3292,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -3408,6 +3599,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -3524,6 +3716,6 @@ "timezone": "", "title": "Reth - Transaction Pool", "uid": "bee34f59-c79c-4669-a000-198057b3703d", - "version": 3, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/etc/grafana/dashboards/reth-performance.json b/etc/grafana/dashboards/reth-performance.json index e0ff5865dd599..02d890dceeff0 100644 --- a/etc/grafana/dashboards/reth-performance.json +++ b/etc/grafana/dashboards/reth-performance.json @@ -15,7 +15,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "11.2.0" + "version": "11.1.0" }, { "type": "datasource", @@ -83,7 +83,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 25, "gradientMode": "none", @@ -137,9 +136,9 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "right", - "showLegend": false + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single", @@ -202,7 +201,6 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 25, "gradientMode": "none", @@ -256,9 +254,9 @@ "options": { "legend": { "calcs": [], - "displayMode": "hidden", - "placement": "right", - "showLegend": false + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, "tooltip": { "mode": "single", @@ -315,7 +313,7 @@ "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "definition": "label_values($instance)", + 
"definition": "query_result(reth_info)", "hide": 0, "includeAll": false, "label": "instance", @@ -323,12 +321,12 @@ "name": "instance", "options": [], "query": { - "qryType": 1, - "query": "label_values($instance)", + "qryType": 3, + "query": "query_result(reth_info)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 1, - "regex": "", + "regex": "/.*instance=\\\"([^\\\"]*).*/", "skipUrlSync": false, "sort": 0, "type": "query" @@ -341,8 +339,8 @@ }, "timepicker": {}, "timezone": "browser", - "title": "Reth Performance", + "title": "Reth - Performance", "uid": "bdywb3xjphfy8a", "version": 2, "weekStart": "" -} \ No newline at end of file +} diff --git a/examples/beacon-api-sidecar-fetcher/src/main.rs b/examples/beacon-api-sidecar-fetcher/src/main.rs index 7d8880ca185cc..a0b9b6e01ec8c 100644 --- a/examples/beacon-api-sidecar-fetcher/src/main.rs +++ b/examples/beacon-api-sidecar-fetcher/src/main.rs @@ -23,7 +23,7 @@ use clap::Parser; use futures_util::{stream::FuturesUnordered, StreamExt}; use mined_sidecar::MinedSidecarStream; use reth::{ - args::utils::EthereumChainSpecParser, builder::NodeHandle, cli::Cli, + builder::NodeHandle, chainspec::EthereumChainSpecParser, cli::Cli, providers::CanonStateSubscriptions, }; use reth_node_ethereum::EthereumNode; diff --git a/examples/beacon-api-sse/src/main.rs b/examples/beacon-api-sse/src/main.rs index 81535ef6140b5..243511d4960ee 100644 --- a/examples/beacon-api-sse/src/main.rs +++ b/examples/beacon-api-sse/src/main.rs @@ -21,7 +21,7 @@ use alloy_rpc_types_beacon::events::PayloadAttributesEvent; use clap::Parser; use futures_util::stream::StreamExt; use mev_share_sse::{client::EventStream, EventClient}; -use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; +use reth::{chainspec::EthereumChainSpecParser, cli::Cli}; use reth_node_ethereum::EthereumNode; use std::net::{IpAddr, Ipv4Addr}; use tracing::{info, warn}; diff --git a/examples/custom-engine-types/src/main.rs 
b/examples/custom-engine-types/src/main.rs index 213a156af8fd6..34f8186be7f82 100644 --- a/examples/custom-engine-types/src/main.rs +++ b/examples/custom-engine-types/src/main.rs @@ -253,6 +253,10 @@ where .consensus(EthereumConsensusBuilder::default()) .engine_validator(CustomEngineValidatorBuilder::default()) } + + fn add_ons(&self) -> Self::AddOns { + EthereumAddOns::default() + } } /// A custom payload service builder that supports the custom engine types diff --git a/examples/custom-evm/src/main.rs b/examples/custom-evm/src/main.rs index d931c3b275bf4..9c421f9c6a59e 100644 --- a/examples/custom-evm/src/main.rs +++ b/examples/custom-evm/src/main.rs @@ -226,7 +226,7 @@ async fn main() -> eyre::Result<()> { .executor(MyExecutorBuilder::default()) .payload(MyPayloadBuilder::default()), ) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .launch() .await .unwrap(); diff --git a/examples/custom-inspector/src/main.rs b/examples/custom-inspector/src/main.rs index 700de274e6810..12b7620f4adcf 100644 --- a/examples/custom-inspector/src/main.rs +++ b/examples/custom-inspector/src/main.rs @@ -15,8 +15,8 @@ use alloy_rpc_types::state::EvmOverrides; use clap::Parser; use futures_util::StreamExt; use reth::{ - args::utils::EthereumChainSpecParser, builder::NodeHandle, + chainspec::EthereumChainSpecParser, cli::Cli, primitives::BlockNumberOrTag, revm::{ diff --git a/examples/custom-node-components/src/main.rs b/examples/custom-node-components/src/main.rs index 1faca73d25b05..d00b8a70224af 100644 --- a/examples/custom-node-components/src/main.rs +++ b/examples/custom-node-components/src/main.rs @@ -25,7 +25,7 @@ fn main() { // Configure the components of the node // use default ethereum components but use our custom pool .with_components(EthereumNode::components().pool(CustomPoolBuilder::default())) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .launch() .await?; diff --git a/examples/custom-payload-builder/src/main.rs 
b/examples/custom-payload-builder/src/main.rs index 5ed414eb850bc..e46b969adaa14 100644 --- a/examples/custom-payload-builder/src/main.rs +++ b/examples/custom-payload-builder/src/main.rs @@ -81,7 +81,7 @@ fn main() { .with_components( EthereumNode::components().payload(CustomPayloadBuilder::default()), ) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .launch() .await?; diff --git a/examples/node-custom-rpc/src/main.rs b/examples/node-custom-rpc/src/main.rs index 5aeecfd2915c0..92e0bfea26e9b 100644 --- a/examples/node-custom-rpc/src/main.rs +++ b/examples/node-custom-rpc/src/main.rs @@ -14,7 +14,7 @@ use clap::Parser; use jsonrpsee::{core::RpcResult, proc_macros::rpc}; -use reth::{args::utils::EthereumChainSpecParser, cli::Cli}; +use reth::{chainspec::EthereumChainSpecParser, cli::Cli}; use reth_node_ethereum::EthereumNode; use reth_transaction_pool::TransactionPool; diff --git a/examples/stateful-precompile/src/main.rs b/examples/stateful-precompile/src/main.rs index 05a6fd86c9350..26ebdfe4124b8 100644 --- a/examples/stateful-precompile/src/main.rs +++ b/examples/stateful-precompile/src/main.rs @@ -263,7 +263,7 @@ async fn main() -> eyre::Result<()> { .with_types::() // use default ethereum components but with our executor .with_components(EthereumNode::components().executor(MyExecutorBuilder::default())) - .with_add_ons::() + .with_add_ons(EthereumAddOns::default()) .launch() .await .unwrap(); diff --git a/examples/txpool-tracing/src/main.rs b/examples/txpool-tracing/src/main.rs index f8c2e19d203a1..94f800987a967 100644 --- a/examples/txpool-tracing/src/main.rs +++ b/examples/txpool-tracing/src/main.rs @@ -15,7 +15,7 @@ use alloy_rpc_types_trace::{parity::TraceType, tracerequest::TraceCallRequest}; use clap::Parser; use futures_util::StreamExt; use reth::{ - args::utils::EthereumChainSpecParser, builder::NodeHandle, cli::Cli, + builder::NodeHandle, chainspec::EthereumChainSpecParser, cli::Cli, rpc::compat::transaction::transaction_to_call_request, 
transaction_pool::TransactionPool, }; use reth_node_ethereum::node::EthereumNode; diff --git a/fork.yaml b/fork.yaml index 0ae42b86efad2..b8c77208f4ab5 100644 --- a/fork.yaml +++ b/fork.yaml @@ -4,7 +4,7 @@ footer: | base: name: reth url: https://github.com/paradigmxyz/reth - hash: 4960b927bcf5b1ce1fffd88f76c77929110b9eb0 + hash: b787d9e521fad1cb28a372637474ae4ec4986bf3 fork: name: scroll-reth url: https://github.com/scroll-tech/reth