From 4428163b1764db369c71ce1a732faf1cbc79f582 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Mon, 26 Feb 2024 14:16:53 -0800 Subject: [PATCH 1/3] `e2e`: Enable verbose load test logging to highlight breakage --- tests/load/load_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/load/load_test.go b/tests/load/load_test.go index 5a2b5c21e5..8b1e365fea 100644 --- a/tests/load/load_test.go +++ b/tests/load/load_test.go @@ -61,7 +61,9 @@ var _ = ginkgo.Describe("[Load Simulator]", ginkgo.Ordered, func() { // The load tests are flaky at high levels of evm logging, so leave it at // the default level instead of raising it to debug (as the warp testing does). - chainConfig := tmpnet.FlagsMap{} + chainConfig := tmpnet.FlagsMap{ + "log-level": "debug", + } nodes := utils.NewTmpnetNodes(nodeCount) @@ -71,7 +73,7 @@ var _ = ginkgo.Describe("[Load Simulator]", ginkgo.Ordered, func() { nodes, tmpnet.FlagsMap{ // The default tmpnet log level (debug) induces too much overhead for load testing. - config.LogLevelKey: "info", + config.LogLevelKey: "debug", }, utils.NewTmpnetSubnet(subnetAName, genesisPath, chainConfig, nodes...), ), From 7f6cb8a62939986bc24e231c3220f902fddea178 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Tue, 12 Mar 2024 22:00:40 -0700 Subject: [PATCH 2/3] Revert "fix test reliability (#1118)" This reverts commit 96188ab1db02725641b8092ec151a70277fbb468. --- core/txpool/txpool.go | 22 ++++++++++------------ internal/ethapi/api_test.go | 1 - 2 files changed, 10 insertions(+), 13 deletions(-) diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index ce3f5f7e73..252aca0cf8 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -114,16 +114,7 @@ func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) return nil, err } } - - // Subscribe to chain head events to trigger subpool resets - var ( - newHeadCh = make(chan core.ChainHeadEvent) - newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) - ) - go func() { - pool.loop(head, newHeadCh) - newHeadSub.Unsubscribe() - }() + go pool.loop(head, chain) return pool, nil } @@ -201,7 +192,14 @@ func (p *TxPool) Close() error { // loop is the transaction pool's main event loop, waiting for and reacting to // outside blockchain events as well as for various reporting and transaction // eviction events. 
-func (p *TxPool) loop(head *types.Header, newHeadCh <-chan core.ChainHeadEvent) { +func (p *TxPool) loop(head *types.Header, chain BlockChain) { + // Subscribe to chain head events to trigger subpool resets + var ( + newHeadCh = make(chan core.ChainHeadEvent) + newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) + ) + defer newHeadSub.Unsubscribe() + // Track the previous and current head to feed to an idle reset var ( oldHead = head @@ -226,8 +224,8 @@ func (p *TxPool) loop(head *types.Header, newHeadCh <-chan core.ChainHeadEvent) for _, subpool := range p.subpools { subpool.Reset(oldHead, newHead) } - p.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: newHead}) resetDone <- newHead + p.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: newHead}) }(oldHead, newHead) default: diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 6b7251266d..f168777153 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -377,7 +377,6 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i t.Fatalf("block %d: failed to accept into chain: %v", block.NumberU64(), err) } } - chain.DrainAcceptorQueue() backend := &testBackend{db: db, chain: chain} return backend From d709f48cb8e7bef2c043cc966ad38edc34d26d73 Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Tue, 12 Mar 2024 22:00:50 -0700 Subject: [PATCH 3/3] Revert "squashed all from geth-v1.12.2-x (#1117)" This reverts commit 49b0e313d72844e0aadf2d40def322e8bc76ae2f. --- accounts/abi/bind/backend.go | 2 +- accounts/abi/bind/backends/simulated.go | 22 +- accounts/abi/bind/backends/simulated_test.go | 1 - accounts/abi/bind/bind.go | 2 +- accounts/abi/error.go | 2 +- accounts/abi/event.go | 1 - accounts/abi/reflect.go | 2 +- accounts/abi/type.go | 6 +- accounts/abi/unpack.go | 7 +- accounts/external/backend.go | 13 +- accounts/keystore/account_cache.go | 14 +- accounts/keystore/account_cache_test.go | 7 +- accounts/keystore/keystore_test.go | 10 +- accounts/keystore/passphrase.go | 7 +- accounts/scwallet/securechannel.go | 11 +- accounts/scwallet/wallet.go | 6 +- cmd/evm/README.md | 2 +- cmd/evm/internal/t8ntool/block.go | 2 +- cmd/evm/internal/t8ntool/execution.go | 129 +- cmd/evm/internal/t8ntool/gen_stenv.go | 82 +- cmd/evm/internal/t8ntool/transaction.go | 2 +- cmd/evm/runner.go | 5 +- cmd/evm/staterunner.go | 1 - cmd/evm/t8n_test.go | 8 - cmd/evm/testdata/13/readme.md | 2 +- cmd/evm/testdata/23/readme.md | 2 +- cmd/evm/testdata/28/alloc.json | 16 - cmd/evm/testdata/28/env.json | 23 - cmd/evm/testdata/28/exp.json | 46 - cmd/evm/testdata/28/txs.rlp | 1 - cmd/evm/testdata/3/readme.md | 2 +- cmd/evm/testdata/4/readme.md | 2 +- cmd/evm/testdata/5/readme.md | 2 +- cmd/evm/transition-test.sh | 2 +- cmd/utils/cmd.go | 45 - cmd/utils/flags.go | 23 + consensus/dummy/consensus.go | 18 +- consensus/misc/eip4844.go | 54 + consensus/misc/eip4844/eip4844.go | 108 - consensus/misc/{eip4844 => }/eip4844_test.go | 43 +- core/block_validator.go | 18 - core/blockchain.go | 86 +- core/blockchain_test.go | 97 + core/chain_indexer.go | 3 +- core/chain_makers.go | 3 +- core/error.go | 4 - core/evm.go | 6 +- core/gen_genesis.go | 18 - core/genesis.go | 40 +- core/genesis_test.go | 4 +- core/mkalloc.go | 20 +- core/predicate_check_test.go | 4 +- core/rawdb/accessors_chain.go | 20 +- core/rawdb/accessors_chain_test.go | 4 +- core/rawdb/accessors_indexes_test.go | 30 +- core/rawdb/accessors_state.go | 67 - core/rawdb/accessors_trie.go | 46 +- core/rawdb/chain_iterator_test.go | 8 +- core/rawdb/database.go | 8 +- 
core/rawdb/schema.go | 12 - core/rawdb/table.go | 4 +- core/state/database.go | 30 +- core/state/dump.go | 19 +- core/state/iterator.go | 28 +- core/state/journal.go | 31 +- core/state/metrics.go | 7 - core/state/pruner/pruner.go | 46 +- core/state/snapshot/account.go | 97 + core/state/snapshot/conversion.go | 7 +- core/state/snapshot/difflayer.go | 13 +- core/state/snapshot/difflayer_test.go | 2 +- core/state/snapshot/disklayer.go | 5 +- core/state/snapshot/generate.go | 32 +- core/state/snapshot/generate_test.go | 141 +- core/state/snapshot/iterator_fast.go | 36 +- core/state/snapshot/journal.go | 2 +- core/state/snapshot/snapshot.go | 7 +- core/state/snapshot/snapshot_test.go | 7 +- .../slotter.go => state/snapshot/sort.go} | 40 +- core/state/state_object.go | 210 +- core/state/state_test.go | 14 +- core/state/statedb.go | 500 +--- core/state/statedb_fuzz_test.go | 386 --- core/state/statedb_test.go | 160 +- core/state/sync_test.go | 7 +- core/state/trie_prefetcher.go | 2 +- core/state_processor.go | 4 +- core/state_processor_test.go | 203 +- core/state_transition.go | 74 +- core/trie_stress_bench_test.go | 5 - core/txpool/blobpool/blobpool.go | 1649 ------------ core/txpool/blobpool/blobpool_test.go | 1273 --------- core/txpool/blobpool/config.go | 60 - core/txpool/blobpool/evictheap.go | 156 -- core/txpool/blobpool/evictheap_test.go | 330 --- core/txpool/blobpool/interface.go | 59 - core/txpool/blobpool/limbo.go | 268 -- core/txpool/blobpool/metrics.go | 88 - core/txpool/blobpool/priority.go | 100 - core/txpool/blobpool/priority_test.go | 97 - core/txpool/blobpool/slotter_test.go | 68 - core/txpool/errors.go | 67 - core/txpool/{legacypool => }/journal.go | 2 +- core/txpool/legacypool/legacypool.go | 2141 --------------- core/txpool/{legacypool => }/list.go | 4 +- core/txpool/{legacypool => }/list_test.go | 2 +- core/txpool/{legacypool => }/noncer.go | 2 +- core/txpool/subpool.go | 153 -- core/txpool/txpool.go | 2345 ++++++++++++++--- .../legacypool2_test.go => txpool2_test.go} | 56 +- .../legacypool_test.go => txpool_test.go} | 490 ++-- core/txpool/validation.go | 272 -- core/types/block.go | 124 +- core/types/block_test.go | 30 +- core/types/gen_header_json.go | 52 +- core/types/gen_header_rlp.go | 25 +- core/types/gen_receipt_json.go | 12 - core/types/hashes.go | 11 - core/types/receipt.go | 28 +- core/types/receipt_test.go | 16 +- core/types/state_account.go | 87 - core/types/transaction.go | 145 +- core/types/transaction_marshalling.go | 119 +- core/types/transaction_signing.go | 8 +- core/types/transaction_test.go | 150 +- core/types/tx_blob.go | 10 +- core/vm/contracts.go | 87 - core/vm/contracts_test.go | 78 +- core/vm/eips.go | 64 +- core/vm/evm.go | 32 +- core/vm/gas_table.go | 4 +- core/vm/instructions.go | 20 +- core/vm/instructions_test.go | 185 +- core/vm/interface.go | 8 +- core/vm/interpreter.go | 2 - core/vm/jump_table.go | 11 - core/vm/jump_table_export.go | 2 - core/vm/memory.go | 11 - core/vm/memory_table.go | 9 +- core/vm/memory_test.go | 69 - core/vm/opcodes.go | 24 +- core/vm/operations_acl.go | 144 +- core/vm/runtime/env.go | 5 +- core/vm/runtime/runtime.go | 7 +- .../testdata/precompiles/pointEvaluation.json | 9 - eth/api.go | 438 ++- eth/api_admin.go | 149 -- eth/api_backend.go | 22 +- eth/api_debug.go | 363 --- eth/backend.go | 24 +- eth/chain_with_final_block.go | 23 - eth/ethconfig/config.go | 11 +- eth/filters/api.go | 4 +- eth/filters/filter.go | 180 +- eth/filters/filter_system.go | 11 +- eth/filters/filter_test.go | 303 +-- eth/gasprice/feehistory.go | 
36 +- eth/gasprice/gasprice.go | 12 +- eth/state_accessor.go | 2 +- eth/tracers/api_test.go | 13 +- .../internal/tracetest/calltrace_test.go | 1 + .../internal/tracetest/prestate_test.go | 9 +- eth/tracers/js/goja.go | 4 +- eth/tracers/logger/logger.go | 5 - eth/tracers/native/4byte.go | 2 +- eth/tracers/native/call_flat.go | 6 +- .../subnetevmclient/subnet_evm_client.go | 5 - go.mod | 22 +- go.sum | 38 +- internal/blocktest/test_hash.go | 69 - internal/cmdtest/test_cmd.go | 6 +- internal/debug/flags.go | 3 +- internal/ethapi/api.go | 223 +- internal/ethapi/api_test.go | 1462 ++-------- internal/ethapi/backend.go | 11 +- internal/ethapi/transaction_args.go | 5 +- internal/flags/categories.go | 3 +- metrics/resetting_timer.go | 14 +- metrics/sample.go | 15 +- metrics/timer.go | 2 +- metrics/writer.go | 20 +- metrics/writer_test.go | 7 +- miner/ordering.go | 157 -- miner/ordering_ext.go | 15 - miner/ordering_test.go | 198 -- miner/worker.go | 57 +- node/config.go | 6 - params/config.go | 193 +- params/config_extra.go | 149 -- params/config_test.go | 10 +- params/network_upgrades.go | 13 - params/protocol_params.go | 17 +- params/version.go | 2 +- plugin/evm/block.go | 8 +- plugin/evm/block_verification.go | 18 +- plugin/evm/config.go | 28 +- plugin/evm/gossip.go | 10 +- plugin/evm/gossip_test.go | 14 +- plugin/evm/gossiper_eth_gossiping_test.go | 4 +- plugin/evm/handler.go | 6 +- plugin/evm/tx_gossip_test.go | 5 +- plugin/evm/vm.go | 4 +- plugin/evm/vm_test.go | 6 +- precompile/contract/interfaces.go | 2 + precompile/contract/mocks.go | 14 + rpc/client.go | 140 +- rpc/client_opt.go | 29 - rpc/client_test.go | 269 +- rpc/errors.go | 44 +- rpc/handler.go | 288 +- rpc/http.go | 21 +- rpc/inproc.go | 3 +- rpc/server.go | 28 +- rpc/server_test.go | 39 - rpc/subscription.go | 13 +- rpc/testdata/invalid-batch-toolarge.js | 13 - rpc/websocket.go | 4 +- scripts/avalanche_header.txt | 10 - scripts/build_test.sh | 2 +- scripts/format_add_avalanche_header.sh | 14 - scripts/format_as_fork.sh | 53 - scripts/format_as_upstream.sh | 47 - scripts/geth-allowed-packages.txt | 1 - sync/handlers/leafs_request.go | 14 +- sync/handlers/leafs_request_test.go | 10 +- sync/statesync/sync_helpers.go | 9 +- sync/statesync/sync_test.go | 6 +- sync/statesync/test_sync.go | 3 +- sync/syncutils/iterators.go | 3 +- sync/syncutils/test_trie.go | 29 +- tests/init.go | 16 - tests/state_test_util.go | 50 +- trie/committer.go | 15 +- trie/database_test.go | 8 +- trie/{database.go => database_wrap.go} | 109 +- trie/errors.go | 6 - trie/iterator_test.go | 93 +- trie/proof.go | 14 +- trie/proof_test.go | 180 +- trie/secure_trie.go | 28 +- trie/secure_trie_test.go | 4 +- trie/sync_test.go | 4 +- trie/testutil/utils.go | 71 - trie/tracer.go | 11 +- trie/tracer_test.go | 46 +- trie/trie.go | 84 +- trie/trie_reader.go | 36 +- trie/trie_test.go | 98 +- trie/triedb/hashdb/database.go | 80 +- trie/triedb/pathdb/database.go | 373 --- trie/triedb/pathdb/database_test.go | 563 ---- trie/triedb/pathdb/difflayer.go | 184 -- trie/triedb/pathdb/difflayer_test.go | 180 -- trie/triedb/pathdb/disklayer.go | 308 --- trie/triedb/pathdb/errors.go | 63 - trie/triedb/pathdb/history.go | 496 ---- trie/triedb/pathdb/history_test.go | 171 -- trie/triedb/pathdb/journal.go | 388 --- trie/triedb/pathdb/layertree.go | 224 -- trie/triedb/pathdb/metrics.go | 61 - trie/triedb/pathdb/nodebuffer.go | 287 -- trie/triedb/pathdb/testutils.go | 166 -- trie/trienode/node.go | 88 +- trie/triestate/state.go | 277 -- utils/metered_cache.go | 30 +- 
warp/aggregator/mock_signature_getter.go | 2 +- 266 files changed, 6049 insertions(+), 18936 deletions(-) delete mode 100644 cmd/evm/testdata/28/alloc.json delete mode 100644 cmd/evm/testdata/28/env.json delete mode 100644 cmd/evm/testdata/28/exp.json delete mode 100644 cmd/evm/testdata/28/txs.rlp delete mode 100644 cmd/utils/cmd.go create mode 100644 consensus/misc/eip4844.go delete mode 100644 consensus/misc/eip4844/eip4844.go rename consensus/misc/{eip4844 => }/eip4844_test.go (58%) create mode 100644 core/state/snapshot/account.go rename core/{txpool/blobpool/slotter.go => state/snapshot/sort.go} (53%) delete mode 100644 core/state/statedb_fuzz_test.go delete mode 100644 core/txpool/blobpool/blobpool.go delete mode 100644 core/txpool/blobpool/blobpool_test.go delete mode 100644 core/txpool/blobpool/config.go delete mode 100644 core/txpool/blobpool/evictheap.go delete mode 100644 core/txpool/blobpool/evictheap_test.go delete mode 100644 core/txpool/blobpool/interface.go delete mode 100644 core/txpool/blobpool/limbo.go delete mode 100644 core/txpool/blobpool/metrics.go delete mode 100644 core/txpool/blobpool/priority.go delete mode 100644 core/txpool/blobpool/priority_test.go delete mode 100644 core/txpool/blobpool/slotter_test.go delete mode 100644 core/txpool/errors.go rename core/txpool/{legacypool => }/journal.go (99%) delete mode 100644 core/txpool/legacypool/legacypool.go rename core/txpool/{legacypool => }/list.go (99%) rename core/txpool/{legacypool => }/list_test.go (99%) rename core/txpool/{legacypool => }/noncer.go (99%) delete mode 100644 core/txpool/subpool.go rename core/txpool/{legacypool/legacypool2_test.go => txpool2_test.go} (84%) rename core/txpool/{legacypool/legacypool_test.go => txpool_test.go} (86%) delete mode 100644 core/txpool/validation.go delete mode 100644 core/vm/memory_test.go delete mode 100644 core/vm/testdata/precompiles/pointEvaluation.json delete mode 100644 eth/api_admin.go delete mode 100644 eth/api_debug.go delete mode 100644 eth/chain_with_final_block.go delete mode 100644 internal/blocktest/test_hash.go delete mode 100644 miner/ordering.go delete mode 100644 miner/ordering_ext.go delete mode 100644 miner/ordering_test.go delete mode 100644 params/config_extra.go delete mode 100644 rpc/testdata/invalid-batch-toolarge.js delete mode 100644 scripts/avalanche_header.txt delete mode 100755 scripts/format_add_avalanche_header.sh delete mode 100755 scripts/format_as_fork.sh delete mode 100755 scripts/format_as_upstream.sh rename trie/{database.go => database_wrap.go} (76%) delete mode 100644 trie/testutil/utils.go delete mode 100644 trie/triedb/pathdb/database.go delete mode 100644 trie/triedb/pathdb/database_test.go delete mode 100644 trie/triedb/pathdb/difflayer.go delete mode 100644 trie/triedb/pathdb/difflayer_test.go delete mode 100644 trie/triedb/pathdb/disklayer.go delete mode 100644 trie/triedb/pathdb/errors.go delete mode 100644 trie/triedb/pathdb/history.go delete mode 100644 trie/triedb/pathdb/history_test.go delete mode 100644 trie/triedb/pathdb/journal.go delete mode 100644 trie/triedb/pathdb/layertree.go delete mode 100644 trie/triedb/pathdb/metrics.go delete mode 100644 trie/triedb/pathdb/nodebuffer.go delete mode 100644 trie/triedb/pathdb/testutils.go delete mode 100644 trie/triestate/state.go diff --git a/accounts/abi/bind/backend.go b/accounts/abi/bind/backend.go index f83ed9aab6..2fc19f108f 100644 --- a/accounts/abi/bind/backend.go +++ b/accounts/abi/bind/backend.go @@ -39,7 +39,7 @@ import ( var ( // ErrNoCode is returned by call and 
transact operations for which the requested // recipient contract to operate on does not exist in the state db or does not - // have any code associated with it (i.e. self-destructed). + // have any code associated with it (i.e. suicided). ErrNoCode = errors.New("no contract code at given address") // ErrNoAcceptedState is raised when attempting to perform a accepted state action diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index d401e086fe..7efabaa1b6 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -62,6 +62,8 @@ import ( var ( _ bind.AcceptedContractCaller = (*SimulatedBackend)(nil) _ bind.ContractBackend = (*SimulatedBackend)(nil) + _ bind.ContractFilterer = (*SimulatedBackend)(nil) + _ bind.ContractTransactor = (*SimulatedBackend)(nil) _ bind.DeployBackend = (*SimulatedBackend)(nil) _ interfaces.ChainReader = (*SimulatedBackend)(nil) @@ -145,7 +147,7 @@ func (b *SimulatedBackend) Close() error { return nil } -// Commit imports all the accepted transactions as a single block and starts a +// Commit imports all the pending transactions as a single block and starts a // fresh new state. func (b *SimulatedBackend) Commit(accept bool) common.Hash { b.mu.Lock() @@ -169,7 +171,7 @@ func (b *SimulatedBackend) Commit(accept bool) common.Hash { return blockHash } -// Rollback aborts all accepted transactions, reverting to the last committed state. +// Rollback aborts all pending transactions, reverting to the last committed state. func (b *SimulatedBackend) Rollback() { b.mu.Lock() defer b.mu.Unlock() @@ -204,7 +206,7 @@ func (b *SimulatedBackend) Fork(ctx context.Context, parent common.Hash) error { defer b.mu.Unlock() if len(b.acceptedBlock.Transactions()) != 0 { - return errors.New("accepted block dirty") + return errors.New("pending block dirty") } block, err := b.blockByHash(ctx, parent) if err != nil { @@ -291,10 +293,10 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common return receipt, nil } -// TransactionByHash checks the pool of accepted transactions in addition to the -// blockchain. The isAccepted return value indicates whether the transaction has been +// TransactionByHash checks the pool of pending transactions in addition to the +// blockchain. The isPending return value indicates whether the transaction has been // mined yet. Note that the transaction may not be part of the canonical chain even if -// it's not accepted. +// it's not pending. func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) { b.mu.Lock() defer b.mu.Unlock() @@ -535,7 +537,7 @@ func (b *SimulatedBackend) SuggestGasTipCap(ctx context.Context) (*big.Int, erro return big.NewInt(1), nil } -// EstimateGas executes the requested code against the currently accepted block/state and +// EstimateGas executes the requested code against the currently pending block/state and // returns the used amount of gas. func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.CallMsg) (uint64, error) { b.mu.Lock() @@ -639,7 +641,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call interfaces.Call return hi, nil } -// callContract implements common code between normal and accepted contract calls. +// callContract implements common code between normal and pending contract calls. // state is modified during execution, make sure to copy it if necessary. 
func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.CallMsg, header *types.Header, stateDB *state.StateDB) (*core.ExecutionResult, error) { // Gas prices post 1559 need to be initialized @@ -709,7 +711,7 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call interfaces.Cal return core.ApplyMessage(vmEnv, msg, gasPool) } -// SendTransaction updates the accepted block to include the given transaction. +// SendTransaction updates the pending block to include the given transaction. func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transaction) error { b.mu.Lock() defer b.mu.Unlock() @@ -852,7 +854,7 @@ func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error { } block := b.blockchain.GetBlockByHash(b.acceptedBlock.ParentHash()) if block == nil { - return errors.New("could not find parent") + return fmt.Errorf("could not find parent") } blocks, _, _ := core.GenerateChain(b.config, block, dummy.NewFaker(), b.database, 1, 10, func(number int, block *core.BlockGen) { diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index f51e3e7101..714caf95f2 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -163,7 +163,6 @@ func TestAdjustTime(t *testing.T) { func TestNewAdjustTimeFail(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) - defer sim.blockchain.Stop() // Create tx and send head, _ := sim.HeaderByNumber(context.Background(), nil) // Should be child's, good enough diff --git a/accounts/abi/bind/bind.go b/accounts/abi/bind/bind.go index b38f118c40..e05f509bcc 100644 --- a/accounts/abi/bind/bind.go +++ b/accounts/abi/bind/bind.go @@ -151,7 +151,7 @@ func BindHelper(types []string, abis []string, bytecodes []string, fsigs []map[s normalized := original normalizedName := methodNormalizer[lang](alias(aliases, original.Name)) // Ensure there is no duplicated identifier - var identifiers = callIdentifiers + identifiers := callIdentifiers if !original.IsConstant() { identifiers = transactIdentifiers } diff --git a/accounts/abi/error.go b/accounts/abi/error.go index 34bb373c60..d94c262124 100644 --- a/accounts/abi/error.go +++ b/accounts/abi/error.go @@ -42,7 +42,7 @@ type Error struct { str string // Sig contains the string signature according to the ABI spec. - // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)" + // e.g. error foo(uint32 a, int b) = "foo(uint32,int256)" // Please note that "int" is substitute for its canonical representation "int256" Sig string diff --git a/accounts/abi/event.go b/accounts/abi/event.go index f4f0f5d92d..63ecda229d 100644 --- a/accounts/abi/event.go +++ b/accounts/abi/event.go @@ -59,7 +59,6 @@ type Event struct { // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" // Please note that "int" is substitute for its canonical representation "int256" Sig string - // ID returns the canonical representation of the event's signature used by the // abi definition to identify event names and types. 
ID common.Hash diff --git a/accounts/abi/reflect.go b/accounts/abi/reflect.go index 4adbf5b1c6..9f7a07a0c3 100644 --- a/accounts/abi/reflect.go +++ b/accounts/abi/reflect.go @@ -238,7 +238,7 @@ func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[stri structFieldName := ToCamelCase(argName) if structFieldName == "" { - return nil, errors.New("abi: purely underscored output cannot unpack to struct") + return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct") } // this abi has already been paired, skip it... unless there exists another, yet unassigned diff --git a/accounts/abi/type.go b/accounts/abi/type.go index 75a6c15fd7..f7dc5e6a82 100644 --- a/accounts/abi/type.go +++ b/accounts/abi/type.go @@ -80,7 +80,7 @@ var ( func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) { // check that array brackets are equal if they exist if strings.Count(t, "[") != strings.Count(t, "]") { - return Type{}, errors.New("invalid arg type in abi") + return Type{}, fmt.Errorf("invalid arg type in abi") } typ.stringKind = t @@ -119,7 +119,7 @@ func NewType(t string, internalType string, components []ArgumentMarshaling) (ty } typ.stringKind = embeddedType.stringKind + sliced } else { - return Type{}, errors.New("invalid formatting of array type") + return Type{}, fmt.Errorf("invalid formatting of array type") } return typ, err } @@ -356,7 +356,7 @@ func (t Type) pack(v reflect.Value) ([]byte, error) { } } -// requiresLengthPrefix returns whether the type requires any sort of length +// requireLengthPrefix returns whether the type requires any sort of length // prefixing. func (t Type) requiresLengthPrefix() bool { return t.T == StringTy || t.T == BytesTy || t.T == SliceTy diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go index 2899e5a5b3..bc57d71db6 100644 --- a/accounts/abi/unpack.go +++ b/accounts/abi/unpack.go @@ -28,7 +28,6 @@ package abi import ( "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -136,7 +135,7 @@ func readBool(word []byte) (bool, error) { // readFunctionType enforces that standard by always presenting it as a 24-array (address + sig = 24 bytes) func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { if t.T != FunctionTy { - return [24]byte{}, errors.New("abi: invalid type in call to make function type byte array") + return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array") } if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 { err = fmt.Errorf("abi: got improperly encoded function type, got %v", word) @@ -149,7 +148,7 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) { // ReadFixedBytes uses reflection to create a fixed array to be read from. func ReadFixedBytes(t Type, word []byte) (interface{}, error) { if t.T != FixedBytesTy { - return nil, errors.New("abi: invalid type in call to make fixed byte array") + return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array") } // convert array := reflect.New(t.GetType()).Elem() @@ -177,7 +176,7 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) // declare our array refSlice = reflect.New(t.GetType()).Elem() } else { - return nil, errors.New("abi: invalid type in array/slice unpacking stage") + return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage") } // Arrays have packed elements, resulting in longer unpack steps. 
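A pattern worth calling out before the remaining `accounts/` hunks: many of them swap `errors.New` back to `fmt.Errorf` for constant messages, because the upstream commit being reverted had moved to `errors.New` (the usual Go idiom when there is nothing to format). As an illustrative aside — not part of the patch — the two spellings produce equivalent errors for a static string:

```go
// Aside (not part of the patch): for a constant message, errors.New and
// fmt.Errorf behave identically; errors.New simply skips the pointless
// formatting pass. This revert restores the older fmt.Errorf spelling.
package main

import (
	"errors"
	"fmt"
)

func main() {
	a := errors.New("abi: invalid type in call to make fixed byte array")
	b := fmt.Errorf("abi: invalid type in call to make fixed byte array")
	fmt.Println(a.Error() == b.Error()) // true: same message, same behavior
}
```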
diff --git a/accounts/external/backend.go b/accounts/external/backend.go index d80932c323..1869aef403 100644 --- a/accounts/external/backend.go +++ b/accounts/external/backend.go @@ -27,7 +27,6 @@ package external import ( - "errors" "fmt" "math/big" "sync" @@ -109,11 +108,11 @@ func (api *ExternalSigner) Status() (string, error) { } func (api *ExternalSigner) Open(passphrase string) error { - return errors.New("operation not supported on external signers") + return fmt.Errorf("operation not supported on external signers") } func (api *ExternalSigner) Close() error { - return errors.New("operation not supported on external signers") + return fmt.Errorf("operation not supported on external signers") } func (api *ExternalSigner) Accounts() []accounts.Account { @@ -156,7 +155,7 @@ func (api *ExternalSigner) Contains(account accounts.Account) bool { } func (api *ExternalSigner) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) { - return accounts.Account{}, errors.New("operation not supported on external signers") + return accounts.Account{}, fmt.Errorf("operation not supported on external signers") } func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain interfaces.ChainStateReader) { @@ -253,14 +252,14 @@ func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transactio } func (api *ExternalSigner) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { - return []byte{}, errors.New("password-operations not supported on external signers") + return []byte{}, fmt.Errorf("password-operations not supported on external signers") } func (api *ExternalSigner) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - return nil, errors.New("password-operations not supported on external signers") + return nil, fmt.Errorf("password-operations not supported on external signers") } func (api *ExternalSigner) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { - return nil, errors.New("password-operations not supported on external signers") + return nil, fmt.Errorf("password-operations not supported on external signers") } func (api *ExternalSigner) listAccounts() ([]common.Address, error) { diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index dbe834b198..913def043c 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -41,7 +41,6 @@ import ( mapset "github.com/deckarep/golang-set/v2" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "golang.org/x/exp/slices" ) // Minimum amount of time between cache reloads. This limit applies if the platform does @@ -49,10 +48,11 @@ import ( // exist yet, the code will attempt to create a watcher at most this often. const minReloadInterval = 2 * time.Second -// byURL defines the sorting order for accounts. -func byURL(a, b accounts.Account) int { - return a.URL.Cmp(b.URL) -} +type accountsByURL []accounts.Account + +func (s accountsByURL) Len() int { return len(s) } +func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 } +func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // AmbiguousAddrError is returned when attempting to unlock // an address for which more than one file exists. 
@@ -77,7 +77,7 @@ type accountCache struct { keydir string watcher *watcher mu sync.Mutex - all []accounts.Account + all accountsByURL byAddr map[common.Address][]accounts.Account throttle *time.Timer notify chan struct{} @@ -204,7 +204,7 @@ func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) { default: err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))} copy(err.Matches, matches) - slices.SortFunc(err.Matches, byURL) + sort.Sort(accountsByURL(err.Matches)) return accounts.Account{}, err } } diff --git a/accounts/keystore/account_cache_test.go b/accounts/keystore/account_cache_test.go index 16a9453575..be030f0c0f 100644 --- a/accounts/keystore/account_cache_test.go +++ b/accounts/keystore/account_cache_test.go @@ -27,12 +27,12 @@ package keystore import ( - "errors" "fmt" "math/rand" "os" "path/filepath" "reflect" + "sort" "testing" "time" @@ -40,7 +40,6 @@ import ( "github.com/cespare/cp" "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" - "golang.org/x/exp/slices" ) var ( @@ -85,7 +84,7 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { select { case <-ks.changes: default: - return errors.New("wasn't notified of new accounts") + return fmt.Errorf("wasn't notified of new accounts") } return nil } @@ -213,7 +212,7 @@ func TestCacheAddDeleteOrder(t *testing.T) { // Check that the account list is sorted by filename. wantAccounts := make([]accounts.Account, len(accs)) copy(wantAccounts, accs) - slices.SortFunc(wantAccounts, byURL) + sort.Sort(accountsByURL(wantAccounts)) list := cache.accounts() if !reflect.DeepEqual(list, wantAccounts) { t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(accs), spew.Sdump(wantAccounts)) diff --git a/accounts/keystore/keystore_test.go b/accounts/keystore/keystore_test.go index 16db8259c9..dc574e82e1 100644 --- a/accounts/keystore/keystore_test.go +++ b/accounts/keystore/keystore_test.go @@ -30,6 +30,7 @@ import ( "math/rand" "os" "runtime" + "sort" "strings" "sync" "sync/atomic" @@ -40,7 +41,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" - "golang.org/x/exp/slices" ) var testSigData = make([]byte, 32) @@ -410,19 +410,19 @@ func TestImportRace(t *testing.T) { t.Fatalf("failed to export account: %v", acc) } _, ks2 := tmpKeyStore(t, true) - var atom atomic.Uint32 + var atom uint32 var wg sync.WaitGroup wg.Add(2) for i := 0; i < 2; i++ { go func() { defer wg.Done() if _, err := ks2.Import(json, "new", "new"); err != nil { - atom.Add(1) + atomic.AddUint32(&atom, 1) } }() } wg.Wait() - if atom.Load() != 1 { + if atom != 1 { t.Errorf("Import is racy") } } @@ -437,7 +437,7 @@ func checkAccounts(t *testing.T, live map[common.Address]accounts.Account, walle for _, account := range live { liveList = append(liveList, account) } - slices.SortFunc(liveList, byURL) + sort.Sort(accountsByURL(liveList)) for j, wallet := range wallets { if accs := wallet.Accounts(); len(accs) != 1 { t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs)) diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 98cf02ba37..7eeb0a9dbd 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -235,13 +235,10 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { if err != nil { return nil, err } - key, err := crypto.ToECDSA(keyBytes) - if err != nil { - return nil, fmt.Errorf("invalid key: %w", err) 
- } + key := crypto.ToECDSAUnsafe(keyBytes) id, err := uuid.FromBytes(keyId) if err != nil { - return nil, fmt.Errorf("invalid UUID: %w", err) + return nil, err } return &Key{ Id: id, diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index 062bfcb198..5fc816e12a 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -34,7 +34,6 @@ import ( "crypto/rand" "crypto/sha256" "crypto/sha512" - "errors" "fmt" "github.com/ethereum/go-ethereum/crypto" @@ -136,7 +135,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error { // Unpair disestablishes an existing pairing. func (s *SecureChannelSession) Unpair() error { if s.PairingKey == nil { - return errors.New("cannot unpair: not paired") + return fmt.Errorf("cannot unpair: not paired") } _, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{}) @@ -152,7 +151,7 @@ func (s *SecureChannelSession) Unpair() error { // Open initializes the secure channel. func (s *SecureChannelSession) Open() error { if s.iv != nil { - return errors.New("session already opened") + return fmt.Errorf("session already opened") } response, err := s.open() @@ -226,7 +225,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error // transmitEncrypted sends an encrypted message, and decrypts and returns the response. func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) { if s.iv == nil { - return nil, errors.New("channel not open") + return nil, fmt.Errorf("channel not open") } data, err := s.encryptAPDU(data) @@ -265,7 +264,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b return nil, err } if !bytes.Equal(s.iv, rmac) { - return nil, errors.New("invalid MAC in response") + return nil, fmt.Errorf("invalid MAC in response") } rapdu := &responseAPDU{} @@ -330,7 +329,7 @@ func unpad(data []byte, terminator byte) ([]byte, error) { return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i]) } } - return nil, errors.New("expected end of padding, got 0") + return nil, fmt.Errorf("expected end of padding, got 0") } // updateIV is an internal method that updates the initialization vector after diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index b83fce913e..71a9ac9fbf 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -262,7 +262,7 @@ func (w *Wallet) release() error { // with the wallet. func (w *Wallet) pair(puk []byte) error { if w.session.paired() { - return errors.New("wallet already paired") + return fmt.Errorf("wallet already paired") } pairing, err := w.session.pair(puk) if err != nil { @@ -823,7 +823,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) { // unpair deletes an existing pairing. 
func (s *Session) unpair() error { if !s.verified { - return errors.New("unpair requires that the PIN be verified") + return fmt.Errorf("unpair requires that the PIN be verified") } return s.Channel.Unpair() } @@ -917,7 +917,7 @@ func (s *Session) initialize(seed []byte) error { return err } if status == "Online" { - return errors.New("card is already initialized, cowardly refusing to proceed") + return fmt.Errorf("card is already initialized, cowardly refusing to proceed") } s.Wallet.lock.Lock() diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 2459b853b9..4df5f3a2a7 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -342,7 +342,7 @@ To make `t8n` apply these, the following inputs are required: - For ethash, it is `5000000000000000000` `wei`, - If this is not defined, mining rewards are not applied, - A value of `0` is valid, and causes accounts to be 'touched'. -- For each ommer, the tool needs to be given an `address\` and a `delta`. This +- For each ommer, the tool needs to be given an `addres\` and a `delta`. This is done via the `ommers` field in `env`. Note: the tool does not verify that e.g. the normal uncle rules apply, diff --git a/cmd/evm/internal/t8ntool/block.go b/cmd/evm/internal/t8ntool/block.go index 1b11e25e53..a5ec44f170 100644 --- a/cmd/evm/internal/t8ntool/block.go +++ b/cmd/evm/internal/t8ntool/block.go @@ -271,7 +271,7 @@ func readInput(ctx *cli.Context) (*bbInput, error) { return inputData, nil } -// dispatchBlock writes the output data to either stderr or stdout, or to the specified +// dispatchOutput writes the output data to either stderr or stdout, or to the specified // files func dispatchBlock(ctx *cli.Context, baseDir string, block *types.Block) error { raw, _ := rlp.EncodeToBytes(block) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 7b33af0d02..0f82378ba4 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -29,8 +29,8 @@ package t8ntool import ( "fmt" "math/big" + "os" - "github.com/ava-labs/subnet-evm/consensus/misc/eip4844" "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/state" @@ -55,18 +55,16 @@ type Prestate struct { // ExecutionResult contains the execution status after running a state test, any // error that might have occurred and a dump of the final state if requested. 
type ExecutionResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptsRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected []*rejectedTx `json:"rejected,omitempty"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - CurrentExcessBlobGas *math.HexOrDecimal64 `json:"currentExcessBlobGas,omitempty"` - CurrentBlobGasUsed *math.HexOrDecimal64 `json:"currentBlobGasUsed,omitempty"` + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected []*rejectedTx `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` } type ommer struct { @@ -76,44 +74,38 @@ type ommer struct { //go:generate go run github.com/fjl/gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty"` - Random *big.Int `json:"currentRandom"` - ParentDifficulty *big.Int `json:"parentDifficulty"` - ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"` - ParentGasUsed uint64 `json:"parentGasUsed,omitempty"` - ParentGasLimit uint64 `json:"parentGasLimit,omitempty"` - MinBaseFee *big.Int `json:"minBaseFee,omitempty"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *big.Int `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *uint64 `json:"excessBlobGas,omitempty"` - ParentExcessBlobGas *uint64 `json:"parentExcessBlobGas,omitempty"` - ParentBlobGasUsed *uint64 `json:"parentBlobGasUsed,omitempty"` + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty"` + Random *big.Int `json:"currentRandom"` + ParentDifficulty *big.Int `json:"parentDifficulty"` + ParentBaseFee *big.Int `json:"parentBaseFee,omitempty"` + ParentGasUsed uint64 `json:"parentGasUsed,omitempty"` + ParentGasLimit uint64 `json:"parentGasLimit,omitempty"` + MinBaseFee *big.Int `json:"minBaseFee,omitempty"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } type stEnvMarshaling struct { - Coinbase common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - 
Random *math.HexOrDecimal256 - ParentDifficulty *math.HexOrDecimal256 - ParentBaseFee *math.HexOrDecimal256 - ParentGasUsed math.HexOrDecimal64 - ParentGasLimit math.HexOrDecimal64 - MinBaseFee *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - ParentTimestamp math.HexOrDecimal64 - BaseFee *math.HexOrDecimal256 - ExcessBlobGas *math.HexOrDecimal64 - ParentExcessBlobGas *math.HexOrDecimal64 - ParentBlobGasUsed *math.HexOrDecimal64 + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + ParentDifficulty *math.HexOrDecimal256 + ParentBaseFee *math.HexOrDecimal256 + ParentGasUsed math.HexOrDecimal64 + ParentGasLimit math.HexOrDecimal64 + MinBaseFee *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ParentTimestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 } type rejectedTx struct { @@ -171,19 +163,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, // rnd := common.BigToHash(pre.Env.Random) // vmContext.Random = &rnd // } - // If excessBlobGas is defined, add it to the vmContext. - if pre.Env.ExcessBlobGas != nil { - vmContext.ExcessBlobGas = pre.Env.ExcessBlobGas - } else { - // If it is not explicitly defined, but we have the parent values, we try - // to calculate it ourselves. - parentExcessBlobGas := pre.Env.ParentExcessBlobGas - parentBlobGasUsed := pre.Env.ParentBlobGasUsed - if parentExcessBlobGas != nil && parentBlobGasUsed != nil { - excessBlobGas := eip4844.CalcExcessBlobGas(*parentExcessBlobGas, *parentBlobGasUsed) - vmContext.ExcessBlobGas = &excessBlobGas - } - } // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // done in StateProcessor.Process(block, ...), right before transactions are applied. // if chainConfig.DAOForkSupport && @@ -191,14 +170,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, // chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { // misc.ApplyDAOHardFork(statedb) // } - var blobGasUsed uint64 + for i, tx := range txs { - if tx.Type() == types.BlobTxType && vmContext.ExcessBlobGas == nil { - errMsg := "blob tx used but field env.ExcessBlobGas missing" - log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", errMsg) - rejectedTxs = append(rejectedTxs, &rejectedTx{i, errMsg}) - continue - } msg, err := core.TransactionToMessage(tx, signer, pre.Env.BaseFee) if err != nil { log.Warn("rejected tx", "index", i, "hash", tx.Hash(), "error", err) @@ -228,9 +201,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, gaspool.SetGas(prevGas) continue } - if tx.Type() == types.BlobTxType { - blobGasUsed += params.BlobTxBlobGasPerBlob - } includedTxs = append(includedTxs, tx) if hashError != nil { return nil, nil, NewError(ErrorMissingBlockhash, hashError) @@ -279,7 +249,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, if miningReward >= 0 { // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases // where - // - the coinbase self-destructed, or + // - the coinbase suicided, or // - there are only 'bad' transactions, which aren't executed. 
In those cases, // the coinbase gets no txfee, so isn't created, and thus needs to be touched var ( @@ -300,8 +270,9 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, statedb.AddBalance(pre.Env.Coinbase, minerReward) } // Commit block - root, err := statedb.Commit(vmContext.BlockNumber.Uint64(), chainConfig.IsEIP158(vmContext.BlockNumber), false) + root, err := statedb.Commit(chainConfig.IsEIP158(vmContext.BlockNumber), false) if err != nil { + fmt.Fprintf(os.Stderr, "Could not commit state: %v", err) return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not commit state: %v", err)) } execRs := &ExecutionResult{ @@ -316,16 +287,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, GasUsed: (math.HexOrDecimal64)(gasUsed), BaseFee: (*math.HexOrDecimal256)(vmContext.BaseFee), } - if vmContext.ExcessBlobGas != nil { - execRs.CurrentExcessBlobGas = (*math.HexOrDecimal64)(vmContext.ExcessBlobGas) - execRs.CurrentBlobGasUsed = (*math.HexOrDecimal64)(&blobGasUsed) - } - // Re-create statedb instance with new root upon the updated database - // for accessing latest states. - statedb, err = state.New(root, statedb.Database(), nil) - if err != nil { - return nil, nil, NewError(ErrorEVM, fmt.Errorf("could not reopen state: %v", err)) - } return statedb, execRs, nil } @@ -341,7 +302,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc) *state.StateDB } } // Commit and re-open to start with a clean state. - root, _ := statedb.Commit(0, false, false) + root, _ := statedb.Commit(false, false) statedb, _ = state.New(root, sdb, nil) return statedb } diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index ed8652d741..e0a4061564 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -16,25 +16,22 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` - ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` - ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` - MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` - ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` - ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` + ParentGasUsed math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` + ParentGasLimit math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` + MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) @@ -53,34 +50,28 @@ func (s stEnv) MarshalJSON() ([]byte, error) { enc.Ommers = s.Ommers enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) enc.ParentUncleHash = s.ParentUncleHash - enc.ExcessBlobGas = (*math.HexOrDecimal64)(s.ExcessBlobGas) - enc.ParentExcessBlobGas = (*math.HexOrDecimal64)(s.ParentExcessBlobGas) - enc.ParentBlobGasUsed = (*math.HexOrDecimal64)(s.ParentBlobGasUsed) return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. 
func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` - ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` - ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` - MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash *common.Hash `json:"parentUncleHash"` - ExcessBlobGas *math.HexOrDecimal64 `json:"excessBlobGas,omitempty"` - ParentExcessBlobGas *math.HexOrDecimal64 `json:"parentExcessBlobGas,omitempty"` - ParentBlobGasUsed *math.HexOrDecimal64 `json:"parentBlobGasUsed,omitempty"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + ParentBaseFee *math.HexOrDecimal256 `json:"parentBaseFee,omitempty"` + ParentGasUsed *math.HexOrDecimal64 `json:"parentGasUsed,omitempty"` + ParentGasLimit *math.HexOrDecimal64 `json:"parentGasLimit,omitempty"` + MinBaseFee *math.HexOrDecimal256 `json:"minBaseFee,omitempty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *common.Hash `json:"parentUncleHash"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -138,14 +129,5 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.ParentUncleHash != nil { s.ParentUncleHash = *dec.ParentUncleHash } - if dec.ExcessBlobGas != nil { - s.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas) - } - if dec.ParentExcessBlobGas != nil { - s.ParentExcessBlobGas = (*uint64)(dec.ParentExcessBlobGas) - } - if dec.ParentBlobGasUsed != nil { - s.ParentBlobGasUsed = (*uint64)(dec.ParentBlobGasUsed) - } return nil } diff --git a/cmd/evm/internal/t8ntool/transaction.go b/cmd/evm/internal/t8ntool/transaction.go index 2b05394cb5..98ebc3e275 100644 --- a/cmd/evm/internal/t8ntool/transaction.go +++ b/cmd/evm/internal/t8ntool/transaction.go @@ -149,7 +149,7 @@ func Transaction(ctx *cli.Context) error { r.Address = sender } // Check intrinsic gas - rules := chainConfig.Rules(new(big.Int), 0) + rules := chainConfig.AvalancheRules(new(big.Int), 0) if gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, rules); err != nil { r.Error = err results = 
append(results, r) diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index 019e1ab989..4882c1b6bd 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -138,7 +138,6 @@ func runCmd(ctx *cli.Context) error { receiver = common.BytesToAddress([]byte("receiver")) genesisConfig *core.Genesis preimages = ctx.Bool(DumpFlag.Name) - blobHashes []common.Hash // TODO (MariusVanDerWijden) implement blob hashes in state tests ) if ctx.Bool(MachineFlag.Name) { tracer = logger.NewJSONLogger(logconfig, os.Stdout) @@ -228,7 +227,6 @@ func runCmd(ctx *cli.Context) error { Time: genesisConfig.Timestamp, Coinbase: genesisConfig.Coinbase, BlockNumber: new(big.Int).SetUint64(genesisConfig.Number), - BlobHashes: blobHashes, EVMConfig: vm.Config{ Tracer: tracer, }, @@ -290,7 +288,8 @@ func runCmd(ctx *cli.Context) error { output, leftOverGas, stats, err := timedExec(bench, execFunc) if ctx.Bool(DumpFlag.Name) { - statedb.Commit(genesisConfig.Number, true, false) + statedb.Commit(true, false) + statedb.IntermediateRoot(true) fmt.Println(string(statedb.Dump(nil))) } diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index 81b7442394..900f5bba1f 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -127,7 +127,6 @@ func runStateTest(fname string, cfg vm.Config, jsonOut, dump bool) error { // Test failed, mark as so and dump any state to aid debugging result.Pass, result.Error = false, err.Error() if dump && s != nil { - s, _ = state.New(*result.Root, s.Database(), nil) dump := s.RawDump(nil) result.State = &dump } diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go index 0cfa0151e6..80284dee67 100644 --- a/cmd/evm/t8n_test.go +++ b/cmd/evm/t8n_test.go @@ -276,14 +276,6 @@ func TestT8n(t *testing.T) { // output: t8nOutput{alloc: true, result: true}, // expOut: "exp.json", // }, - { // Cancun tests - base: "./testdata/28", - input: t8nInput{ - "alloc.json", "txs.rlp", "env.json", "Cancun", "", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, } { args := []string{"t8n"} args = append(args, tc.output.get()...) diff --git a/cmd/evm/testdata/13/readme.md b/cmd/evm/testdata/13/readme.md index 889975d47e..64f52fc9a9 100644 --- a/cmd/evm/testdata/13/readme.md +++ b/cmd/evm/testdata/13/readme.md @@ -1,4 +1,4 @@ ## Input transactions in RLP form -This testdata folder is used to exemplify how transaction input can be provided in rlp form. +This testdata folder is used to examplify how transaction input can be provided in rlp form. Please see the README in `evm` folder for how this is performed. \ No newline at end of file diff --git a/cmd/evm/testdata/23/readme.md b/cmd/evm/testdata/23/readme.md index f31b64de2f..85fe8db66c 100644 --- a/cmd/evm/testdata/23/readme.md +++ b/cmd/evm/testdata/23/readme.md @@ -1 +1 @@ -These files exemplify how to sign a transaction using the pre-EIP155 scheme. +These files examplify how to sign a transaction using the pre-EIP155 scheme. 
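The `execution.go` and `gen_stenv.go` hunks above strip the EIP-4844 fields (`ExcessBlobGas`, `ParentExcessBlobGas`, `ParentBlobGasUsed`) along with the fallback that derived the excess from the parent header via `eip4844.CalcExcessBlobGas`, and the testdata/28 Cancun fixtures deleted below exercised exactly that path. For orientation, here is a self-contained sketch of the calculation as specified by EIP-4844 (Cancun-era constants assumed; this is a sketch, not subnet-evm's implementation):

```go
// Sketch of the excess-blob-gas rule per EIP-4844: only blob gas used beyond
// the per-block target carries over, and the excess never goes negative. The
// deleted t8n fallback called the equivalent
// eip4844.CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed).
package main

import "fmt"

const (
	blobGasPerBlob        = 1 << 17            // 131072 gas per blob
	targetBlobGasPerBlock = 3 * blobGasPerBlob // target of 3 blobs per block
)

func calcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed uint64) uint64 {
	if parentExcessBlobGas+parentBlobGasUsed < targetBlobGasPerBlock {
		return 0
	}
	return parentExcessBlobGas + parentBlobGasUsed - targetBlobGasPerBlock
}

func main() {
	// Parent used 6 blobs with no prior excess: 3 blobs' worth carries over.
	fmt.Println(calcExcessBlobGas(0, 6*blobGasPerBlob)) // 393216
}
```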
diff --git a/cmd/evm/testdata/28/alloc.json b/cmd/evm/testdata/28/alloc.json
deleted file mode 100644
index 680a89f4ed..0000000000
--- a/cmd/evm/testdata/28/alloc.json
+++ /dev/null
@@ -1,16 +0,0 @@
-{
-    "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
-        "balance" : "0x016345785d8a0000",
-        "code" : "0x",
-        "nonce" : "0x00",
-        "storage" : {
-        }
-    },
-    "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : {
-        "balance" : "0x016345785d8a0000",
-        "code" : "0x60004960015500",
-        "nonce" : "0x00",
-        "storage" : {
-        }
-    }
-}
\ No newline at end of file
diff --git a/cmd/evm/testdata/28/env.json b/cmd/evm/testdata/28/env.json
deleted file mode 100644
index 14a1f3ed1e..0000000000
--- a/cmd/evm/testdata/28/env.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
-    "currentNumber" : "0x01",
-    "currentTimestamp" : "0x079e",
-    "currentGasLimit" : "0x7fffffffffffffff",
-    "previousHash" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6",
-    "currentBlobGasUsed" : "0x00",
-    "parentTimestamp" : "0x03b6",
-    "parentDifficulty" : "0x00",
-    "parentUncleHash" : "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
-    "currentRandom" : "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
-    "withdrawals" : [
-    ],
-    "parentBaseFee" : "0x0a",
-    "parentGasUsed" : "0x00",
-    "parentGasLimit" : "0x7fffffffffffffff",
-    "parentExcessBlobGas" : "0x00",
-    "parentBlobGasUsed" : "0x00",
-    "blockHashes" : {
-        "0" : "0x3a9b485972e7353edd9152712492f0c58d89ef80623686b6bf947a4a6dce6cb6"
-    },
-    "minBaseFee" : "0x9"
-}
\ No newline at end of file
diff --git a/cmd/evm/testdata/28/exp.json b/cmd/evm/testdata/28/exp.json
deleted file mode 100644
index 9a3358f908..0000000000
--- a/cmd/evm/testdata/28/exp.json
+++ /dev/null
@@ -1,46 +0,0 @@
-{
-  "alloc": {
-    "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": {
-      "balance": "0x73c57"
-    },
-    "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
-      "balance": "0x16345785d80c3a9",
-      "nonce": "0x1"
-    },
-    "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
-      "code": "0x60004960015500",
-      "storage": {
-        "0x0000000000000000000000000000000000000000000000000000000000000001": "0x01a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"
-      },
-      "balance": "0x16345785d8a0000"
-    }
-  },
-  "result": {
-    "stateRoot": "0xabcbb1d3be8aee044a219dd181fe6f2c2482749b9da95d15358ba7af9b43c372",
-    "txRoot": "0x4409cc4b699384ba5f8248d92b784713610c5ff9c1de51e9239da0dac76de9ce",
-    "receiptsRoot": "0xbff643da765981266133094092d98c81d2ac8e9a83a7bbda46c3d736f1f874ac",
-    "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
-    "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
-    "receipts": [
-      {
-        "type": "0x3",
-        "root": "0x",
-        "status": "0x1",
-        "cumulativeGasUsed": "0xa865",
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x7508d7139d002a4b3a26a4f12dec0d87cb46075c78bf77a38b569a133b509262", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0xa865", - "effectiveGasPrice": null, - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "transactionIndex": "0x0" - } - ], - "currentDifficulty": null, - "gasUsed": "0xa865", - "currentBaseFee": "0x9", - "currentExcessBlobGas": "0x0", - "currentBlobGasUsed": "0x20000" - } -} \ No newline at end of file diff --git a/cmd/evm/testdata/28/txs.rlp b/cmd/evm/testdata/28/txs.rlp deleted file mode 100644 index 8df20e3aa2..0000000000 --- a/cmd/evm/testdata/28/txs.rlp +++ /dev/null @@ -1 +0,0 @@ -"0xf88bb88903f8860180026483061a8094b94f5374fce5edbc8e2a8697c15331677e6ebf0b8080c00ae1a001a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d801a025e16bb498552165016751911c3608d79000ab89dc3100776e729e6ea13091c7a03acacff7fc0cff6eda8a927dec93ca17765e1ee6cbc06c5954ce102e097c01d2" \ No newline at end of file diff --git a/cmd/evm/testdata/3/readme.md b/cmd/evm/testdata/3/readme.md index 246c58ef3b..499f03d7aa 100644 --- a/cmd/evm/testdata/3/readme.md +++ b/cmd/evm/testdata/3/readme.md @@ -1,2 +1,2 @@ -These files exemplify a transition where a transaction (executed on block 5) requests +These files examplify a transition where a transaction (excuted on block 5) requests the blockhash for block `1`. diff --git a/cmd/evm/testdata/4/readme.md b/cmd/evm/testdata/4/readme.md index eede41a9fd..08840d37bd 100644 --- a/cmd/evm/testdata/4/readme.md +++ b/cmd/evm/testdata/4/readme.md @@ -1,3 +1,3 @@ -These files exemplify a transition where a transaction (executed on block 5) requests +These files examplify a transition where a transaction (excuted on block 5) requests the blockhash for block `4`, but where the hash for that block is missing. It's expected that executing these should cause `exit` with errorcode `4`. diff --git a/cmd/evm/testdata/5/readme.md b/cmd/evm/testdata/5/readme.md index 1a84afaab6..e2b608face 100644 --- a/cmd/evm/testdata/5/readme.md +++ b/cmd/evm/testdata/5/readme.md @@ -1 +1 @@ -These files exemplify a transition where there are no transactions, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2). \ No newline at end of file +These files examplify a transition where there are no transcations, two ommers, at block `N-1` (delta 1) and `N-2` (delta 2). \ No newline at end of file diff --git a/cmd/evm/transition-test.sh b/cmd/evm/transition-test.sh index 8cc6aa41de..a2ea534189 100644 --- a/cmd/evm/transition-test.sh +++ b/cmd/evm/transition-test.sh @@ -280,7 +280,7 @@ To make `t8n` apply these, the following inputs are required: - For ethash, it is `5000000000000000000` `wei`, - If this is not defined, mining rewards are not applied, - A value of `0` is valid, and causes accounts to be 'touched'. -- For each ommer, the tool needs to be given an `address\` and a `delta`. This +- For each ommer, the tool needs to be given an `addres\` and a `delta`. 
 is done via the `ommers` field in `env`.
 
 Note: the tool does not verify that e.g. the normal uncle rules apply,
diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go
deleted file mode 100644
index 2bd5559436..0000000000
--- a/cmd/utils/cmd.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-// Package utils contains internal helper functions for go-ethereum commands.
-package utils
-
-import (
-	"fmt"
-	"io"
-	"os"
-	"runtime"
-)
-
-// Fatalf formats a message to standard error and exits the program.
-// The message is also printed to standard output if standard error
-// is redirected to a different file.
-func Fatalf(format string, args ...interface{}) {
-	w := io.MultiWriter(os.Stdout, os.Stderr)
-	if runtime.GOOS == "windows" {
-		// The SameFile check below doesn't work on Windows.
-		// stdout is unlikely to get redirected though, so just print there.
-		w = os.Stdout
-	} else {
-		outf, _ := os.Stdout.Stat()
-		errf, _ := os.Stderr.Stat()
-		if outf != nil && errf != nil && os.SameFile(outf, errf) {
-			w = os.Stderr
-		}
-	}
-	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
-	os.Exit(1)
-}
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 07a6e60015..dfa6990ddf 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -19,11 +19,34 @@ package utils
 
 import (
 	"fmt"
+	"io"
+	"os"
+	"runtime"
 	"strings"
 
 	"github.com/urfave/cli/v2"
 )
 
+// Fatalf formats a message to standard error and exits the program.
+// The message is also printed to standard output if standard error
+// is redirected to a different file.
+func Fatalf(format string, args ...interface{}) {
+	w := io.MultiWriter(os.Stdout, os.Stderr)
+	if runtime.GOOS == "windows" {
+		// The SameFile check below doesn't work on Windows.
+		// stdout is unlikely to get redirected though, so just print there.
+		w = os.Stdout
+	} else {
+		outf, _ := os.Stdout.Stat()
+		errf, _ := os.Stderr.Stat()
+		if outf != nil && errf != nil && os.SameFile(outf, errf) {
+			w = os.Stderr
+		}
+	}
+	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
+	os.Exit(1)
+}
+
 // CheckExclusive verifies that only a single instance of the provided flags was
 // set by the user. Each flag might optionally be followed by a string type to
 // specialize it further.
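
The `Fatalf` helper moved back into flags.go above writes its message to both stdout and stderr when they refer to different files, so the error survives output redirection. A hedged usage sketch; the import path is assumed from this repository's layout and the `genesis.json` file name is purely illustrative:

    package main

    import (
    	"os"

    	"github.com/ava-labs/subnet-evm/cmd/utils"
    )

    func main() {
    	path := "genesis.json"
    	if _, err := os.Stat(path); err != nil {
    		// Prints "Fatal: ..." and exits with status 1. The message lands on
    		// stderr, and also on stdout when stderr points somewhere else.
    		utils.Fatalf("failed to read %s: %v", path, err)
    	}
    }
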
diff --git a/consensus/dummy/consensus.go b/consensus/dummy/consensus.go
index 1f003661fe..173f1e8d53 100644
--- a/consensus/dummy/consensus.go
+++ b/consensus/dummy/consensus.go
@@ -12,7 +12,6 @@ import (
 	"github.com/ava-labs/avalanchego/utils/timer/mockable"
 	"github.com/ava-labs/subnet-evm/consensus"
-	"github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
 	"github.com/ava-labs/subnet-evm/core/state"
 	"github.com/ava-labs/subnet-evm/core/types"
 	"github.com/ava-labs/subnet-evm/params"
@@ -234,18 +233,13 @@ func (self *DummyEngine) verifyHeader(chain consensus.ChainHeaderReader, header
 	if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
 		return consensus.ErrInvalidNumber
 	}
-	// Verify the existence / non-existence of excessBlobGas
-	cancun := chain.Config().IsCancun(header.Number, header.Time)
-	if !cancun && header.ExcessBlobGas != nil {
-		return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", header.ExcessBlobGas)
+	// Verify the existence / non-existence of excessDataGas
+	cancun := chain.Config().IsCancun(header.Time)
+	if cancun && header.ExcessDataGas == nil {
+		return errors.New("missing excessDataGas")
 	}
-	if !cancun && header.BlobGasUsed != nil {
-		return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", header.BlobGasUsed)
-	}
-	if cancun {
-		if err := eip4844.VerifyEIP4844Header(parent, header); err != nil {
-			return err
-		}
+	if !cancun && header.ExcessDataGas != nil {
+		return fmt.Errorf("invalid excessDataGas: have %d, expected nil", header.ExcessDataGas)
 	}
 	return nil
 }
diff --git a/consensus/misc/eip4844.go b/consensus/misc/eip4844.go
new file mode 100644
index 0000000000..70d84d8529
--- /dev/null
+++ b/consensus/misc/eip4844.go
@@ -0,0 +1,54 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package misc
+
+import (
+	"math/big"
+
+	"github.com/ava-labs/subnet-evm/params"
+)
+
+var (
+	minDataGasPrice            = big.NewInt(params.BlobTxMinDataGasprice)
+	dataGaspriceUpdateFraction = big.NewInt(params.BlobTxDataGaspriceUpdateFraction)
+)
+
+// CalcBlobFee calculates the blobfee from the header's excess data gas field.
+func CalcBlobFee(excessDataGas *big.Int) *big.Int {
+	// If this block does not yet have EIP-4844 enabled, return the starting fee
+	if excessDataGas == nil {
+		return big.NewInt(params.BlobTxMinDataGasprice)
+	}
+	return fakeExponential(minDataGasPrice, excessDataGas, dataGaspriceUpdateFraction)
+}
+
+// fakeExponential approximates factor * e ** (numerator / denominator) using
+// Taylor expansion.
+func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
+	var (
+		output = new(big.Int)
+		accum  = new(big.Int).Mul(factor, denominator)
+	)
+	for i := 1; accum.Sign() > 0; i++ {
+		output.Add(output, accum)
+
+		accum.Mul(accum, numerator)
+		accum.Div(accum, denominator)
+		accum.Div(accum, big.NewInt(int64(i)))
+	}
+	return output.Div(output, denominator)
+}
diff --git a/consensus/misc/eip4844/eip4844.go b/consensus/misc/eip4844/eip4844.go
deleted file mode 100644
index 8be8b32969..0000000000
--- a/consensus/misc/eip4844/eip4844.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package eip4844
-
-import (
-	"errors"
-	"fmt"
-	"math/big"
-
-	"github.com/ava-labs/subnet-evm/core/types"
-	"github.com/ava-labs/subnet-evm/params"
-)
-
-var (
-	minBlobGasPrice            = big.NewInt(params.BlobTxMinBlobGasprice)
-	blobGaspriceUpdateFraction = big.NewInt(params.BlobTxBlobGaspriceUpdateFraction)
-)
-
-// VerifyEIP4844Header verifies the presence of the excessBlobGas field and that
-// if the current block contains no transactions, the excessBlobGas is updated
-// accordingly.
-func VerifyEIP4844Header(parent, header *types.Header) error {
-	// Verify the header is not malformed
-	if header.ExcessBlobGas == nil {
-		return errors.New("header is missing excessBlobGas")
-	}
-	if header.BlobGasUsed == nil {
-		return errors.New("header is missing blobGasUsed")
-	}
-	// Verify that the blob gas used remains within reasonable limits.
-	if *header.BlobGasUsed > params.BlobTxMaxBlobGasPerBlock {
-		return fmt.Errorf("blob gas used %d exceeds maximum allowance %d", *header.BlobGasUsed, params.BlobTxMaxBlobGasPerBlock)
-	}
-	if *header.BlobGasUsed%params.BlobTxBlobGasPerBlob != 0 {
-		return fmt.Errorf("blob gas used %d not a multiple of blob gas per blob %d", header.BlobGasUsed, params.BlobTxBlobGasPerBlob)
-	}
-	// Verify the excessBlobGas is correct based on the parent header
-	var (
-		parentExcessBlobGas uint64
-		parentBlobGasUsed   uint64
-	)
-	if parent.ExcessBlobGas != nil {
-		parentExcessBlobGas = *parent.ExcessBlobGas
-		parentBlobGasUsed = *parent.BlobGasUsed
-	}
-	expectedExcessBlobGas := CalcExcessBlobGas(parentExcessBlobGas, parentBlobGasUsed)
-	if *header.ExcessBlobGas != expectedExcessBlobGas {
-		return fmt.Errorf("invalid excessBlobGas: have %d, want %d, parent excessBlobGas %d, parent blobDataUsed %d",
-			*header.ExcessBlobGas, expectedExcessBlobGas, parentExcessBlobGas, parentBlobGasUsed)
-	}
-	return nil
-}
-
-// CalcExcessBlobGas calculates the excess blob gas after applying the set of
-// blobs on top of the excess blob gas.
-func CalcExcessBlobGas(parentExcessBlobGas uint64, parentBlobGasUsed uint64) uint64 {
-	excessBlobGas := parentExcessBlobGas + parentBlobGasUsed
-	if excessBlobGas < params.BlobTxTargetBlobGasPerBlock {
-		return 0
-	}
-	return excessBlobGas - params.BlobTxTargetBlobGasPerBlock
-}
-
-// CalcBlobFee calculates the blobfee from the header's excess blob gas field.
-func CalcBlobFee(excessBlobGas uint64) *big.Int {
-	return fakeExponential(minBlobGasPrice, new(big.Int).SetUint64(excessBlobGas), blobGaspriceUpdateFraction)
-}
-
-// fakeExponential approximates factor * e ** (numerator / denominator) using
-// Taylor expansion.
-func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
-	var (
-		output = new(big.Int)
-		accum  = new(big.Int).Mul(factor, denominator)
-	)
-	for i := 1; accum.Sign() > 0; i++ {
-		output.Add(output, accum)
-
-		accum.Mul(accum, numerator)
-		accum.Div(accum, denominator)
-		accum.Div(accum, big.NewInt(int64(i)))
-	}
-	return output.Div(output, denominator)
-}
diff --git a/consensus/misc/eip4844/eip4844_test.go b/consensus/misc/eip4844_test.go
similarity index 58%
rename from consensus/misc/eip4844/eip4844_test.go
rename to consensus/misc/eip4844_test.go
index 3780acc15a..ea636ede9b 100644
--- a/consensus/misc/eip4844/eip4844_test.go
+++ b/consensus/misc/eip4844_test.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package eip4844
+package misc
 
 import (
 	"fmt"
@@ -24,42 +24,9 @@ import (
 	"github.com/ava-labs/subnet-evm/params"
 )
 
-func TestCalcExcessBlobGas(t *testing.T) {
-	var tests = []struct {
-		excess uint64
-		blobs  uint64
-		want   uint64
-	}{
-		// The excess blob gas should not increase from zero if the used blob
-		// slots are below - or equal - to the target.
-		{0, 0, 0},
-		{0, 1, 0},
-		{0, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, 0},
-
-		// If the target blob gas is exceeded, the excessBlobGas should increase
-		// by however much it was overshot
-		{0, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob},
-		{1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 1, params.BlobTxBlobGasPerBlob + 1},
-		{1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) + 2, 2*params.BlobTxBlobGasPerBlob + 1},
-
-		// The excess blob gas should decrease by however much the target was
-		// under-shot, capped at zero.
-		{params.BlobTxTargetBlobGasPerBlock, params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob, params.BlobTxTargetBlobGasPerBlock},
-		{params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, params.BlobTxBlobGasPerBlob},
-		{params.BlobTxTargetBlobGasPerBlock, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 2, 0},
-		{params.BlobTxBlobGasPerBlob - 1, (params.BlobTxTargetBlobGasPerBlock / params.BlobTxBlobGasPerBlob) - 1, 0},
-	}
-	for _, tt := range tests {
-		result := CalcExcessBlobGas(tt.excess, tt.blobs*params.BlobTxBlobGasPerBlob)
-		if result != tt.want {
-			t.Errorf("excess blob gas mismatch: have %v, want %v", result, tt.want)
-		}
-	}
-}
-
 func TestCalcBlobFee(t *testing.T) {
 	tests := []struct {
-		excessBlobGas uint64
+		excessDataGas int64
 		blobfee       int64
 	}{
 		{0, 1},
@@ -67,8 +34,12 @@ func TestCalcBlobFee(t *testing.T) {
 		{1542707, 2},
 		{10 * 1024 * 1024, 111},
 	}
+	have := CalcBlobFee(nil)
+	if have.Int64() != params.BlobTxMinDataGasprice {
+		t.Errorf("nil test: blobfee mismatch: have %v, want %v", have, params.BlobTxMinDataGasprice)
+	}
 	for i, tt := range tests {
-		have := CalcBlobFee(tt.excessBlobGas)
+		have := CalcBlobFee(big.NewInt(tt.excessDataGas))
 		if have.Int64() != tt.blobfee {
 			t.Errorf("test %d: blobfee mismatch: have %v want %v", i, have, tt.blobfee)
 		}
diff --git a/core/block_validator.go b/core/block_validator.go
index 86160826c4..dd0133136e 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -27,7 +27,6 @@
 package core
 
 import (
-	"errors"
 	"fmt"
 
 	"github.com/ava-labs/subnet-evm/consensus"
@@ -78,23 +77,6 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error {
 	if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
 		return fmt.Errorf("transaction root hash mismatch (header value %x, calculated %x)", header.TxHash, hash)
 	}
-	// Blob transactions may be present after the Cancun fork.
-	var blobs int
-	for _, tx := range block.Transactions() {
-		// Count the number of blobs to validate against the header's blobGasUsed
-		blobs += len(tx.BlobHashes())
-		// The individual checks for blob validity (version-check + not empty)
-		// happens in the state_transition check.
-	}
-	if header.BlobGasUsed != nil {
-		if want := *header.BlobGasUsed / params.BlobTxBlobGasPerBlob; uint64(blobs) != want { // div because the header is surely good vs the body might be bloated
-			return fmt.Errorf("blob gas used mismatch (header %v, calculated %v)", *header.BlobGasUsed, blobs*params.BlobTxBlobGasPerBlob)
-		}
-	} else {
-		if blobs > 0 {
-			return errors.New("data blobs present in block body")
-		}
-	}
 	if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
 		if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
 			return consensus.ErrUnknownAncestor
diff --git a/core/blockchain.go b/core/blockchain.go
index f13b478a94..4f85ed5887 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -41,7 +41,6 @@ import (
 	"github.com/ava-labs/subnet-evm/commontype"
 	"github.com/ava-labs/subnet-evm/consensus"
-	"github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
 	"github.com/ava-labs/subnet-evm/core/rawdb"
 	"github.com/ava-labs/subnet-evm/core/state"
 	"github.com/ava-labs/subnet-evm/core/state/snapshot"
@@ -102,8 +101,6 @@ var (
 	errFutureBlockUnsupported  = errors.New("future block insertion not supported")
 	errCacheConfigNotSpecified = errors.New("must specify cache config")
-	errInvalidOldChain         = errors.New("invalid old chain")
-	errInvalidNewChain         = errors.New("invalid new chain")
 )
 
 const (
@@ -146,7 +143,7 @@ const (
 	// trieCleanCacheStatsNamespace is the namespace to surface stats from the trie
 	// clean cache's underlying fastcache.
-	trieCleanCacheStatsNamespace = "hashdb/memcache/clean/fastcache"
+	trieCleanCacheStatsNamespace = "trie/memcache/clean/fastcache"
 )
 
 // cacheableFeeConfig encapsulates fee configuration itself and the block number that it has changed at,
@@ -166,23 +163,25 @@ type cacheableCoinbaseConfig struct {
 // CacheConfig contains the configuration values for the trie database
 // that's resident in a blockchain.
 type CacheConfig struct {
-	TrieCleanLimit                  int     // Memory allowance (MB) to use for caching trie nodes in memory
-	TrieDirtyLimit                  int     // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
-	TrieDirtyCommitTarget           int     // Memory limit (MB) to target for the dirties cache before invoking commit
-	TriePrefetcherParallelism       int     // Max concurrent disk reads trie prefetcher should perform at once
-	CommitInterval                  uint64  // Commit the trie every [CommitInterval] blocks.
-	Pruning                         bool    // Whether to disable trie write caching and GC altogether (archive node)
-	AcceptorQueueLimit              int     // Blocks to queue before blocking during acceptance
-	PopulateMissingTries            *uint64 // If non-nil, sets the starting height for re-generating historical tries.
-	PopulateMissingTriesParallelism int     // Number of readers to use when trying to populate missing tries.
-	AllowMissingTries               bool    // Whether to allow an archive node to run with pruning enabled
-	SnapshotDelayInit               bool    // Whether to initialize snapshots on startup or wait for external call
-	SnapshotLimit                   int     // Memory allowance (MB) to use for caching snapshot entries in memory
-	SnapshotVerify                  bool    // Verify generated snapshots
-	Preimages                       bool    // Whether to store preimage of trie key to the disk
-	AcceptedCacheSize               int     // Depth of accepted headers cache and accepted logs cache at the accepted tip
-	TxLookupLimit                   uint64  // Number of recent blocks for which to maintain transaction lookup indices
-	SkipTxIndexing                  bool    // Whether to skip transaction indexing
+	TrieCleanLimit                  int           // Memory allowance (MB) to use for caching trie nodes in memory
+	TrieCleanJournal                string        // Disk journal for saving clean cache entries.
+	TrieCleanRejournal              time.Duration // Time interval to dump clean cache to disk periodically
+	TrieDirtyLimit                  int           // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk
+	TrieDirtyCommitTarget           int           // Memory limit (MB) to target for the dirties cache before invoking commit
+	TriePrefetcherParallelism       int           // Max concurrent disk reads trie prefetcher should perform at once
+	CommitInterval                  uint64        // Commit the trie every [CommitInterval] blocks.
+	Pruning                         bool          // Whether to disable trie write caching and GC altogether (archive node)
+	AcceptorQueueLimit              int           // Blocks to queue before blocking during acceptance
+	PopulateMissingTries            *uint64       // If non-nil, sets the starting height for re-generating historical tries.
+	PopulateMissingTriesParallelism int           // Number of readers to use when trying to populate missing tries.
+	AllowMissingTries               bool          // Whether to allow an archive node to run with pruning enabled
+	SnapshotDelayInit               bool          // Whether to initialize snapshots on startup or wait for external call
+	SnapshotLimit                   int           // Memory allowance (MB) to use for caching snapshot entries in memory
+	SnapshotVerify                  bool          // Verify generated snapshots
+	Preimages                       bool          // Whether to store preimage of trie key to the disk
+	AcceptedCacheSize               int           // Depth of accepted headers cache and accepted logs cache at the accepted tip
+	TxLookupLimit                   uint64        // Number of recent blocks for which to maintain transaction lookup indices
+	SkipTxIndexing                  bool          // Whether to skip transaction indexing
 
 	SnapshotNoBuild bool // Whether the background generation is allowed
 	SnapshotWait    bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
@@ -316,6 +315,7 @@ func NewBlockChain(
 	// Open trie database with provided config
 	triedb := trie.NewDatabaseWithConfig(db, &trie.Config{
 		Cache:       cacheConfig.TrieCleanLimit,
+		Journal:     cacheConfig.TrieCleanJournal,
 		Preimages:   cacheConfig.Preimages,
 		StatsPrefix: trieCleanCacheStatsNamespace,
 	})
@@ -416,6 +416,17 @@ func NewBlockChain(
 	// Start processing accepted blocks effects in the background
 	go bc.startAcceptor()
 
+	// If periodic cache journal is required, spin it up.
+	if bc.cacheConfig.TrieCleanRejournal > 0 && len(bc.cacheConfig.TrieCleanJournal) > 0 {
+		log.Info("Starting to save trie clean cache periodically", "journalDir", bc.cacheConfig.TrieCleanJournal, "freq", bc.cacheConfig.TrieCleanRejournal)
+
+		bc.wg.Add(1)
+		go func() {
+			defer bc.wg.Done()
+			bc.triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit)
+		}()
+	}
+
 	// Start tx indexer/unindexer if required.
 	if bc.cacheConfig.TxLookupLimit != 0 {
 		bc.wg.Add(1)
@@ -790,7 +801,7 @@ func (bc *BlockChain) ExportCallback(callback func(block *types.Block) error, fi
 			return fmt.Errorf("export failed on #%d: not found", nr)
 		}
 		if nr > first && block.ParentHash() != parentHash {
-			return errors.New("export failed: chain reorg during export")
+			return fmt.Errorf("export failed: chain reorg during export")
 		}
 		parentHash = block.Hash()
 		if err := callback(block); err != nil {
@@ -978,6 +989,7 @@ func (bc *BlockChain) Stop() {
 	if err := bc.stateCache.TrieDB().Close(); err != nil {
 		log.Error("Failed to close trie db", "err", err)
 	}
+
 	log.Info("Blockchain stopped")
 }
 
@@ -1217,9 +1229,9 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
 	// diff layer for the block.
 	var err error
 	if bc.snaps == nil {
-		_, err = state.Commit(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), true)
+		_, err = state.Commit(bc.chainConfig.IsEIP158(block.Number()), true)
 	} else {
-		_, err = state.CommitWithSnap(block.NumberU64(), bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
+		_, err = state.CommitWithSnap(bc.chainConfig.IsEIP158(block.Number()), bc.snaps, block.Hash(), block.ParentHash(), true)
 	}
 	if err != nil {
 		return err
@@ -1446,15 +1458,8 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error {
 // collectUnflattenedLogs collects the logs that were generated or removed during
 // the processing of a block.
 func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*types.Log {
-	var blobGasPrice *big.Int
-	excessBlobGas := b.ExcessBlobGas()
-	if excessBlobGas != nil {
-		blobGasPrice = eip4844.CalcBlobFee(*excessBlobGas)
-	}
 	receipts := rawdb.ReadRawReceipts(bc.db, b.Hash(), b.NumberU64())
-	if err := receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), blobGasPrice, b.Transactions()); err != nil {
-		log.Error("Failed to derive block receipts fields", "hash", b.Hash(), "number", b.NumberU64(), "err", err)
-	}
+	receipts.DeriveFields(bc.chainConfig, b.Hash(), b.NumberU64(), b.Time(), b.BaseFee(), b.Transactions())
 
 	// Note: gross but this needs to be initialized here because returning nil will be treated specially as an incorrect
 	// error case downstream.
@@ -1462,10 +1467,11 @@ func (bc *BlockChain) collectUnflattenedLogs(b *types.Block, removed bool) [][]*
 	for i, receipt := range receipts {
 		receiptLogs := make([]*types.Log, len(receipt.Logs))
 		for i, log := range receipt.Logs {
+			l := *log
 			if removed {
-				log.Removed = true
+				l.Removed = true
 			}
-			receiptLogs[i] = log
+			receiptLogs[i] = &l
 		}
 		logs[i] = receiptLogs
 	}
@@ -1507,10 +1513,10 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
 		}
 	}
 	if oldBlock == nil {
-		return errInvalidOldChain
+		return errors.New("invalid old chain")
 	}
 	if newBlock == nil {
-		return errInvalidNewChain
+		return errors.New("invalid new chain")
 	}
 	// Both sides of the reorg are at the same number, reduce both until the common
 	// ancestor is found
@@ -1527,11 +1533,11 @@ func (bc *BlockChain) reorg(oldHead *types.Header, newHead *types.Block) error {
 		// Step back with both chains
 		oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1)
 		if oldBlock == nil {
-			return errInvalidOldChain
+			return fmt.Errorf("invalid old chain")
 		}
 		newBlock = bc.GetBlock(newBlock.ParentHash(), newBlock.NumberU64()-1)
 		if newBlock == nil {
-			return errInvalidNewChain
+			return fmt.Errorf("invalid new chain")
 		}
 	}
 
@@ -1766,9 +1772,9 @@ func (bc *BlockChain) reprocessBlock(parent *types.Block, current *types.Block)
 	// If snapshots are enabled, call CommitWithSnaps to explicitly create a snapshot
 	// diff layer for the block.
 	if bc.snaps == nil {
-		return statedb.Commit(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), false)
+		return statedb.Commit(bc.chainConfig.IsEIP158(current.Number()), false)
 	}
-	return statedb.CommitWithSnap(current.NumberU64(), bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
+	return statedb.CommitWithSnap(bc.chainConfig.IsEIP158(current.Number()), bc.snaps, current.Hash(), current.ParentHash(), false)
 }
 
 // initSnapshot instantiates a Snapshot instance and adds it to [bc]
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index bf099e3cdc..0694fa33ac 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -21,6 +21,8 @@ import (
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/fsnotify/fsnotify"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
@@ -81,6 +83,100 @@ func TestArchiveBlockChain(t *testing.T) {
 	}
 }
 
+// awaitWatcherEventsSubside waits for at least one event on [watcher] and then waits
+// for at least [subsideTimeout] before returning
+func awaitWatcherEventsSubside(watcher *fsnotify.Watcher, subsideTimeout time.Duration) {
+	done := make(chan struct{})
+
+	go func() {
+		defer func() {
+			close(done)
+		}()
+
+		select {
+		case <-watcher.Events:
+		case <-watcher.Errors:
+			return
+		}
+
+		for {
+			select {
+			case <-watcher.Events:
+			case <-watcher.Errors:
+				return
+			case <-time.After(subsideTimeout):
+				return
+			}
+		}
+	}()
+	<-done
+}
+
+func TestTrieCleanJournal(t *testing.T) {
+	if os.Getenv("RUN_FLAKY_TESTS") != "true" {
+		t.Skip("FLAKY")
+	}
+	require := require.New(t)
+	assert := assert.New(t)
+
+	trieCleanJournal := t.TempDir()
+	trieCleanJournalWatcher, err := fsnotify.NewWatcher()
+	require.NoError(err)
+	defer func() {
+		assert.NoError(trieCleanJournalWatcher.Close())
+	}()
+	require.NoError(trieCleanJournalWatcher.Add(trieCleanJournal))
+
+	create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) {
+		config := *archiveConfig
+		config.TrieCleanJournal = trieCleanJournal
+		config.TrieCleanRejournal = 100 * time.Millisecond
+		return createBlockChain(db, &config, gspec, lastAcceptedHash)
+	}
+
+	var (
+		key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+		key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a")
+		addr1   = crypto.PubkeyToAddress(key1.PublicKey)
+		addr2   = crypto.PubkeyToAddress(key2.PublicKey)
+		chainDB = rawdb.NewMemoryDatabase()
+	)
+
+	// Ensure that key1 has some funds in the genesis block.
+	genesisBalance := big.NewInt(1000000)
+	gspec := &Genesis{
+		Config: &params.ChainConfig{HomesteadBlock: new(big.Int)},
+		Alloc:  GenesisAlloc{addr1: {Balance: genesisBalance}},
+	}
+
+	blockchain, err := create(chainDB, gspec, common.Hash{})
+	require.NoError(err)
+	defer blockchain.Stop()
+
+	// This call generates a chain of 3 blocks.
+	signer := types.HomesteadSigner{}
+	_, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) {
+		tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1)
+		gen.AddTx(tx)
+	})
+	require.NoError(err)
+
+	// Insert and accept the generated chain
+	_, err = blockchain.InsertChain(chain)
+	require.NoError(err)
+
+	for _, block := range chain {
+		require.NoError(blockchain.Accept(block))
+	}
+	blockchain.DrainAcceptorQueue()
+
+	awaitWatcherEventsSubside(trieCleanJournalWatcher, time.Second)
+	// Assert that a new file is created in the trie clean journal
+	dirEntries, err := os.ReadDir(trieCleanJournal)
+	require.NoError(err)
+	require.NotEmpty(dirEntries)
+}
+
 func TestArchiveBlockChainSnapsDisabled(t *testing.T) {
 	create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) {
 		return createBlockChain(
@@ -279,6 +375,7 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) {
 		prunerConfig := pruner.Config{
 			Datadir:   tempDir,
 			BloomSize: 256,
+			Cachedir:  pruningConfig.TrieCleanJournal,
 		}
 
 		pruner, err := pruner.NewPruner(db, prunerConfig)
diff --git a/core/chain_indexer.go b/core/chain_indexer.go
index a7a2f945f2..2034619f75 100644
--- a/core/chain_indexer.go
+++ b/core/chain_indexer.go
@@ -29,7 +29,6 @@ package core
 import (
 	"context"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"sync"
 	"sync/atomic"
@@ -414,7 +413,7 @@ func (c *ChainIndexer) processSection(section uint64, lastHead common.Hash) (com
 		if header == nil {
 			return common.Hash{}, fmt.Errorf("block #%d [%x..] not found", number, hash[:4])
not found", number, hash[:4]) } else if header.ParentHash != lastHead { - return common.Hash{}, errors.New("chain reorged during section processing") + return common.Hash{}, fmt.Errorf("chain reorged during section processing") } if err := c.backend.Process(c.ctx, header); err != nil { return common.Hash{}, err diff --git a/core/chain_makers.go b/core/chain_makers.go index 4e9f794fc8..2231e413ae 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -271,7 +271,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse } // Write state changes to db - root, err := statedb.Commit(b.header.Number.Uint64(), config.IsEIP158(b.header.Number), false) + root, err := statedb.Commit(config.IsEIP158(b.header.Number), false) if err != nil { panic(fmt.Sprintf("state write error: %v", err)) } @@ -335,6 +335,7 @@ func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent Number: new(big.Int).Add(parent.Number(), common.Big1), Time: time, } + if chain.Config().IsSubnetEVM(time) { feeConfig, _, err := chain.GetFeeConfigAt(parent.Header()) if err != nil { diff --git a/core/error.go b/core/error.go index eac66628d6..bc519410c0 100644 --- a/core/error.go +++ b/core/error.go @@ -105,8 +105,4 @@ var ( // ErrSenderNoEOA is returned if the sender of a transaction is a contract. ErrSenderNoEOA = errors.New("sender not an eoa") - - // ErrBlobFeeCapTooLow is returned if the transaction fee cap is less than the - // blob gas fee of the block. - ErrBlobFeeCapTooLow = errors.New("max fee per blob gas less than block blob gas fee") ) diff --git a/core/evm.go b/core/evm.go index 9a93e70840..71da2dd4d4 100644 --- a/core/evm.go +++ b/core/evm.go @@ -102,16 +102,14 @@ func newEVMBlockContext(header *types.Header, chain ChainContext, author *common Difficulty: new(big.Int).Set(header.Difficulty), BaseFee: baseFee, GasLimit: header.GasLimit, - ExcessBlobGas: header.ExcessBlobGas, } } // NewEVMTxContext creates a new transaction context for a single transaction. 
 func NewEVMTxContext(msg *Message) vm.TxContext {
 	return vm.TxContext{
-		Origin:     msg.From,
-		GasPrice:   new(big.Int).Set(msg.GasPrice),
-		BlobHashes: msg.BlobHashes,
+		Origin:   msg.From,
+		GasPrice: new(big.Int).Set(msg.GasPrice),
 	}
 }
diff --git a/core/gen_genesis.go b/core/gen_genesis.go
index d2938b70d0..44002eb07a 100644
--- a/core/gen_genesis.go
+++ b/core/gen_genesis.go
@@ -29,13 +29,10 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
 		Alloc         map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
 		AirdropHash   common.Hash                                 `json:"airdropHash"`
 		AirdropAmount *math.HexOrDecimal256                       `json:"airdropAmount"`
-		AirdropData   []byte                                      `json:"-"`
 		Number        math.HexOrDecimal64                         `json:"number"`
 		GasUsed       math.HexOrDecimal64                         `json:"gasUsed"`
 		ParentHash    common.Hash                                 `json:"parentHash"`
 		BaseFee       *math.HexOrDecimal256                       `json:"baseFeePerGas"`
-		ExcessBlobGas *math.HexOrDecimal64                        `json:"excessBlobGas"`
-		BlobGasUsed   *math.HexOrDecimal64                        `json:"blobGasUsed"`
 	}
 	var enc Genesis
 	enc.Config = g.Config
@@ -54,13 +51,10 @@ func (g Genesis) MarshalJSON() ([]byte, error) {
 	}
 	enc.AirdropHash = g.AirdropHash
 	enc.AirdropAmount = (*math.HexOrDecimal256)(g.AirdropAmount)
-	enc.AirdropData = g.AirdropData
 	enc.Number = math.HexOrDecimal64(g.Number)
 	enc.GasUsed = math.HexOrDecimal64(g.GasUsed)
 	enc.ParentHash = g.ParentHash
 	enc.BaseFee = (*math.HexOrDecimal256)(g.BaseFee)
-	enc.ExcessBlobGas = (*math.HexOrDecimal64)(g.ExcessBlobGas)
-	enc.BlobGasUsed = (*math.HexOrDecimal64)(g.BlobGasUsed)
 	return json.Marshal(&enc)
 }
 
@@ -78,13 +72,10 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
 		Alloc         map[common.UnprefixedAddress]GenesisAccount `json:"alloc" gencodec:"required"`
 		AirdropHash   *common.Hash                                `json:"airdropHash"`
 		AirdropAmount *math.HexOrDecimal256                       `json:"airdropAmount"`
-		AirdropData   []byte                                      `json:"-"`
 		Number        *math.HexOrDecimal64                        `json:"number"`
 		GasUsed       *math.HexOrDecimal64                        `json:"gasUsed"`
 		ParentHash    *common.Hash                                `json:"parentHash"`
 		BaseFee       *math.HexOrDecimal256                       `json:"baseFeePerGas"`
-		ExcessBlobGas *math.HexOrDecimal64                        `json:"excessBlobGas"`
-		BlobGasUsed   *math.HexOrDecimal64                        `json:"blobGasUsed"`
 	}
 	var dec Genesis
 	if err := json.Unmarshal(input, &dec); err != nil {
@@ -129,9 +120,6 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
 	if dec.AirdropAmount != nil {
 		g.AirdropAmount = (*big.Int)(dec.AirdropAmount)
 	}
-	if dec.AirdropData != nil {
-		g.AirdropData = dec.AirdropData
-	}
 	if dec.Number != nil {
 		g.Number = uint64(*dec.Number)
 	}
@@ -144,11 +132,5 @@ func (g *Genesis) UnmarshalJSON(input []byte) error {
 	if dec.BaseFee != nil {
 		g.BaseFee = (*big.Int)(dec.BaseFee)
 	}
-	if dec.ExcessBlobGas != nil {
-		g.ExcessBlobGas = (*uint64)(dec.ExcessBlobGas)
-	}
-	if dec.BlobGasUsed != nil {
-		g.BlobGasUsed = (*uint64)(dec.BlobGasUsed)
-	}
 	return nil
 }
diff --git a/core/genesis.go b/core/genesis.go
index fc39b6f965..78037416d7 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -77,12 +77,10 @@ type Genesis struct {
 	// These fields are used for consensus tests. Please don't use them
 	// in actual genesis blocks.
-	Number        uint64      `json:"number"`
-	GasUsed       uint64      `json:"gasUsed"`
-	ParentHash    common.Hash `json:"parentHash"`
-	BaseFee       *big.Int    `json:"baseFeePerGas"` // EIP-1559
-	ExcessBlobGas *uint64     `json:"excessBlobGas"` // EIP-4844
-	BlobGasUsed   *uint64     `json:"blobGasUsed"`   // EIP-4844
+	Number     uint64      `json:"number"`
+	GasUsed    uint64      `json:"gasUsed"`
+	ParentHash common.Hash `json:"parentHash"`
+	BaseFee    *big.Int    `json:"baseFeePerGas"`
 }
 
 // GenesisAlloc specifies the initial state that is part of the genesis block.
@@ -121,8 +119,6 @@ type genesisSpecMarshaling struct {
 	BaseFee       *math.HexOrDecimal256
 	Alloc         map[common.UnprefixedAddress]GenesisAccount
 	AirdropAmount *math.HexOrDecimal256
-	ExcessBlobGas *math.HexOrDecimal64
-	BlobGasUsed   *math.HexOrDecimal64
 }
 
 type genesisAccountMarshaling struct {
@@ -236,7 +232,7 @@ func SetupGenesisBlock(
 	// when we start syncing from scratch, the last accepted block
 	// will be genesis block
 	if lastBlock == nil {
-		return newcfg, common.Hash{}, errors.New("missing last accepted block")
+		return newcfg, common.Hash{}, fmt.Errorf("missing last accepted block")
 	}
 	height := lastBlock.NumberU64()
 	timestamp := lastBlock.Time()
@@ -329,28 +325,14 @@ func (g *Genesis) toBlock(db ethdb.Database, triedb *trie.Database) *types.Block
 	if g.Difficulty == nil {
 		head.Difficulty = params.GenesisDifficulty
 	}
-	if conf := g.Config; conf != nil {
-		num := new(big.Int).SetUint64(g.Number)
-		if conf.IsSubnetEVM(g.Timestamp) {
-			if g.BaseFee != nil {
-				head.BaseFee = g.BaseFee
-			} else {
-				head.BaseFee = new(big.Int).Set(g.Config.FeeConfig.MinBaseFee)
-			}
-		}
-		if conf.IsCancun(num, g.Timestamp) {
-			head.ExcessBlobGas = g.ExcessBlobGas
-			head.BlobGasUsed = g.BlobGasUsed
-			if head.ExcessBlobGas == nil {
-				head.ExcessBlobGas = new(uint64)
-			}
-			if head.BlobGasUsed == nil {
-				head.BlobGasUsed = new(uint64)
-			}
+	if g.Config != nil && g.Config.IsSubnetEVM(0) {
+		if g.BaseFee != nil {
+			head.BaseFee = g.BaseFee
+		} else {
+			head.BaseFee = new(big.Int).Set(g.Config.FeeConfig.MinBaseFee)
 		}
 	}
-
-	statedb.Commit(0, false, false)
+	statedb.Commit(false, false)
 	// Commit newly generated states into disk if it's not empty.
 	if root != types.EmptyRootHash {
 		if err := triedb.Commit(root, true); err != nil {
diff --git a/core/genesis_test.go b/core/genesis_test.go
index a59beda33f..ea751f7f90 100644
--- a/core/genesis_test.go
+++ b/core/genesis_test.go
@@ -289,9 +289,9 @@ func TestPrecompileActivationAfterHeaderBlock(t *testing.T) {
 func TestGenesisWriteUpgradesRegression(t *testing.T) {
 	require := require.New(t)
 
-	config := *params.TestChainConfig
+	testConfig := *params.TestChainConfig
 	genesis := &Genesis{
-		Config: &config,
+		Config: &testConfig,
 		Alloc: GenesisAlloc{
 			{1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}},
 		},
diff --git a/core/mkalloc.go b/core/mkalloc.go
index 4ab78b31b7..affc4c3aee 100644
--- a/core/mkalloc.go
+++ b/core/mkalloc.go
@@ -40,28 +40,32 @@ import (
 	"fmt"
 	"math/big"
 	"os"
+	"sort"
 	"strconv"
 
 	"github.com/ava-labs/subnet-evm/core"
 	"github.com/ethereum/go-ethereum/rlp"
-	"golang.org/x/exp/slices"
 )
 
 type allocItem struct{ Addr, Balance *big.Int }
 
-func makelist(g *core.Genesis) []allocItem {
-	items := make([]allocItem, 0, len(g.Alloc))
+type allocList []allocItem
+
+func (a allocList) Len() int           { return len(a) }
+func (a allocList) Less(i, j int) bool { return a[i].Addr.Cmp(a[j].Addr) < 0 }
+func (a allocList) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+func makelist(g *core.Genesis) allocList {
+	a := make(allocList, 0, len(g.Alloc))
 	for addr, account := range g.Alloc {
 		if len(account.Storage) > 0 || len(account.Code) > 0 || account.Nonce != 0 {
 			panic(fmt.Sprintf("can't encode account %x", addr))
 		}
 		bigAddr := new(big.Int).SetBytes(addr.Bytes())
-		items = append(items, allocItem{bigAddr, account.Balance})
+		a = append(a, allocItem{bigAddr, account.Balance})
 	}
-	slices.SortFunc(items, func(a, b allocItem) bool {
-		return a.Addr.Cmp(b.Addr) < 0
-	})
-	return items
+	sort.Sort(a)
+	return a
 }
 
 func makealloc(g *core.Genesis) string {
diff --git a/core/predicate_check_test.go b/core/predicate_check_test.go
index 85e685154e..e661ba7f2c 100644
--- a/core/predicate_check_test.go
+++ b/core/predicate_check_test.go
@@ -297,7 +297,7 @@ func TestCheckPredicate(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			require := require.New(t)
 			// Create the rules from TestChainConfig and update the predicates based on the test params
-			rules := params.TestChainConfig.Rules(common.Big0, 0)
+			rules := params.TestChainConfig.AvalancheRules(common.Big0, 0)
 			if test.createPredicates != nil {
 				for address, predicater := range test.createPredicates(t) {
 					rules.Predicaters[address] = predicater
@@ -423,7 +423,7 @@ func TestCheckPredicatesOutput(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			require := require.New(t)
 			// Create the rules from TestChainConfig and update the predicates based on the test params
-			rules := params.TestChainConfig.Rules(common.Big0, 0)
+			rules := params.TestChainConfig.AvalancheRules(common.Big0, 0)
 			predicater := precompileconfig.NewMockPredicater(gomock.NewController(t))
 			predicater.EXPECT().PredicateGas(gomock.Any()).Return(uint64(0), nil).Times(len(test.testTuple))
diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go
index 47dd95abf9..7005019485 100644
--- a/core/rawdb/accessors_chain.go
+++ b/core/rawdb/accessors_chain.go
@@ -32,7 +32,6 @@ import (
 	"errors"
 	"math/big"
 
-	"github.com/ava-labs/subnet-evm/consensus/misc/eip4844"
 	"github.com/ava-labs/subnet-evm/core/types"
 	"github.com/ava-labs/subnet-evm/params"
 	"github.com/ethereum/go-ethereum/common"
@@ -86,7 +85,7 @@ type NumberHash struct {
 	Hash   common.Hash
 }
 
-// ReadAllHashesInRange retrieves all the hashes assigned to blocks at certain
+// ReadAllHashesInRange retrieves all the hashes assigned to blocks at a certain
 // heights, both canonical and reorged forks included.
 // This method considers both limits to be _inclusive_.
 func ReadAllHashesInRange(db ethdb.Iteratee, first, last uint64) []*NumberHash {
@@ -205,11 +204,12 @@ func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
 
 // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
 func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+	// Then try to look up the data in leveldb.
 	data, _ := db.Get(headerKey(number, hash))
 	if len(data) > 0 {
 		return data
 	}
-	return nil
+	return nil // Can't find the data anywhere.
 }
 
 // HasHeader verifies the existence of a block header corresponding to the hash.
@@ -273,11 +273,12 @@ func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number
 
 // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
 func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+	// Then try to look up the data in leveldb.
 	data, _ := db.Get(blockBodyKey(number, hash))
 	if len(data) > 0 {
 		return data
 	}
-	return nil
+	return nil // Can't find the data anywhere.
 }
 
 // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
@@ -347,11 +348,12 @@ func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
 
 // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
 func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
+	// Then try to look up the data in leveldb.
 	data, _ := db.Get(blockReceiptsKey(number, hash))
 	if len(data) > 0 {
 		return data
 	}
-	return nil
+	return nil // Can't find the data anywhere.
 }
 
 // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
@@ -395,19 +397,13 @@ func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, time uint64,
 		return nil
 	}
 	header := ReadHeader(db, hash, number)
-
 	var baseFee *big.Int
 	if header == nil {
 		baseFee = big.NewInt(0)
 	} else {
 		baseFee = header.BaseFee
 	}
-	// Compute effective blob gas price.
-	var blobGasPrice *big.Int
-	if header != nil && header.ExcessBlobGas != nil {
-		blobGasPrice = eip4844.CalcBlobFee(*header.ExcessBlobGas)
-	}
-	if err := receipts.DeriveFields(config, hash, number, time, baseFee, blobGasPrice, body.Transactions); err != nil {
+	if err := receipts.DeriveFields(config, hash, number, time, baseFee, body.Transactions); err != nil {
 		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
 		return nil
 	}
diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go
index b9e1eddb4d..17c407fe17 100644
--- a/core/rawdb/accessors_chain_test.go
+++ b/core/rawdb/accessors_chain_test.go
@@ -83,7 +83,7 @@ func TestBodyStorage(t *testing.T) {
 	WriteBody(db, hash, 0, body)
 	if entry := ReadBody(db, hash, 0); entry == nil {
 		t.Fatalf("Stored body not found")
-	} else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
+	} else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(types.Transactions(body.Transactions), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(body.Uncles) {
 		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, body)
 	}
 	if entry := ReadBodyRLP(db, hash, 0); entry == nil {
@@ -137,7 +137,7 @@ func TestBlockStorage(t *testing.T) {
 	}
 	if entry := ReadBody(db, block.Hash(), block.NumberU64()); entry == nil {
 		t.Fatalf("Stored body not found")
-	} else if types.DeriveSha(types.Transactions(entry.Transactions), newTestHasher()) != types.DeriveSha(block.Transactions(), newTestHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
+	} else if types.DeriveSha(types.Transactions(entry.Transactions), newHasher()) != types.DeriveSha(block.Transactions(), newHasher()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) {
 		t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body())
 	}
 	// Delete the block and verify the execution
diff --git a/core/rawdb/accessors_indexes_test.go b/core/rawdb/accessors_indexes_test.go
index f887b7ea3d..6ee68178f2 100644
--- a/core/rawdb/accessors_indexes_test.go
+++ b/core/rawdb/accessors_indexes_test.go
@@ -18,17 +18,41 @@ package rawdb
 
 import (
 	"bytes"
+	"hash"
 	"math/big"
 	"testing"
 
 	"github.com/ava-labs/subnet-evm/core/types"
-	"github.com/ava-labs/subnet-evm/internal/blocktest"
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/rlp"
+	"golang.org/x/crypto/sha3"
 )
 
-var newTestHasher = blocktest.NewHasher
+// testHasher is the helper tool for transaction/receipt list hashing.
+// The original hasher is trie, in order to get rid of import cycle,
+// use the testing hasher instead.
+type testHasher struct {
+	hasher hash.Hash
+}
+
+func newHasher() *testHasher {
+	return &testHasher{hasher: sha3.NewLegacyKeccak256()}
+}
+
+func (h *testHasher) Reset() {
+	h.hasher.Reset()
+}
+
+func (h *testHasher) Update(key, val []byte) error {
+	h.hasher.Write(key)
+	h.hasher.Write(val)
+	return nil
+}
+
+func (h *testHasher) Hash() common.Hash {
+	return common.BytesToHash(h.hasher.Sum(nil))
+}
 
 // Tests that positional lookup metadata can be stored and retrieved.
 func TestLookupStorage(t *testing.T) {
@@ -75,7 +99,7 @@ func TestLookupStorage(t *testing.T) {
 			tx3 := types.NewTransaction(3, common.BytesToAddress([]byte{0x33}), big.NewInt(333), 3333, big.NewInt(33333), []byte{0x33, 0x33, 0x33})
 			txs := []*types.Transaction{tx1, tx2, tx3}
 
-			block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newTestHasher())
+			block := types.NewBlock(&types.Header{Number: big.NewInt(314)}, txs, nil, nil, newHasher())
 
 			// Check that no transactions entries are in a pristine database
 			for i, tx := range txs {
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index 31f89b0d13..1cb712d3e4 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -27,8 +27,6 @@
 package rawdb
 
 import (
-	"encoding/binary"
-
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/log"
@@ -79,68 +77,3 @@ func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
 		log.Crit("Failed to delete contract code", "err", err)
 	}
 }
-
-// ReadStateID retrieves the state id with the provided state root.
-func ReadStateID(db ethdb.KeyValueReader, root common.Hash) *uint64 {
-	data, err := db.Get(stateIDKey(root))
-	if err != nil || len(data) == 0 {
-		return nil
-	}
-	number := binary.BigEndian.Uint64(data)
-	return &number
-}
-
-// WriteStateID writes the provided state lookup to database.
-func WriteStateID(db ethdb.KeyValueWriter, root common.Hash, id uint64) {
-	var buff [8]byte
-	binary.BigEndian.PutUint64(buff[:], id)
-	if err := db.Put(stateIDKey(root), buff[:]); err != nil {
-		log.Crit("Failed to store state ID", "err", err)
-	}
-}
-
-// DeleteStateID deletes the specified state lookup from the database.
-func DeleteStateID(db ethdb.KeyValueWriter, root common.Hash) {
-	if err := db.Delete(stateIDKey(root)); err != nil {
-		log.Crit("Failed to delete state ID", "err", err)
-	}
-}
-
-// ReadPersistentStateID retrieves the id of the persistent state from the database.
-func ReadPersistentStateID(db ethdb.KeyValueReader) uint64 {
-	data, _ := db.Get(persistentStateIDKey)
-	if len(data) != 8 {
-		return 0
-	}
-	return binary.BigEndian.Uint64(data)
-}
-
-// WritePersistentStateID stores the id of the persistent state into database.
-func WritePersistentStateID(db ethdb.KeyValueWriter, number uint64) {
-	if err := db.Put(persistentStateIDKey, encodeBlockNumber(number)); err != nil {
-		log.Crit("Failed to store the persistent state ID", "err", err)
-	}
-}
-
-// ReadTrieJournal retrieves the serialized in-memory trie nodes of layers saved at
-// the last shutdown.
-func ReadTrieJournal(db ethdb.KeyValueReader) []byte {
-	data, _ := db.Get(trieJournalKey)
-	return data
-}
-
-// WriteTrieJournal stores the serialized in-memory trie nodes of layers to save at
-// shutdown.
-func WriteTrieJournal(db ethdb.KeyValueWriter, journal []byte) {
-	if err := db.Put(trieJournalKey, journal); err != nil {
-		log.Crit("Failed to store tries journal", "err", err)
-	}
-}
-
-// DeleteTrieJournal deletes the serialized in-memory trie nodes of layers saved at
-// the last shutdown.
-func DeleteTrieJournal(db ethdb.KeyValueWriter) {
-	if err := db.Delete(trieJournalKey); err != nil {
-		log.Crit("Failed to remove tries journal", "err", err)
-	}
-}
diff --git a/core/rawdb/accessors_trie.go b/core/rawdb/accessors_trie.go
index 4e020e11ca..a5e3a517e2 100644
--- a/core/rawdb/accessors_trie.go
+++ b/core/rawdb/accessors_trie.go
@@ -56,23 +56,21 @@ const HashScheme = "hashScheme"
 // on extra state diffs to survive deep reorg.
 const PathScheme = "pathScheme"
 
-// hasher is used to compute the sha256 hash of the provided data.
-type hasher struct{ sha crypto.KeccakState }
+// nodeHasher used to derive the hash of trie node.
+type nodeHasher struct{ sha crypto.KeccakState }
 
 var hasherPool = sync.Pool{
-	New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
+	New: func() interface{} { return &nodeHasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} },
 }
 
-func newHasher() *hasher {
-	return hasherPool.Get().(*hasher)
-}
-
-func (h *hasher) hash(data []byte) common.Hash {
-	return crypto.HashData(h.sha, data)
-}
+func newNodeHasher() *nodeHasher       { return hasherPool.Get().(*nodeHasher) }
+func returnHasherToPool(h *nodeHasher) { hasherPool.Put(h) }
 
-func (h *hasher) release() {
-	hasherPool.Put(h)
+func (h *nodeHasher) hashData(data []byte) (n common.Hash) {
+	h.sha.Reset()
+	h.sha.Write(data)
+	h.sha.Read(n[:])
+	return n
 }
 
 // ReadAccountTrieNode retrieves the account trie node and the associated node
@@ -82,9 +80,9 @@ func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.H
 	if err != nil {
 		return nil, common.Hash{}
 	}
-	h := newHasher()
-	defer h.release()
-	return data, h.hash(data)
+	hasher := newNodeHasher()
+	defer returnHasherToPool(hasher)
+	return data, hasher.hashData(data)
 }
 
 // HasAccountTrieNode checks the account trie node presence with the specified
@@ -94,9 +92,9 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
 	if err != nil {
 		return false
 	}
-	h := newHasher()
-	defer h.release()
-	return h.hash(data) == hash
+	hasher := newNodeHasher()
+	defer returnHasherToPool(hasher)
+	return hasher.hashData(data) == hash
 }
 
 // WriteAccountTrieNode writes the provided account trie node into database.
@@ -120,9 +118,9 @@ func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path
 	if err != nil {
 		return nil, common.Hash{}
 	}
-	h := newHasher()
-	defer h.release()
-	return data, h.hash(data)
+	hasher := newNodeHasher()
+	defer returnHasherToPool(hasher)
+	return data, hasher.hashData(data)
 }
 
 // HasStorageTrieNode checks the storage trie node presence with the provided
@@ -132,9 +130,9 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path [
 	if err != nil {
 		return false
 	}
-	h := newHasher()
-	defer h.release()
-	return h.hash(data) == hash
+	hasher := newNodeHasher()
+	defer returnHasherToPool(hasher)
+	return hasher.hashData(data) == hash
 }
 
 // WriteStorageTrieNode writes the provided storage trie node into database.
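
The pooled hasher restored in accessors_trie.go above reuses keccak states across calls so that hashing a trie node does not allocate a fresh hasher each time. A self-contained sketch of the same sync.Pool pattern using only the standard `hash.Hash` interface (the names here are illustrative, not the package's); the in-tree version goes further and reads the digest through `crypto.KeccakState.Read`, which also avoids the allocation that `Sum` makes:

    package main

    import (
    	"fmt"
    	"hash"
    	"sync"

    	"golang.org/x/crypto/sha3"
    )

    // keccakPool hands out reusable keccak256 states.
    var keccakPool = sync.Pool{
    	New: func() interface{} { return sha3.NewLegacyKeccak256() },
    }

    // hashData hashes data with a pooled keccak256 state, returning it afterwards.
    func hashData(data []byte) (out [32]byte) {
    	h := keccakPool.Get().(hash.Hash)
    	defer keccakPool.Put(h)
    	h.Reset() // the state may be dirty from a previous use
    	h.Write(data)
    	copy(out[:], h.Sum(nil))
    	return out
    }

    func main() {
    	fmt.Printf("%x\n", hashData([]byte("trie node")))
    }
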
diff --git a/core/rawdb/chain_iterator_test.go b/core/rawdb/chain_iterator_test.go index 6242741a76..fb2ee988f1 100644 --- a/core/rawdb/chain_iterator_test.go +++ b/core/rawdb/chain_iterator_test.go @@ -44,7 +44,7 @@ func TestChainIterator(t *testing.T) { var block *types.Block var txs []*types.Transaction to := common.BytesToAddress([]byte{0x11}) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) // Empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) // Empty genesis block WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) for i := uint64(1); i <= 10; i++ { @@ -70,7 +70,7 @@ func TestChainIterator(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } @@ -121,7 +121,7 @@ func TestIndexTransactions(t *testing.T) { to := common.BytesToAddress([]byte{0x11}) // Write empty genesis block - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) @@ -148,7 +148,7 @@ func TestIndexTransactions(t *testing.T) { }) } txs = append(txs, tx) - block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newTestHasher()) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher()) WriteBlock(chainDb, block) WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index e0e85fb3b7..45f6a584f3 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -83,13 +83,13 @@ func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, e } // TruncateHead returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) { - return 0, errNotSupported +func (db *nofreezedb) TruncateHead(items uint64) error { + return errNotSupported } // TruncateTail returns an error as we don't have a backing chain freezer. -func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) { - return 0, errNotSupported +func (db *nofreezedb) TruncateTail(items uint64) error { + return errNotSupported } // Sync returns an error as we don't have a backing chain freezer. diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 261169ba13..b972bf2681 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -48,9 +48,6 @@ var ( // headBlockKey tracks the latest known full block's hash. headBlockKey = []byte("LastBlock") - // persistentStateIDKey tracks the id of latest stored state(for path-based only). - persistentStateIDKey = []byte("LastStateID") - // snapshotRootKey tracks the hash of the last snapshot. snapshotRootKey = []byte("SnapshotRoot") @@ -60,9 +57,6 @@ var ( // snapshotGeneratorKey tracks the snapshot generation marker across restarts. snapshotGeneratorKey = []byte("SnapshotGenerator") - // trieJournalKey tracks the in-memory trie node layers across restarts. 
- trieJournalKey = []byte("TrieJournal") - // txIndexTailKey tracks the oldest block whose transactions have been indexed. txIndexTailKey = []byte("TransactionIndexTail") @@ -99,7 +93,6 @@ var ( // Path-based storage scheme of merkle patricia trie. trieNodeAccountPrefix = []byte("A") // trieNodeAccountPrefix + hexPath -> trie node trieNodeStoragePrefix = []byte("O") // trieNodeStoragePrefix + accountHash + hexPath -> trie node - stateIDPrefix = []byte("L") // stateIDPrefix + state root -> state id PreimagePrefix = []byte("secure-key-") // PreimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db @@ -231,11 +224,6 @@ func upgradeConfigKey(hash common.Hash) []byte { return append(upgradeConfigPrefix, hash.Bytes()...) } -// stateIDKey = stateIDPrefix + root (32 bytes) -func stateIDKey(root common.Hash) []byte { - return append(stateIDPrefix, root.Bytes()...) -} - // accountTrieNodeKey = trieNodeAccountPrefix + nodePath. func accountTrieNodeKey(path []byte) []byte { return append(trieNodeAccountPrefix, path...) diff --git a/core/rawdb/table.go b/core/rawdb/table.go index 0fa34a8c06..9563f8cc5f 100644 --- a/core/rawdb/table.go +++ b/core/rawdb/table.go @@ -107,13 +107,13 @@ func (t *table) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err e // TruncateHead is a noop passthrough that just forwards the request to the underlying // database. -func (t *table) TruncateHead(items uint64) (uint64, error) { +func (t *table) TruncateHead(items uint64) error { return t.db.TruncateHead(items) } // TruncateTail is a noop passthrough that just forwards the request to the underlying // database. -func (t *table) TruncateTail(items uint64) (uint64, error) { +func (t *table) TruncateTail(items uint64) error { return t.db.TruncateTail(items) } diff --git a/core/state/database.go b/core/state/database.go index 15d1367b56..f550a53a75 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -36,7 +36,6 @@ import ( "github.com/ava-labs/subnet-evm/trie/trienode" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/lru" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" ) @@ -54,16 +53,16 @@ type Database interface { OpenTrie(root common.Hash) (Trie, error) // OpenStorageTrie opens the storage trie of an account. - OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) + OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) // CopyTrie returns an independent copy of the given trie. CopyTrie(Trie) Trie // ContractCode retrieves a particular contract's code. - ContractCode(addr common.Address, codeHash common.Hash) ([]byte, error) + ContractCode(addrHash, codeHash common.Hash) ([]byte, error) // ContractCodeSize retrieves a particular contracts code's size. - ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) + ContractCodeSize(addrHash, codeHash common.Hash) (int, error) // DiskDB returns the underlying key-value disk database. DiskDB() ethdb.KeyValueStore @@ -104,10 +103,6 @@ type Trie interface { // in the trie with provided address. UpdateAccount(address common.Address, account *types.StateAccount) error - // UpdateContractCode abstracts code write to the trie. It is expected - // to be moved to the stateWriter interface when the latter is ready. - UpdateContractCode(address common.Address, codeHash common.Hash, code []byte) error - // DeleteStorage removes any existing value for key from the trie. 
If a node // was not found in the database, a trie.MissingNodeError is returned. DeleteStorage(addr common.Address, key []byte) error @@ -125,12 +120,11 @@ type Trie interface { // The returned nodeset can be nil if the trie is clean(nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage - Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) + Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) // NodeIterator returns an iterator that returns nodes of the trie. Iteration - // starts at the key after the given start key. And error will be returned - // if fails to create node iterator. - NodeIterator(startKey []byte) (trie.NodeIterator, error) + // starts at the key after the given start key. + NodeIterator(startKey []byte) trie.NodeIterator // Prove constructs a Merkle proof for key. The result contains all encoded nodes // on the path to the value at key. The value itself is also included in the last @@ -139,7 +133,7 @@ type Trie interface { // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. - Prove(key []byte, proofDb ethdb.KeyValueWriter) error + Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error } // NewDatabase creates a backing store for state. The returned database is safe for @@ -188,8 +182,8 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { } // OpenStorageTrie opens the storage trie of an account. -func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash) (Trie, error) { - tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, crypto.Keccak256Hash(address.Bytes()), root), db.triedb) +func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error) { + tr, err := trie.NewStateTrie(trie.StorageTrieID(stateRoot, addrHash, root), db.triedb) if err != nil { return nil, err } @@ -207,7 +201,7 @@ func (db *cachingDB) CopyTrie(t Trie) Trie { } // ContractCode retrieves a particular contract's code. -func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) ([]byte, error) { +func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error) { code, _ := db.codeCache.Get(codeHash) if len(code) > 0 { return code, nil @@ -222,11 +216,11 @@ func (db *cachingDB) ContractCode(address common.Address, codeHash common.Hash) } // ContractCodeSize retrieves a particular contracts code's size. 
-func (db *cachingDB) ContractCodeSize(addr common.Address, codeHash common.Hash) (int, error) { +func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, error) { if cached, ok := db.codeSizeCache.Get(codeHash); ok { return cached, nil } - code, err := db.ContractCode(addr, codeHash) + code, err := db.ContractCode(addrHash, codeHash) return len(code), err } diff --git a/core/state/dump.go b/core/state/dump.go index 65b1dca9f9..ecbe833b00 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -150,11 +150,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] log.Info("Trie dumping started", "root", s.trie.Hash()) c.OnRoot(s.trie.Hash()) - trieIt, err := s.trie.NodeIterator(conf.Start) - if err != nil { - return nil - } - it := trie.NewIterator(trieIt) + it := trie.NewIterator(s.trie.NodeIterator(conf.Start)) for it.Next() { var data types.StateAccount if err := rlp.DecodeBytes(it.Value, &data); err != nil { @@ -181,23 +177,18 @@ func (s *StateDB) DumpToCollector(c DumpCollector, conf *DumpConfig) (nextKey [] } else { address = &addr } - obj := newObject(s, addr, &data) + obj := newObject(s, addr, data) if !conf.SkipCode { - account.Code = obj.Code() + account.Code = obj.Code(s.db) } if !conf.SkipStorage { account.Storage = make(map[common.Hash]string) - tr, err := obj.getTrie() + tr, err := obj.getTrie(s.db) if err != nil { log.Error("Failed to load storage trie", "err", err) continue } - trieIt, err := tr.NodeIterator(nil) - if err != nil { - log.Error("Failed to create trie iterator", "err", err) - continue - } - storageIt := trie.NewIterator(trieIt) + storageIt := trie.NewIterator(tr.NodeIterator(nil)) for storageIt.Next() { _, content, _, err := rlp.Split(storageIt.Value) if err != nil { diff --git a/core/state/iterator.go b/core/state/iterator.go index 9129ce16ca..47b46b53e3 100644 --- a/core/state/iterator.go +++ b/core/state/iterator.go @@ -28,7 +28,6 @@ package state import ( "bytes" - "errors" "fmt" "github.com/ava-labs/subnet-evm/core/types" @@ -38,8 +37,7 @@ import ( ) // nodeIterator is an iterator to traverse the entire state trie post-order, -// including all of the contract code and contract state tries. Preimage is -// required in order to resolve the contract address. +// including all of the contract code and contract state tries. 
type nodeIterator struct { state *StateDB // State being iterated @@ -86,12 +84,8 @@ func (it *nodeIterator) step() error { return nil } // Initialize the iterator if we've just started - var err error if it.stateIt == nil { - it.stateIt, err = it.state.trie.NodeIterator(nil) - if err != nil { - return err - } + it.stateIt = it.state.trie.NodeIterator(nil) } // If we had data nodes previously, we surely have at least state nodes if it.dataIt != nil { @@ -125,28 +119,18 @@ func (it *nodeIterator) step() error { if err := rlp.Decode(bytes.NewReader(it.stateIt.LeafBlob()), &account); err != nil { return err } - // Lookup the preimage of account hash - preimage := it.state.trie.GetKey(it.stateIt.LeafKey()) - if preimage == nil { - return errors.New("account address is not available") - } - address := common.BytesToAddress(preimage) - - // Traverse the storage slots belong to the account - dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, address, account.Root) - if err != nil { - return err - } - it.dataIt, err = dataTrie.NodeIterator(nil) + dataTrie, err := it.state.db.OpenStorageTrie(it.state.originalRoot, common.BytesToHash(it.stateIt.LeafKey()), account.Root) if err != nil { return err } + it.dataIt = dataTrie.NodeIterator(nil) if !it.dataIt.Next(true) { it.dataIt = nil } if !bytes.Equal(account.CodeHash, types.EmptyCodeHash.Bytes()) { it.codeHash = common.BytesToHash(account.CodeHash) - it.code, err = it.state.db.ContractCode(address, common.BytesToHash(account.CodeHash)) + addrHash := common.BytesToHash(it.stateIt.LeafKey()) + it.code, err = it.state.db.ContractCode(addrHash, common.BytesToHash(account.CodeHash)) if err != nil { return fmt.Errorf("code %x: %v", account.CodeHash, err) } diff --git a/core/state/journal.go b/core/state/journal.go index 4ba90fba5f..1f62869fc3 100644 --- a/core/state/journal.go +++ b/core/state/journal.go @@ -100,19 +100,12 @@ type ( account *common.Address } resetObjectChange struct { - account *common.Address prev *stateObject prevdestruct bool - prevAccount []byte - prevStorage map[common.Hash][]byte - - prevAccountOriginExist bool - prevAccountOrigin []byte - prevStorageOrigin map[common.Hash][]byte } - selfDestructChange struct { + suicideChange struct { account *common.Address - prev bool // whether account had already self-destructed + prev bool // whether account had already suicided prevbalance *big.Int } @@ -176,33 +169,21 @@ func (ch resetObjectChange) revert(s *StateDB) { if !ch.prevdestruct { delete(s.stateObjectsDestruct, ch.prev.address) } - if ch.prevAccount != nil { - s.accounts[ch.prev.addrHash] = ch.prevAccount - } - if ch.prevStorage != nil { - s.storages[ch.prev.addrHash] = ch.prevStorage - } - if ch.prevAccountOriginExist { - s.accountsOrigin[ch.prev.address] = ch.prevAccountOrigin - } - if ch.prevStorageOrigin != nil { - s.storagesOrigin[ch.prev.address] = ch.prevStorageOrigin - } } func (ch resetObjectChange) dirtied() *common.Address { - return ch.account + return nil } -func (ch selfDestructChange) revert(s *StateDB) { +func (ch suicideChange) revert(s *StateDB) { obj := s.getStateObject(*ch.account) if obj != nil { - obj.selfDestructed = ch.prev + obj.suicided = ch.prev obj.setBalance(ch.prevbalance) } } -func (ch selfDestructChange) dirtied() *common.Address { +func (ch suicideChange) dirtied() *common.Address { return ch.account } diff --git a/core/state/metrics.go b/core/state/metrics.go index 5af6243c98..ff131416f0 100644 --- a/core/state/metrics.go +++ b/core/state/metrics.go @@ -37,11 +37,4 @@ var ( 
storageTriesUpdatedMeter = metrics.NewRegisteredMeter("state/update/storagenodes", nil) accountTrieDeletedMeter = metrics.NewRegisteredMeter("state/delete/accountnodes", nil) storageTriesDeletedMeter = metrics.NewRegisteredMeter("state/delete/storagenodes", nil) - - slotDeletionMaxCount = metrics.NewRegisteredGauge("state/delete/storage/max/slot", nil) - slotDeletionMaxSize = metrics.NewRegisteredGauge("state/delete/storage/max/size", nil) - slotDeletionTimer = metrics.NewRegisteredResettingTimer("state/delete/storage/timer", nil) - slotDeletionCount = metrics.NewRegisteredMeter("state/delete/storage/slot", nil) - slotDeletionSize = metrics.NewRegisteredMeter("state/delete/storage/size", nil) - slotDeletionSkip = metrics.NewRegisteredGauge("state/delete/storage/skip", nil) ) diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go index 1a5a342384..84981a6d4a 100644 --- a/core/state/pruner/pruner.go +++ b/core/state/pruner/pruner.go @@ -67,6 +67,7 @@ const ( // Config includes all the configurations for pruning. type Config struct { Datadir string // The directory of the state database + Cachedir string // The directory of state clean cache BloomSize uint64 // The Megabytes of memory allocated to bloom-filter } @@ -260,7 +261,7 @@ func (p *Pruner) Prune(root common.Hash) error { return err } if stateBloomRoot != (common.Hash{}) { - return RecoverPruning(p.config.Datadir, p.db) + return RecoverPruning(p.config.Datadir, p.db, p.config.Cachedir) } // If the target state root is not specified, return a fatal error. @@ -275,6 +276,11 @@ func (p *Pruner) Prune(root common.Hash) error { } else { log.Info("Selecting last accepted block root as the pruning target", "root", root) } + // Before start the pruning, delete the clean trie cache first. + // It's necessary otherwise in the next restart we will hit the + // deleted state root in the "clean cache" so that the incomplete + // state is picked for usage. + deleteCleanTrieCache(p.config.Cachedir) // Traverse the target state, re-construct the whole state trie and // commit to the given bloom filter. @@ -304,7 +310,7 @@ func (p *Pruner) Prune(root common.Hash) error { // pruning can be resumed. What's more if the bloom filter is constructed, the // pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left // in the disk. -func RecoverPruning(datadir string, db ethdb.Database) error { +func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error { stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir) if err != nil { return err @@ -322,6 +328,12 @@ func RecoverPruning(datadir string, db ethdb.Database) error { } log.Info("Loaded state bloom filter", "path", stateBloomPath) + // Before start the pruning, delete the clean trie cache first. + // It's necessary otherwise in the next restart we will hit the + // deleted state root in the "clean cache" so that the incomplete + // state is picked for usage. + deleteCleanTrieCache(trieCachePath) + // All the state roots of the middle layers should be forcibly pruned, // otherwise the dangling state will be left. 
if stateBloomRoot != headBlock.Root() { @@ -346,10 +358,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if err != nil { return err } - accIter, err := t.NodeIterator(nil) - if err != nil { - return err - } + accIter := t.NodeIterator(nil) for accIter.Next(true) { hash := accIter.Hash() @@ -370,10 +379,7 @@ func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { if err != nil { return err } - storageIter, err := storageTrie.NodeIterator(nil) - if err != nil { - return err - } + storageIter := storageTrie.NodeIterator(nil) for storageIter.Next(true) { hash := storageIter.Hash() if hash != (common.Hash{}) { @@ -423,3 +429,23 @@ func findBloomFilter(datadir string) (string, common.Hash, error) { } return stateBloomPath, stateBloomRoot, nil } + +const warningLog = ` + +WARNING! + +The clean trie cache is not found. Please delete it by yourself after the +pruning. Remember don't start the Subnet-EVM without deleting the clean trie cache +otherwise the entire database may be damaged! + +Check the configuration option "offline-pruning-enabled" for more details. +` + +func deleteCleanTrieCache(path string) { + if !common.FileExist(path) { + log.Warn(warningLog) + return + } + os.RemoveAll(path) + log.Info("Deleted trie clean cache", "path", path) +} diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go new file mode 100644 index 0000000000..5a3444eb44 --- /dev/null +++ b/core/state/snapshot/account.go @@ -0,0 +1,97 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snapshot + +import ( + "bytes" + "math/big" + + "github.com/ava-labs/subnet-evm/core/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" +) + +// Account is a modified version of a state.Account, where the root is replaced +// with a byte slice. This format can be used to represent full-consensus format +// or slim-snapshot format which replaces the empty root and code hash as nil +// byte slice. 
+type Account struct { + Nonce uint64 + Balance *big.Int + Root []byte + CodeHash []byte +} + +// SlimAccount converts a state.Account content into a slim snapshot account +func SlimAccount(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) Account { + slim := Account{ + Nonce: nonce, + Balance: balance, + } + if root != types.EmptyRootHash { + slim.Root = root[:] + } + if !bytes.Equal(codehash, types.EmptyCodeHash[:]) { + slim.CodeHash = codehash + } + return slim +} + +// SlimAccountRLP converts a state.Account content into a slim snapshot +// version RLP encoded. +func SlimAccountRLP(nonce uint64, balance *big.Int, root common.Hash, codehash []byte) []byte { + data, err := rlp.EncodeToBytes(SlimAccount(nonce, balance, root, codehash)) + if err != nil { + panic(err) + } + return data +} + +// FullAccount decodes the data on the 'slim RLP' format and return +// the consensus format account. +func FullAccount(data []byte) (Account, error) { + var account Account + if err := rlp.DecodeBytes(data, &account); err != nil { + return Account{}, err + } + if len(account.Root) == 0 { + account.Root = types.EmptyRootHash[:] + } + if len(account.CodeHash) == 0 { + account.CodeHash = types.EmptyCodeHash[:] + } + return account, nil +} + +// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format. +func FullAccountRLP(data []byte) ([]byte, error) { + account, err := FullAccount(data) + if err != nil { + return nil, err + } + return rlp.EncodeToBytes(account) +} diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index e93a4fcb88..c731af79b0 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -27,6 +27,7 @@ package snapshot import ( + "bytes" "encoding/binary" "errors" "fmt" @@ -310,7 +311,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou fullData []byte ) if leafCallback == nil { - fullData, err = types.FullAccountRLP(it.(AccountIterator).Account()) + fullData, err = FullAccountRLP(it.(AccountIterator).Account()) if err != nil { return stop(err) } @@ -322,7 +323,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou return stop(err) } // Fetch the next account and process it concurrently - account, err := types.FullAccount(it.(AccountIterator).Account()) + account, err := FullAccount(it.(AccountIterator).Account()) if err != nil { return stop(err) } @@ -332,7 +333,7 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou results <- err return } - if account.Root != subroot { + if !bytes.Equal(account.Root, subroot.Bytes()) { results <- fmt.Errorf("invalid subroot(path %x), want %x, have %x", hash, account.Root, subroot) return } diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 664cb91721..74dcfc92d5 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -31,15 +31,14 @@ import ( "fmt" "math" "math/rand" + "sort" "sync" "sync/atomic" "time" - "github.com/ava-labs/subnet-evm/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" bloomfilter "github.com/holiman/bloomfilter/v2" - "golang.org/x/exp/slices" ) var ( @@ -290,7 +289,7 @@ func (dl *diffLayer) Stale() bool { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. 
-func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { +func (dl *diffLayer) Account(hash common.Hash) (*Account, error) { data, err := dl.AccountRLP(hash) if err != nil { return nil, err @@ -298,7 +297,7 @@ func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { if len(data) == 0 { // can be both nil and []byte{} return nil, nil } - account := new(types.SlimAccount) + account := new(Account) if err := rlp.DecodeBytes(data, account); err != nil { panic(err) } @@ -310,8 +309,8 @@ func (dl *diffLayer) Account(hash common.Hash) (*types.SlimAccount, error) { // // Note the returned account is not a copy, please don't modify it. func (dl *diffLayer) AccountRLP(hash common.Hash) ([]byte, error) { - // Check staleness before reaching further. dl.lock.RLock() + // Check staleness before reaching further. if dl.Stale() { dl.lock.RUnlock() return nil, ErrSnapshotStale @@ -542,7 +541,7 @@ func (dl *diffLayer) AccountList() []common.Hash { dl.accountList = append(dl.accountList, hash) } } - slices.SortFunc(dl.accountList, common.Hash.Cmp) + sort.Sort(hashes(dl.accountList)) dl.memory += uint64(len(dl.accountList) * common.HashLength) return dl.accountList } @@ -580,7 +579,7 @@ func (dl *diffLayer) StorageList(accountHash common.Hash) ([]common.Hash, bool) for k := range storageMap { storageList = append(storageList, k) } - slices.SortFunc(storageList, common.Hash.Cmp) + sort.Sort(hashes(storageList)) dl.storageList[accountHash] = storageList dl.memory += uint64(len(dl.storageList)*common.HashLength + common.HashLength) return storageList, destructed diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 08bbf4104d..063d4435e5 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -246,7 +246,7 @@ func TestInsertAndMerge(t *testing.T) { func emptyLayer() *diskLayer { return &diskLayer{ diskdb: memorydb.New(), - cache: utils.NewMeteredCache(500*1024, "", 0), + cache: utils.NewMeteredCache(500*1024, "", "", 0), } } diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go index 5e308fde9d..7467ba8c9c 100644 --- a/core/state/snapshot/disklayer.go +++ b/core/state/snapshot/disklayer.go @@ -32,7 +32,6 @@ import ( "time" "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/trie" "github.com/ava-labs/subnet-evm/utils" "github.com/ethereum/go-ethereum/common" @@ -89,7 +88,7 @@ func (dl *diskLayer) Stale() bool { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. 
-func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) { +func (dl *diskLayer) Account(hash common.Hash) (*Account, error) { data, err := dl.AccountRLP(hash) if err != nil { return nil, err @@ -97,7 +96,7 @@ func (dl *diskLayer) Account(hash common.Hash) (*types.SlimAccount, error) { if len(data) == 0 { // can be both nil and []byte{} return nil, nil } - account := new(types.SlimAccount) + account := new(Account) if err := rlp.DecodeBytes(data, account); err != nil { panic(err) } diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 2c9bd76ac1..bf288d924e 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -30,6 +30,7 @@ import ( "bytes" "encoding/binary" "fmt" + "math/big" "time" "github.com/ava-labs/subnet-evm/core/rawdb" @@ -286,15 +287,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that accMarker = dl.genMarker[:common.HashLength] } - nodeIt, err := accTrie.NodeIterator(accMarker) - if err != nil { - log.Error("Generator failed to create account iterator", "root", dl) - abort := <-dl.genAbort - dl.genStats = stats - close(abort) - return - } - accIt := trie.NewIterator(nodeIt) + accIt := trie.NewIterator(accTrie.NodeIterator(accMarker)) batch := dl.diskdb.NewBatch() // Iterate from the previous marker and continue generating the state snapshot @@ -303,11 +296,16 @@ func (dl *diskLayer) generate(stats *generatorStats) { // Retrieve the current account and flatten it into the internal format accountHash := common.BytesToHash(accIt.Key) - var acc types.StateAccount + var acc struct { + Nonce uint64 + Balance *big.Int + Root common.Hash + CodeHash []byte + } if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil { log.Crit("Invalid account encountered during snapshot creation", "err", err) } - data := types.SlimAccountRLP(acc) + data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash) // If the account is not yet in-progress, write it out if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) { @@ -341,15 +339,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength { storeMarker = dl.genMarker[common.HashLength:] } - nodeIt, err := storeTrie.NodeIterator(storeMarker) - if err != nil { - log.Error("Generator failed to create storage iterator", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err) - abort := <-dl.genAbort - dl.genStats = stats - close(abort) - return - } - storeIt := trie.NewIterator(nodeIt) + storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker)) for storeIt.Next() { rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value) stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value)) @@ -409,5 +399,5 @@ func (dl *diskLayer) generate(stats *generatorStats) { } func newMeteredSnapshotCache(size int) *utils.MeteredCache { - return utils.NewMeteredCache(size, snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency) + return utils.NewMeteredCache(size, "", snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency) } diff --git a/core/state/snapshot/generate_test.go b/core/state/snapshot/generate_test.go index 9eb19764dc..7b2c6d7874 100644 --- a/core/state/snapshot/generate_test.go +++ b/core/state/snapshot/generate_test.go @@ -62,9 +62,9 @@ func TestGeneration(t *testing.T) { var helper = newHelper() 
stRoot := helper.makeStorageTrie(common.Hash{}, []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, false) - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) @@ -97,16 +97,16 @@ func TestGenerateExistentState(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) root, snap := helper.CommitAndGenerate() @@ -170,17 +170,18 @@ func newHelper() *testHelper { } } -func (t *testHelper) addTrieAccount(acckey string, acc *types.StateAccount) { +func (t *testHelper) addTrieAccount(acckey 
string, acc *Account) { val, _ := rlp.EncodeToBytes(acc) t.accTrie.MustUpdate([]byte(acckey), val) } -func (t *testHelper) addSnapAccount(acckey string, acc *types.StateAccount) { +func (t *testHelper) addSnapAccount(acckey string, acc *Account) { + val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(acckey)) - rawdb.WriteAccountSnapshot(t.diskdb, key, types.SlimAccountRLP(*acc)) + rawdb.WriteAccountSnapshot(t.diskdb, key, val) } -func (t *testHelper) addAccount(acckey string, acc *types.StateAccount) { +func (t *testHelper) addAccount(acckey string, acc *Account) { t.addTrieAccount(acckey, acc) t.addSnapAccount(acckey, acc) } @@ -192,28 +193,28 @@ func (t *testHelper) addSnapStorage(accKey string, keys []string, vals []string) } } -func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) common.Hash { +func (t *testHelper) makeStorageTrie(owner common.Hash, keys []string, vals []string, commit bool) []byte { id := trie.StorageTrieID(types.EmptyRootHash, owner, types.EmptyRootHash) stTrie, _ := trie.NewStateTrie(id, t.triedb) for i, k := range keys { stTrie.MustUpdate([]byte(k), []byte(vals[i])) } if !commit { - return stTrie.Hash() + return stTrie.Hash().Bytes() } - root, nodes, _ := stTrie.Commit(false) + root, nodes := stTrie.Commit(false) if nodes != nil { t.nodes.Merge(nodes) } - return root + return root.Bytes() } func (t *testHelper) Commit() common.Hash { - root, nodes, _ := t.accTrie.Commit(true) + root, nodes := t.accTrie.Commit(true) if nodes != nil { t.nodes.Merge(nodes) } - t.triedb.Update(root, types.EmptyRootHash, 0, t.nodes, nil) + t.triedb.Update(root, types.EmptyRootHash, t.nodes) t.triedb.Commit(root, false) return root } @@ -246,28 +247,28 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { helper := newHelper() // Account one, empty root but non-empty database - helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) // Account two, non empty root but empty database stRoot := helper.makeStorageTrie(hashData([]byte("acc-2")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Miss slots { // Account three, non empty root but misses slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-3", []string{"key-2", "key-3"}, []string{"val-2", "val-3"}) // Account four, non empty root but misses slots in the middle helper.makeStorageTrie(hashData([]byte("acc-4")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + 
helper.addAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-4", []string{"key-1", "key-3"}, []string{"val-1", "val-3"}) // Account five, non empty root but misses slots in the end helper.makeStorageTrie(hashData([]byte("acc-5")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-5", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-5", []string{"key-1", "key-2"}, []string{"val-1", "val-2"}) } @@ -275,22 +276,22 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account six, non empty root but wrong slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-6")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-6", []string{"key-1", "key-2", "key-3"}, []string{"badval-1", "val-2", "val-3"}) // Account seven, non empty root but wrong slots in the middle helper.makeStorageTrie(hashData([]byte("acc-7")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-7", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-7", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "badval-2", "val-3"}) // Account eight, non empty root but wrong slots in the end helper.makeStorageTrie(hashData([]byte("acc-8")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-8", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-8", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-8", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "badval-3"}) // Account 9, non empty root but rotated slots helper.makeStorageTrie(hashData([]byte("acc-9")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-9", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-9", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-9", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-3", "val-2"}) } @@ -298,17 +299,17 @@ func TestGenerateExistentStateWithWrongStorage(t *testing.T) { { // Account 10, non empty root but extra slots in the beginning helper.makeStorageTrie(hashData([]byte("acc-10")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-10", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-10", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-10", []string{"key-0", "key-1", 
"key-2", "key-3"}, []string{"val-0", "val-1", "val-2", "val-3"}) // Account 11, non empty root but extra slots in the middle helper.makeStorageTrie(hashData([]byte("acc-11")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-11", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-11", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-11", []string{"key-1", "key-2", "key-2-1", "key-3"}, []string{"val-1", "val-2", "val-2-1", "val-3"}) // Account 12, non empty root but extra slots in the end helper.makeStorageTrie(hashData([]byte("acc-12")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-12", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-12", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-12", []string{"key-1", "key-2", "key-3", "key-4"}, []string{"val-1", "val-2", "val-3", "val-4"}) } @@ -348,25 +349,25 @@ func TestGenerateExistentStateWithWrongAccounts(t *testing.T) { // Missing accounts, only in the trie { - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning - helper.addTrieAccount("acc-4", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle - helper.addTrieAccount("acc-6", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Beginning + helper.addTrieAccount("acc-4", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // Middle + helper.addTrieAccount("acc-6", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // End } // Wrong accounts { - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-2", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: common.Hex2Bytes("0x1234")}) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addSnapAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addSnapAccount("acc-3", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) } // Extra accounts, only in the snap { - helper.addSnapAccount("acc-0", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning - helper.addSnapAccount("acc-5", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: common.Hex2Bytes("0x1234")}) // Middle - 
helper.addSnapAccount("acc-7", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // after the end + helper.addSnapAccount("acc-0", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // before the beginning + helper.addSnapAccount("acc-5", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: common.Hex2Bytes("0x1234")}) // Middle + helper.addSnapAccount("acc-7", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // after the end } root, snap := helper.CommitAndGenerate() @@ -395,9 +396,9 @@ func TestGenerateCorruptAccountTrie(t *testing.T) { // without any storage slots to keep the test smaller. helper := newHelper() - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0xc7a30f39aff471c95d8a837497ad0e49b65be475cc0953540f80cfcdbdcd9074 + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x19ead688e907b0fab07176120dceec244a72aff2f0aa51e8b827584e378772f4 root := helper.Commit() // Root: 0xa04693ea110a31037fb5ee814308a6f1d76bdab0b11676bdf4541d2de55ba978 @@ -429,16 +430,15 @@ func TestGenerateMissingStorageTrie(t *testing.T) { // two of which also has the same 3-slot storage trie attached. 
helper := newHelper() - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 + stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 - + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 root := helper.Commit() // Delete a storage trie root and ensure the generator chokes - helper.diskdb.Delete(stRoot.Bytes()) + helper.diskdb.Delete(stRoot) // We can only corrupt the disk database, so flush the tries out snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) select { @@ -463,12 +463,11 @@ func TestGenerateCorruptStorageTrie(t *testing.T) { // two of which also has the same 3-slot storage trie attached. 
helper := newHelper() - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x65145f923027566669a1ae5ccac66f945b55ff6eaeb17d2ea8e048b7d381f2d7 - stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 - + stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x1314700b81afc49f94db3623ef1df38f3ed18b73a1b7ea2f6c095118cf6118a0 + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) // 0x18a0f4d79cff4459642dd7604f303886ad9d77c30cf3d7d7cedb3a693ab6d371 + stRoot = helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) // 0xddefcd9376dd029653ef384bd2f0a126bb755fe84fdcc9e7cf421ba454f2bc67 + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) // 0x50815097425d000edfc8b3a4a13e175fc2bdcfee8bdfbf2d1ff61041d3c235b2 root := helper.Commit() // Delete a storage trie leaf and ensure the generator chokes @@ -499,7 +498,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -519,7 +518,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3", "val-4", "val-5"}, true, ) - acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -570,7 +569,7 @@ func TestGenerateWithManyExtraAccounts(t *testing.T) { []string{"val-1", "val-2", "val-3"}, true, ) - acc := &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate([]byte("acc-1"), val) // 0x9250573b9c18c664139f3b6a7a8081b7d8f8916a8fcc5d94feec6c29f5fd4e9e @@ -584,7 +583,7 @@ func 
TestGenerateWithManyExtraAccounts(t *testing.T) { { // 100 accounts exist only in snapshot for i := 0; i < 1000; i++ { - acc := &types.StateAccount{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(int64(i)), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte(fmt.Sprintf("acc-%d", i))) rawdb.WriteAccountSnapshot(helper.diskdb, key, val) @@ -620,7 +619,7 @@ func TestGenerateWithExtraBeforeAndAfter(t *testing.T) { } helper := newHelper() { - acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) helper.accTrie.MustUpdate(common.HexToHash("0x07").Bytes(), val) @@ -656,7 +655,7 @@ func TestGenerateWithMalformedSnapdata(t *testing.T) { } helper := newHelper() { - acc := &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()} + acc := &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()} val, _ := rlp.EncodeToBytes(acc) helper.accTrie.MustUpdate(common.HexToHash("0x03").Bytes(), val) @@ -693,7 +692,7 @@ func TestGenerateFromEmptySnap(t *testing.T) { for i := 0; i < 400; i++ { stRoot := helper.makeStorageTrie(hashData([]byte(fmt.Sprintf("acc-%d", i))), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) helper.addTrieAccount(fmt.Sprintf("acc-%d", i), - &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) } root, snap := helper.CommitAndGenerate() t.Logf("Root: %#x\n", root) // Root: 0x6f7af6d2e1a1bf2b84a3beb3f8b64388465fbc1e274ca5d5d3fc787ca78f59e4 @@ -729,7 +728,7 @@ func TestGenerateWithIncompleteStorage(t *testing.T) { for i := 0; i < 8; i++ { accKey := fmt.Sprintf("acc-%d", i) stRoot := helper.makeStorageTrie(hashData([]byte(accKey)), stKeys, stVals, true) - helper.addAccount(accKey, &types.StateAccount{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount(accKey, &Account{Balance: big.NewInt(int64(i)), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) var moddedKeys []string var moddedVals []string for ii := 0; ii < 8; ii++ { @@ -820,12 +819,12 @@ func populateDangling(disk ethdb.KeyValueStore) { func TestGenerateCompleteSnapshotWithDanglingStorage(t *testing.T) { var helper = newHelper() - stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addAccount("acc-2", &types.StateAccount{Balance: big.NewInt(1), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + stRoot := helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) + helper.addAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-2", &Account{Balance: big.NewInt(1), Root: types.EmptyRootHash.Bytes(), 
CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addAccount("acc-3", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addAccount("acc-3", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) helper.addSnapStorage("acc-1", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) helper.addSnapStorage("acc-3", []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}) @@ -856,11 +855,11 @@ func TestGenerateBrokenSnapshotWithDanglingStorage(t *testing.T) { var helper = newHelper() stRoot := helper.makeStorageTrie(hashData([]byte("acc-1")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-1", &types.StateAccount{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) - helper.addTrieAccount("acc-2", &types.StateAccount{Balance: big.NewInt(2), Root: types.EmptyRootHash, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-1", &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-2", &Account{Balance: big.NewInt(2), Root: types.EmptyRootHash.Bytes(), CodeHash: types.EmptyCodeHash.Bytes()}) helper.makeStorageTrie(hashData([]byte("acc-3")), []string{"key-1", "key-2", "key-3"}, []string{"val-1", "val-2", "val-3"}, true) - helper.addTrieAccount("acc-3", &types.StateAccount{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) + helper.addTrieAccount("acc-3", &Account{Balance: big.NewInt(3), Root: stRoot, CodeHash: types.EmptyCodeHash.Bytes()}) populateDangling(helper.diskdb) diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go index 4e324ee28b..04a61d4a12 100644 --- a/core/state/snapshot/iterator_fast.go +++ b/core/state/snapshot/iterator_fast.go @@ -32,7 +32,6 @@ import ( "sort" "github.com/ethereum/go-ethereum/common" - "golang.org/x/exp/slices" ) // weightedIterator is a iterator with an assigned weight. It is used to prioritise @@ -43,25 +42,32 @@ type weightedIterator struct { priority int } -func (it *weightedIterator) Cmp(other *weightedIterator) int { +// weightedIterators is a set of iterators implementing the sort.Interface. +type weightedIterators []*weightedIterator + +// Len implements sort.Interface, returning the number of active iterators. +func (its weightedIterators) Len() int { return len(its) } + +// Less implements sort.Interface, returning which of two iterators in the stack +// is before the other. +func (its weightedIterators) Less(i, j int) bool { // Order the iterators primarily by the account hashes - hashI := it.it.Hash() - hashJ := other.it.Hash() + hashI := its[i].it.Hash() + hashJ := its[j].it.Hash() switch bytes.Compare(hashI[:], hashJ[:]) { case -1: - return -1 + return true case 1: - return 1 + return false } // Same account/storage-slot in multiple layers, split by priority - if it.priority < other.priority { - return -1 - } - if it.priority > other.priority { - return 1 - } - return 0 + return its[i].priority < its[j].priority +} + +// Swap implements sort.Interface, swapping two entries in the iterator stack. 
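// As a hedged aside (not part of the reverted code): ordering a stack of
// these iterators through sort.Interface is simply
//
//	sort.Sort(its) // dispatches to the Len/Less/Swap methods defined here
//
// which yields the same ordering as the generic slices.SortFunc call this
// revert removes from init() further down.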
+func (its weightedIterators) Swap(i, j int) { + its[i], its[j] = its[j], its[i] } // fastIterator is a more optimized multi-layer iterator which maintains a @@ -73,7 +79,7 @@ type fastIterator struct { curAccount []byte curSlot []byte - iterators []*weightedIterator + iterators weightedIterators initiated bool account bool fail error @@ -170,7 +176,7 @@ func (fi *fastIterator) init() { } } // Re-sort the entire list - slices.SortFunc(fi.iterators, func(a, b *weightedIterator) int { return a.Cmp(b) }) + sort.Sort(fi.iterators) fi.initiated = false } diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index f31570f791..30d9882bbe 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -61,7 +61,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, // is present in the database (or crashed mid-update). baseBlockHash := rawdb.ReadSnapshotBlockHash(diskdb) if baseBlockHash == (common.Hash{}) { - return nil, false, errors.New("missing or corrupted snapshot, no snapshot block hash") + return nil, false, fmt.Errorf("missing or corrupted snapshot, no snapshot block hash") } if baseBlockHash != blockHash { return nil, false, fmt.Errorf("block hash stored on disk (%#x) does not match last accepted (%#x)", baseBlockHash, blockHash) diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 2b29225d89..0d9707d029 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -35,7 +35,6 @@ import ( "time" "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/metrics" "github.com/ava-labs/subnet-evm/trie" "github.com/ava-labs/subnet-evm/utils" @@ -125,7 +124,7 @@ type Snapshot interface { // Account directly retrieves the account associated with a particular hash in // the snapshot slim data format. - Account(hash common.Hash) (*types.SlimAccount, error) + Account(hash common.Hash) (*Account, error) // AccountRLP directly retrieves the account RLP associated with a particular // hash in the snapshot slim data format. @@ -945,7 +944,7 @@ func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot { // state sync uses iterators to access data, so this cache is not used. // initializing it out of caution. - cache: utils.NewMeteredCache(32*1024, "", 0), + cache: utils.NewMeteredCache(32*1024, "", "", 0), } } @@ -955,7 +954,7 @@ func NewTestTree(diskdb ethdb.KeyValueStore, blockHash, root common.Hash) *Tree diskdb: diskdb, root: root, blockHash: blockHash, - cache: utils.NewMeteredCache(128*256, "", 0), + cache: utils.NewMeteredCache(128*256, "", "", 0), created: time.Now(), } return &Tree{ diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index 12e6fcf603..72287e1f5b 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -51,10 +51,11 @@ func randomHash() common.Hash { // randomAccount generates a random account and returns it RLP encoded. 
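// (Revert note: the snapshot-local Account type stores Root as a []byte
// slice rather than the fixed-size common.Hash of types.StateAccount,
// which is why the generated hash is sliced with root[:] below.)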
func randomAccount() []byte { - a := &types.StateAccount{ + root := randomHash() + a := Account{ Balance: big.NewInt(rand.Int63()), Nonce: rand.Uint64(), - Root: randomHash(), + Root: root[:], CodeHash: types.EmptyCodeHash[:], } data, _ := rlp.EncodeToBytes(a) @@ -695,7 +696,7 @@ func TestReadStateDuringFlattening(t *testing.T) { snap := snaps.Snapshot(diffRootC) // Register the testing hook to access the state after flattening - var result = make(chan *types.SlimAccount) + var result = make(chan *Account) snaps.onFlatten = func() { // Spin up a thread to read the account from the pre-created // snapshot handler. It's expected to be blocked. diff --git a/core/txpool/blobpool/slotter.go b/core/state/snapshot/sort.go similarity index 53% rename from core/txpool/blobpool/slotter.go rename to core/state/snapshot/sort.go index 656dc51d8c..6254d37943 100644 --- a/core/txpool/blobpool/slotter.go +++ b/core/state/snapshot/sort.go @@ -1,4 +1,4 @@ -// (c) 2024, Ava Labs, Inc. +// (c) 2019-2020, Ava Labs, Inc. // // This file is a derived work, based on the go-ethereum library whose original // notices appear below. @@ -8,7 +8,7 @@ // // Much love to the original authors for their work. // ********** -// Copyright 2023 The go-ethereum Authors +// Copyright 2019 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -24,25 +24,23 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package blobpool +package snapshot -// newSlotter creates a helper method for the Billy datastore that returns the -// individual shelf sizes used to store transactions in. -// -// The slotter will create shelves for each possible blob count + some tx metadata -// wiggle room, up to the max permitted limits. -// -// The slotter also creates a shelf for 0-blob transactions. Whilst those are not -// allowed in the current protocol, having an empty shelf is not a relevant use -// of resources, but it makes stress testing with junk transactions simpler. -func newSlotter() func() (uint32, bool) { - slotsize := uint32(txAvgSize) - slotsize -= uint32(blobSize) // underflows, it's ok, will overflow back in the first return +import ( + "bytes" + + "github.com/ethereum/go-ethereum/common" +) + +// hashes is a helper to implement sort.Interface. +type hashes []common.Hash + +// Len is the number of elements in the collection. +func (hs hashes) Len() int { return len(hs) } - return func() (size uint32, done bool) { - slotsize += blobSize - finished := slotsize > maxBlobsPerTransaction*blobSize+txMaxSize +// Less reports whether the element with index i should sort before the element +// with index j. +func (hs hashes) Less(i, j int) bool { return bytes.Compare(hs[i][:], hs[j][:]) < 0 } - return slotsize, finished - } -} +// Swap swaps the elements with indexes i and j. +func (hs hashes) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] } diff --git a/core/state/state_object.go b/core/state/state_object.go index 8846b81e49..d233fc2718 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -31,6 +31,7 @@ import ( "fmt" "io" "math/big" + "sync" "time" "github.com/ava-labs/subnet-evm/core/types" @@ -67,38 +68,33 @@ func (s Storage) Copy() Storage { // stateObject represents an Ethereum account which is being modified. // // The usage pattern is as follows: -// - First you need to obtain a state object. 
-// - Account values as well as storages can be accessed and modified through the object. -// - Finally, call commit to return the changes of storage trie and update account data. +// First you need to obtain a state object. +// Account values can be accessed and modified through the object. +// Finally, call commitTrie to write the modified storage trie into a database. type stateObject struct { + address common.Address + addrHash common.Hash // hash of ethereum address of the account + // dataLock protects the [data] field to prevent a race condition + // in the transaction pool tests. TODO remove after re-implementing + // tx pool to be synchronous. + dataLock sync.RWMutex + data types.StateAccount db *StateDB - address common.Address // address of ethereum account - addrHash common.Hash // hash of ethereum address of the account - origin *types.StateAccount // Account original data without any change applied, nil means it was not existent - data types.StateAccount // Account data with all mutations applied in the scope of block // Write caches. trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded - originStorage Storage // Storage cache of original entries to dedup rewrites + originStorage Storage // Storage cache of original entries to dedup rewrites, reset for every transaction pendingStorage Storage // Storage entries that need to be flushed to disk, at the end of an entire block - dirtyStorage Storage // Storage entries that have been modified in the current transaction execution, reset for every transaction + dirtyStorage Storage // Storage entries that have been modified in the current transaction execution // Cache flags. + // When an object is marked suicided it will be deleted from the trie + // during the "update" phase of the state transition. dirtyCode bool // true if the code was updated - - // Flag whether the account was marked as self-destructed. The self-destructed account - // is still accessible in the scope of same transaction. - selfDestructed bool - - // Flag whether the account was marked as deleted. A self-destructed account - // or an account that is considered as empty will be marked as deleted at - // the end of transaction and no longer accessible anymore. - deleted bool - - // Flag whether the object was created in the current transaction - created bool + suicided bool + deleted bool } // empty returns whether the account is considered empty. @@ -107,17 +103,21 @@ func (s *stateObject) empty() bool { } // newObject creates a state object. 
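// In the reverted constructor, zero-value fields are normalized up front:
// a nil Balance becomes new(big.Int), a nil CodeHash becomes the empty-code
// hash, and a zero Root becomes types.EmptyRootHash, so callers such as
// createObject can pass a bare types.StateAccount{} by value.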
-func newObject(db *StateDB, address common.Address, acct *types.StateAccount) *stateObject { - origin := acct - if acct == nil { - acct = types.NewEmptyStateAccount() +func newObject(db *StateDB, address common.Address, data types.StateAccount) *stateObject { + if data.Balance == nil { + data.Balance = new(big.Int) + } + if data.CodeHash == nil { + data.CodeHash = types.EmptyCodeHash.Bytes() + } + if data.Root == (common.Hash{}) { + data.Root = types.EmptyRootHash } return &stateObject{ db: db, address: address, addrHash: crypto.Keccak256Hash(address[:]), - origin: origin, - data: *acct, + data: data, originStorage: make(Storage), pendingStorage: make(Storage), dirtyStorage: make(Storage), @@ -129,8 +129,8 @@ func (s *stateObject) EncodeRLP(w io.Writer) error { return rlp.Encode(w, &s.data) } -func (s *stateObject) markSelfdestructed() { - s.selfDestructed = true +func (s *stateObject) markSuicided() { + s.suicided = true } func (s *stateObject) touch() { @@ -147,15 +147,17 @@ func (s *stateObject) touch() { // getTrie returns the associated storage trie. The trie will be opened // if it's not loaded previously. An error will be returned if trie can't // be loaded. -func (s *stateObject) getTrie() (Trie, error) { +func (s *stateObject) getTrie(db Database) (Trie, error) { if s.trie == nil { // Try fetching from prefetcher first + // We don't prefetch empty tries if s.data.Root != types.EmptyRootHash && s.db.prefetcher != nil { - // When the miner is creating the pending state, there is no prefetcher + // When the miner is creating the pending state, there is no + // prefetcher s.trie = s.db.prefetcher.trie(s.addrHash, s.data.Root) } if s.trie == nil { - tr, err := s.db.db.OpenStorageTrie(s.db.originalRoot, s.address, s.data.Root) + tr, err := db.OpenStorageTrie(s.db.originalRoot, s.addrHash, s.data.Root) if err != nil { return nil, err } @@ -166,18 +168,18 @@ func (s *stateObject) getTrie() (Trie, error) { } // GetState retrieves a value from the account storage trie. -func (s *stateObject) GetState(key common.Hash) common.Hash { +func (s *stateObject) GetState(db Database, key common.Hash) common.Hash { // If we have a dirty value for this state entry, return it value, dirty := s.dirtyStorage[key] if dirty { return value } // Otherwise return the entry's original value - return s.GetCommittedState(key) + return s.GetCommittedState(db, key) } // GetCommittedState retrieves a value from the committed account storage trie. -func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { +func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Hash { // If we have a pending write or clean cached, return that if value, pending := s.pendingStorage[key]; pending { return value @@ -196,9 +198,8 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { } // If no live objects are available, attempt to use snapshots var ( - enc []byte - err error - value common.Hash + enc []byte + err error ) if s.db.snap != nil { start := time.Now() @@ -206,23 +207,16 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { if metrics.EnabledExpensive { s.db.SnapshotStorageReads += time.Since(start) } - if len(enc) > 0 { - _, content, _, err := rlp.Split(enc) - if err != nil { - s.db.setError(err) - } - value.SetBytes(content) - } } // If the snapshot is unavailable or reading from it fails, load from the database. 
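// (Lookup order in this reverted path: dirty and pending caches first,
// then the snapshot, and only on a snapshot miss or error the storage
// trie below; the shared RLP decode at the end handles both sources.)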
if s.db.snap == nil || err != nil { start := time.Now() - tr, err := s.getTrie() + tr, err := s.getTrie(db) if err != nil { s.db.setError(err) return common.Hash{} } - val, err := tr.GetStorage(s.address, key.Bytes()) + enc, err = tr.GetStorage(s.address, key.Bytes()) if metrics.EnabledExpensive { s.db.StorageReads += time.Since(start) } @@ -230,16 +224,23 @@ func (s *stateObject) GetCommittedState(key common.Hash) common.Hash { s.db.setError(err) return common.Hash{} } - value.SetBytes(val) + } + var value common.Hash + if len(enc) > 0 { + _, content, _, err := rlp.Split(enc) + if err != nil { + s.db.setError(err) + } + value.SetBytes(content) } s.originStorage[key] = value return value } // SetState updates a value in account storage. -func (s *stateObject) SetState(key, value common.Hash) { +func (s *stateObject) SetState(db Database, key, value common.Hash) { // If the new value is the same as old, don't set - prev := s.GetState(key) + prev := s.GetState(db, key) if prev == value { return } @@ -277,7 +278,7 @@ func (s *stateObject) finalise(prefetch bool) { // updateTrie writes cached storage modifications into the object's storage trie. // It will return nil if the trie has not been loaded and no changes have been // made. An error will be returned if the trie can't be loaded/updated correctly. -func (s *stateObject) updateTrie() (Trie, error) { +func (s *stateObject) updateTrie(db Database) (Trie, error) { // Make sure all dirty slots are finalized into the pending storage area s.finalise(false) // Don't prefetch anymore, pull directly if need be if len(s.pendingStorage) == 0 { @@ -290,10 +291,9 @@ func (s *stateObject) updateTrie() (Trie, error) { // The snapshot storage map for the object var ( storage map[common.Hash][]byte - origin map[common.Hash][]byte hasher = s.db.hasher ) - tr, err := s.getTrie() + tr, err := s.getTrie(db) if err != nil { s.db.setError(err) return nil, err @@ -305,11 +305,9 @@ func (s *stateObject) updateTrie() (Trie, error) { if value == s.originStorage[key] { continue } - prev := s.originStorage[key] s.originStorage[key] = value - // rlp-encoded value to be used by the snapshot - var snapshotVal []byte + var v []byte if (value == common.Hash{}) { if err := tr.DeleteStorage(s.address, key[:]); err != nil { s.db.setError(err) @@ -317,43 +315,25 @@ func (s *stateObject) updateTrie() (Trie, error) { } s.db.StorageDeleted += 1 } else { - trimmedVal := common.TrimLeftZeroes(value[:]) // Encoding []byte cannot fail, ok to ignore the error. 
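// (Storage values are left-trimmed of leading zero bytes before RLP
// encoding, the canonical compact form used for slots in both the trie
// and the snapshot.)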
- snapshotVal, _ = rlp.EncodeToBytes(trimmedVal) - if err := tr.UpdateStorage(s.address, key[:], trimmedVal); err != nil { + v, _ = rlp.EncodeToBytes(common.TrimLeftZeroes(value[:])) + if err := tr.UpdateStorage(s.address, key[:], v); err != nil { s.db.setError(err) return nil, err } s.db.StorageUpdated += 1 } - // Cache the mutated storage slots until commit - if storage == nil { - if storage = s.db.storages[s.addrHash]; storage == nil { - storage = make(map[common.Hash][]byte) - s.db.storages[s.addrHash] = storage - } - } - khash := crypto.HashData(hasher, key[:]) - storage[khash] = snapshotVal // snapshotVal will be nil if it's deleted - - // Cache the original value of mutated storage slots - if origin == nil { - if origin = s.db.storagesOrigin[s.address]; origin == nil { - origin = make(map[common.Hash][]byte) - s.db.storagesOrigin[s.address] = origin - } - } - // Track the original value of slot only if it's mutated first time - if _, ok := origin[khash]; !ok { - if prev == (common.Hash{}) { - origin[khash] = nil // nil if it was not present previously - } else { - // Encoding []byte cannot fail, ok to ignore the error. - b, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(prev[:])) - origin[khash] = b + // If state snapshotting is active, cache the data til commit + if s.db.snap != nil { + if storage == nil { + // Retrieve the old storage map, if available, create a new one otherwise + if storage = s.db.snapStorage[s.addrHash]; storage == nil { + storage = make(map[common.Hash][]byte) + s.db.snapStorage[s.addrHash] = storage + } } + storage[crypto.HashData(hasher, key[:])] = v // v will be nil if it's deleted } - // Cache the items for preloading usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure } if s.db.prefetcher != nil { @@ -367,8 +347,8 @@ func (s *stateObject) updateTrie() (Trie, error) { // UpdateRoot sets the trie root to the current root hash of. An error // will be returned if trie root hash is not computed correctly. -func (s *stateObject) updateRoot() { - tr, err := s.updateTrie() +func (s *stateObject) updateRoot(db Database) { + tr, err := s.updateTrie(db) if err != nil { return } @@ -383,29 +363,23 @@ func (s *stateObject) updateRoot() { s.data.Root = tr.Hash() } -// commit returns the changes made in storage trie and updates the account data. -func (s *stateObject) commit() (*trienode.NodeSet, error) { - tr, err := s.updateTrie() +// commitTrie submits the storage changes into the storage trie and re-computes +// the root. Besides, all trie changes will be collected in a nodeset and returned. 
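// A typical caller (sketch; merged here is an assumed *trienode.MergedNodeSet)
// would do:
//
//	set, err := obj.commitTrie(db)
//	if err == nil && set != nil {
//		err = merged.Merge(set) // fold storage changes into the global set
//	}
//
// mirroring how StateDB.commit consumes the returned nodeset further down.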
+func (s *stateObject) commitTrie(db Database) (*trienode.NodeSet, error) { + tr, err := s.updateTrie(db) if err != nil { return nil, err } // If nothing changed, don't bother with committing anything if tr == nil { - s.origin = s.data.Copy() return nil, nil } // Track the amount of time wasted on committing the storage trie if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageCommits += time.Since(start) }(time.Now()) } - root, nodes, err := tr.Commit(false) - if err != nil { - return nil, err - } + root, nodes := tr.Commit(false) s.data.Root = root - - // Update original account data after commit - s.origin = s.data.Copy() return nodes, nil } @@ -445,24 +419,18 @@ func (s *stateObject) setBalance(amount *big.Int) { } func (s *stateObject) deepCopy(db *StateDB) *stateObject { - obj := &stateObject{ - db: db, - address: s.address, - addrHash: s.addrHash, - origin: s.origin, - data: s.data, - } + stateObject := newObject(db, s.address, s.data) if s.trie != nil { - obj.trie = db.db.CopyTrie(s.trie) + stateObject.trie = db.db.CopyTrie(s.trie) } - obj.code = s.code - obj.dirtyStorage = s.dirtyStorage.Copy() - obj.originStorage = s.originStorage.Copy() - obj.pendingStorage = s.pendingStorage.Copy() - obj.selfDestructed = s.selfDestructed - obj.dirtyCode = s.dirtyCode - obj.deleted = s.deleted - return obj + stateObject.code = s.code + stateObject.dirtyStorage = s.dirtyStorage.Copy() + stateObject.originStorage = s.originStorage.Copy() + stateObject.pendingStorage = s.pendingStorage.Copy() + stateObject.suicided = s.suicided + stateObject.dirtyCode = s.dirtyCode + stateObject.deleted = s.deleted + return stateObject } // @@ -475,14 +443,14 @@ func (s *stateObject) Address() common.Address { } // Code returns the contract code associated with this object, if any. -func (s *stateObject) Code() []byte { +func (s *stateObject) Code(db Database) []byte { if s.code != nil { return s.code } if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return nil } - code, err := s.db.db.ContractCode(s.address, common.BytesToHash(s.CodeHash())) + code, err := db.ContractCode(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code hash %x: %v", s.CodeHash(), err)) } @@ -493,14 +461,14 @@ func (s *stateObject) Code() []byte { // CodeSize returns the size of the contract code associated with this object, // or zero if none. This method is an almost mirror of Code, but uses a cache // inside the database to avoid loading codes seen recently. 
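// (The database-side cache referenced here maps code hashes to lengths, so
// EXTCODESIZE-style queries avoid loading full bytecode blobs.)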
-func (s *stateObject) CodeSize() int { +func (s *stateObject) CodeSize(db Database) int { if s.code != nil { return len(s.code) } if bytes.Equal(s.CodeHash(), types.EmptyCodeHash.Bytes()) { return 0 } - size, err := s.db.db.ContractCodeSize(s.address, common.BytesToHash(s.CodeHash())) + size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.CodeHash())) if err != nil { s.db.setError(fmt.Errorf("can't load code size %x: %v", s.CodeHash(), err)) } @@ -508,7 +476,7 @@ func (s *stateObject) CodeSize() int { } func (s *stateObject) SetCode(codeHash common.Hash, code []byte) { - prevcode := s.Code() + prevcode := s.Code(s.db.db) s.db.journal.append(codeChange{ account: &s.address, prevhash: s.CodeHash(), @@ -532,6 +500,8 @@ func (s *stateObject) SetNonce(nonce uint64) { } func (s *stateObject) setNonce(nonce uint64) { + s.dataLock.Lock() + defer s.dataLock.Unlock() s.data.Nonce = nonce } @@ -544,5 +514,7 @@ func (s *stateObject) Balance() *big.Int { } func (s *stateObject) Nonce() uint64 { + s.dataLock.RLock() + defer s.dataLock.RUnlock() return s.data.Nonce } diff --git a/core/state/state_test.go b/core/state/state_test.go index 547a599611..96d9570cff 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -40,22 +40,21 @@ import ( "github.com/ethereum/go-ethereum/ethdb" ) -type stateEnv struct { +type stateTest struct { db ethdb.Database state *StateDB } -func newStateEnv() *stateEnv { +func newStateTest() *stateTest { db := rawdb.NewMemoryDatabase() sdb, _ := New(types.EmptyRootHash, NewDatabase(db), nil) - return &stateEnv{db: db, state: sdb} + return &stateTest{db: db, state: sdb} } func TestIterativeDump(t *testing.T) { db := rawdb.NewMemoryDatabase() - tdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) - sdb, _ := New(types.EmptyRootHash, tdb, nil) - s := &stateEnv{db: db, state: sdb} + sdb, _ := New(types.EmptyRootHash, NewDatabaseWithConfig(db, &trie.Config{Preimages: true}), nil) + s := &stateTest{db: db, state: sdb} // generate a few entries obj1 := s.state.GetOrNewStateObject(common.BytesToAddress([]byte{0x01})) @@ -70,8 +69,7 @@ func TestIterativeDump(t *testing.T) { // write some of them to the trie s.state.updateStateObject(obj1) s.state.updateStateObject(obj2) - root, _ := s.state.Commit(0, false, false) - s.state, _ = New(root, tdb, nil) + s.state.Commit(false, false) b := &bytes.Buffer{} s.state.IterativeDump(nil, json.NewEncoder(b)) diff --git a/core/state/statedb.go b/core/state/statedb.go index 6fc489c228..3481669989 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -42,7 +42,6 @@ import ( "github.com/ava-labs/subnet-evm/predicate" "github.com/ava-labs/subnet-evm/trie" "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" @@ -68,38 +67,27 @@ func (n *proofList) Delete(key []byte) error { // StateDB structs within the ethereum protocol are used to store anything // within the merkle trie. StateDBs take care of caching and storing // nested states. It's the general query interface to retrieve: -// // * Contracts // * Accounts -// -// Once the state is committed, tries cached in stateDB (including account -// trie, storage tries) will no longer be functional. A new state instance -// must be created with new root and updated database for accessing post- -// commit states. 
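// After this revert, per-block snapshot diffs live in the snapAccounts and
// snapStorage maps below (keyed by address hash and populated only while a
// snapshot is active) rather than in the deleted accounts/storages origin maps.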
type StateDB struct { db Database prefetcher *triePrefetcher trie Trie hasher crypto.KeccakState - snap snapshot.Snapshot // Nil if snapshot is not available // originalRoot is the pre-state root, before any changes were made. // It will be updated when the Commit is called. originalRoot common.Hash - // These maps hold the state changes (including the corresponding - // original value) that occurred in this **block**. - accounts map[common.Hash][]byte // The mutated accounts in 'slim RLP' encoding - storages map[common.Hash]map[common.Hash][]byte // The mutated slots in prefix-zero trimmed rlp format - accountsOrigin map[common.Address][]byte // The original value of mutated accounts in 'slim RLP' encoding - storagesOrigin map[common.Address]map[common.Hash][]byte // The original value of mutated slots in prefix-zero trimmed rlp format + snap snapshot.Snapshot + snapAccounts map[common.Hash][]byte + snapStorage map[common.Hash]map[common.Hash][]byte - // This map holds 'live' objects, which will get modified while processing - // a state transition. + // This map holds 'live' objects, which will get modified while processing a state transition. stateObjects map[common.Address]*stateObject - stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie - stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution - stateObjectsDestruct map[common.Address]*types.StateAccount // State objects destructed in the block along with its previous value + stateObjectsPending map[common.Address]struct{} // State objects finalized but not yet written to the trie + stateObjectsDirty map[common.Address]struct{} // State objects modified in the current execution + stateObjectsDestruct map[common.Address]struct{} // State objects destructed in the block // DB error. // State objects are used by the consensus core and VM which are @@ -113,13 +101,11 @@ type StateDB struct { // The refund counter, also used by state transitioning. refund uint64 - // The tx context and all occurred logs in the scope of transaction. thash common.Hash txIndex int logs map[common.Hash][]*types.Log logSize uint - // Preimages occurred seen by VM in the scope of block. 
preimages map[common.Hash][]byte // Per-transaction access list @@ -179,14 +165,10 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St db: db, trie: tr, originalRoot: root, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), stateObjects: make(map[common.Address]*stateObject), stateObjectsPending: make(map[common.Address]struct{}), stateObjectsDirty: make(map[common.Address]struct{}), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount), + stateObjectsDestruct: make(map[common.Address]struct{}), logs: make(map[common.Hash][]*types.Log), preimages: make(map[common.Hash][]byte), journal: newJournal(), @@ -200,6 +182,8 @@ func NewWithSnapshot(root common.Hash, db Database, snap snapshot.Snapshot) (*St return nil, fmt.Errorf("cannot create new statedb for root: %s, using snapshot with mismatched root: %s", root, snap.Root().Hex()) } sdb.snap = snap + sdb.snapAccounts = make(map[common.Hash][]byte) + sdb.snapStorage = make(map[common.Hash]map[common.Hash][]byte) } return sdb, nil } @@ -312,7 +296,7 @@ func (s *StateDB) AddRefund(gas uint64) { } // SubRefund removes gas from the refund counter. -// This method will set the refund counter to 0 if the gas is greater than the current refund. +// This method will panic if the refund counter goes below zero func (s *StateDB) SubRefund(gas uint64) { s.journal.append(refundChange{prev: s.refund}) if gas > s.refund { @@ -324,7 +308,7 @@ func (s *StateDB) SubRefund(gas uint64) { } // Exist reports whether the given account address exists in the state. -// Notably this also returns true for self-destructed accounts. +// Notably this also returns true for suicided accounts. func (s *StateDB) Exist(addr common.Address) bool { return s.getStateObject(addr) != nil } @@ -362,7 +346,7 @@ func (s *StateDB) TxIndex() int { func (s *StateDB) GetCode(addr common.Address) []byte { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.Code() + return stateObject.Code(s.db) } return nil } @@ -370,7 +354,7 @@ func (s *StateDB) GetCode(addr common.Address) []byte { func (s *StateDB) GetCodeSize(addr common.Address) int { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.CodeSize() + return stateObject.CodeSize(s.db) } return 0 } @@ -387,7 +371,7 @@ func (s *StateDB) GetCodeHash(addr common.Address) common.Hash { func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.GetState(hash) + return stateObject.GetState(s.db, hash) } return common.Hash{} } @@ -400,7 +384,7 @@ func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { // GetProofByHash returns the Merkle proof for a given account. 
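// (The reverted trie API takes an extra fromLevel argument to Prove;
// passing 0, as below, emits the proof starting from the root node.)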
func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { var proof proofList - err := s.trie.Prove(addrHash[:], &proof) + err := s.trie.Prove(addrHash[:], 0, &proof) return proof, err } @@ -414,7 +398,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, return nil, errors.New("storage trie for requested address does not exist") } var proof proofList - err = trie.Prove(crypto.Keccak256(key.Bytes()), &proof) + err = trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) if err != nil { return nil, err } @@ -425,7 +409,7 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, func (s *StateDB) GetCommittedState(addr common.Address, hash common.Hash) common.Hash { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.GetCommittedState(hash) + return stateObject.GetCommittedState(s.db, hash) } return common.Hash{} } @@ -444,16 +428,16 @@ func (s *StateDB) StorageTrie(addr common.Address) (Trie, error) { return nil, nil } cpy := stateObject.deepCopy(s) - if _, err := cpy.updateTrie(); err != nil { + if _, err := cpy.updateTrie(s.db); err != nil { return nil, err } - return cpy.getTrie() + return cpy.getTrie(s.db) } -func (s *StateDB) HasSelfDestructed(addr common.Address) bool { +func (s *StateDB) HasSuicided(addr common.Address) bool { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.selfDestructed + return stateObject.suicided } return false } @@ -502,59 +486,44 @@ func (s *StateDB) SetCode(addr common.Address, code []byte) { func (s *StateDB) SetState(addr common.Address, key, value common.Hash) { stateObject := s.GetOrNewStateObject(addr) if stateObject != nil { - stateObject.SetState(key, value) + stateObject.SetState(s.db, key, value) } } // SetStorage replaces the entire storage for the specified account with given -// storage. This function should only be used for debugging and the mutations -// must be discarded afterwards. +// storage. This function should only be used for debugging. func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) { // SetStorage needs to wipe existing storage. We achieve this by pretending // that the account self-destructed earlier in this block, by flagging // it in stateObjectsDestruct. The effect of doing so is that storage lookups // will not hit disk, since it is assumed that the disk-data is belonging // to a previous incarnation of the object. - // - // TODO(rjl493456442) this function should only be supported by 'unwritable' - // state and all mutations made should all be discarded afterwards. - if _, ok := s.stateObjectsDestruct[addr]; !ok { - s.stateObjectsDestruct[addr] = nil - } + s.stateObjectsDestruct[addr] = struct{}{} stateObject := s.GetOrNewStateObject(addr) for k, v := range storage { - stateObject.SetState(k, v) + stateObject.SetState(s.db, k, v) } } -// SelfDestruct marks the given account as selfdestructed. +// Suicide marks the given account as suicided. // This clears the account balance. // // The account's state object is still available until the state is committed, -// getStateObject will return a non-nil account after SelfDestruct. -func (s *StateDB) SelfDestruct(addr common.Address) { +// getStateObject will return a non-nil account after Suicide. 
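// A minimal sketch (addr is a hypothetical account address) of the
// reverted semantics:
//
//	if db.Suicide(addr) {               // false when the account does not exist
//		cleared := db.HasSuicided(addr) // true for the rest of the transaction
//		_ = cleared
//	}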
+func (s *StateDB) Suicide(addr common.Address) bool { stateObject := s.getStateObject(addr) if stateObject == nil { - return + return false } - s.journal.append(selfDestructChange{ + s.journal.append(suicideChange{ account: &addr, - prev: stateObject.selfDestructed, + prev: stateObject.suicided, prevbalance: new(big.Int).Set(stateObject.Balance()), }) - stateObject.markSelfdestructed() + stateObject.markSuicided() stateObject.data.Balance = new(big.Int) -} - -func (s *StateDB) Selfdestruct6780(addr common.Address) { - stateObject := s.getStateObject(addr) - if stateObject == nil { - return - } - if stateObject.created { - s.SelfDestruct(addr) - } + return true } // SetTransientState sets transient storage for a given account. It @@ -599,24 +568,13 @@ func (s *StateDB) updateStateObject(obj *stateObject) { if err := s.trie.UpdateAccount(addr, &obj.data); err != nil { s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr[:], err)) } - if obj.dirtyCode { - s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code) - } - // Cache the data until commit. Note, this update mechanism is not symmetric - // to the deletion, because whereas it is enough to track account updates - // at commit time, deletions need tracking at transaction boundary level to - // ensure we capture state clearing. - s.accounts[obj.addrHash] = types.SlimAccountRLP(obj.data) - - // Track the original value of mutated account, nil means it was not present. - // Skip if it has been tracked (because updateStateObject may be called - // multiple times in a block). - if _, ok := s.accountsOrigin[obj.address]; !ok { - if obj.origin == nil { - s.accountsOrigin[obj.address] = nil - } else { - s.accountsOrigin[obj.address] = types.SlimAccountRLP(*obj.origin) - } + + // If state snapshotting is active, cache the data til commit. Note, this + // update mechanism is not symmetric to the deletion, because whereas it is + // enough to track account updates at commit time, deletions need tracking + // at transaction boundary level to ensure we capture state clearing. + if s.snap != nil { + s.snapAccounts[obj.addrHash] = snapshot.SlimAccountRLP(obj.data.Nonce, obj.data.Balance, obj.data.Root, obj.data.CodeHash) } } @@ -695,7 +653,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { } } // Insert into the live set - obj := newObject(s, addr, data) + obj := newObject(s, addr, *data) s.setStateObject(obj) return obj } @@ -717,40 +675,20 @@ func (s *StateDB) GetOrNewStateObject(addr common.Address) *stateObject { // the given address, it is overwritten and returned as the second return value. func (s *StateDB) createObject(addr common.Address) (newobj, prev *stateObject) { prev = s.getDeletedStateObject(addr) // Note, prev might have been deleted, we need that! - newobj = newObject(s, addr, nil) + + var prevdestruct bool + if prev != nil { + _, prevdestruct = s.stateObjectsDestruct[prev.address] + if !prevdestruct { + s.stateObjectsDestruct[prev.address] = struct{}{} + } + } + newobj = newObject(s, addr, types.StateAccount{}) if prev == nil { s.journal.append(createObjectChange{account: &addr}) } else { - // The original account should be marked as destructed and all cached - // account and storage data should be cleared as well. Note, it must - // be done here, otherwise the destruction event of "original account" - // will be lost. 
- _, prevdestruct := s.stateObjectsDestruct[prev.address] - if !prevdestruct { - s.stateObjectsDestruct[prev.address] = prev.origin - } - // There may be some cached account/storage data already since IntermediateRoot - // will be called for each transaction before byzantium fork which will always - // cache the latest account/storage data. - prevAccount, ok := s.accountsOrigin[prev.address] - s.journal.append(resetObjectChange{ - account: &addr, - prev: prev, - prevdestruct: prevdestruct, - prevAccount: s.accounts[prev.addrHash], - prevStorage: s.storages[prev.addrHash], - prevAccountOriginExist: ok, - prevAccountOrigin: prevAccount, - prevStorageOrigin: s.storagesOrigin[prev.address], - }) - delete(s.accounts, prev.addrHash) - delete(s.storages, prev.addrHash) - delete(s.accountsOrigin, prev.address) - delete(s.storagesOrigin, prev.address) + s.journal.append(resetObjectChange{prev: prev, prevdestruct: prevdestruct}) } - - newobj.created = true - s.setStateObject(newobj) if prev != nil && !prev.deleted { return newobj, prev @@ -775,23 +713,19 @@ func (s *StateDB) CreateAccount(addr common.Address) { } } -func (s *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { - so := s.getStateObject(addr) +func (db *StateDB) ForEachStorage(addr common.Address, cb func(key, value common.Hash) bool) error { + so := db.getStateObject(addr) if so == nil { return nil } - tr, err := so.getTrie() - if err != nil { - return err - } - trieIt, err := tr.NodeIterator(nil) + tr, err := so.getTrie(db.db) if err != nil { return err } - it := trie.NewIterator(trieIt) + it := trie.NewIterator(tr.NodeIterator(nil)) for it.Next() { - key := common.BytesToHash(s.trie.GetKey(it.Key)) + key := common.BytesToHash(db.trie.GetKey(it.Key)) if value, dirty := so.dirtyStorage[key]; dirty { if !cb(key, value) { return nil @@ -833,26 +767,16 @@ func (s *StateDB) Copy() *StateDB { db: s.db, trie: s.db.CopyTrie(s.trie), originalRoot: s.originalRoot, - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountsOrigin: make(map[common.Address][]byte), - storagesOrigin: make(map[common.Address]map[common.Hash][]byte), stateObjects: make(map[common.Address]*stateObject, len(s.journal.dirties)), stateObjectsPending: make(map[common.Address]struct{}, len(s.stateObjectsPending)), stateObjectsDirty: make(map[common.Address]struct{}, len(s.journal.dirties)), - stateObjectsDestruct: make(map[common.Address]*types.StateAccount, len(s.stateObjectsDestruct)), + stateObjectsDestruct: make(map[common.Address]struct{}, len(s.stateObjectsDestruct)), refund: s.refund, logs: make(map[common.Hash][]*types.Log, len(s.logs)), logSize: s.logSize, preimages: make(map[common.Hash][]byte, len(s.preimages)), journal: newJournal(), hasher: crypto.NewKeccakState(), - - // In order for the block producer to be able to use and make additions - // to the snapshot tree, we need to copy that as well. Otherwise, any - // block mined by ourselves will cause gaps in the tree, and force the - // miner to operate trie-backed only. - snap: s.snap, } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { @@ -886,18 +810,10 @@ func (s *StateDB) Copy() *StateDB { } state.stateObjectsDirty[addr] = struct{}{} } - // Deep copy the destruction markers. - for addr, value := range s.stateObjectsDestruct { - state.stateObjectsDestruct[addr] = value + // Deep copy the destruction flag. 
+ for addr := range s.stateObjectsDestruct { + state.stateObjectsDestruct[addr] = struct{}{} } - // Deep copy the state changes made in the scope of block - // along with their original values. - state.accounts = copySet(s.accounts) - state.storages = copy2DSet(s.storages) - state.accountsOrigin = copySet(state.accountsOrigin) - state.storagesOrigin = copy2DSet(state.storagesOrigin) - - // Deep copy the logs occurred in the scope of block for hash, logs := range s.logs { cpy := make([]*types.Log, len(logs)) for i, l := range logs { @@ -906,7 +822,6 @@ func (s *StateDB) Copy() *StateDB { } state.logs[hash] = cpy } - // Deep copy the preimages occurred in the scope of block for hash, preimage := range s.preimages { state.preimages[hash] = preimage } @@ -926,6 +841,27 @@ func (s *StateDB) Copy() *StateDB { if s.prefetcher != nil { state.prefetcher = s.prefetcher.copy() } + if s.snap != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that as well. + // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snap = s.snap + + // deep copy needed + state.snapAccounts = make(map[common.Hash][]byte, len(s.snapAccounts)) + for k, v := range s.snapAccounts { + state.snapAccounts[k] = v + } + state.snapStorage = make(map[common.Hash]map[common.Hash][]byte, len(s.snapStorage)) + for k, v := range s.snapStorage { + temp := make(map[common.Hash][]byte, len(v)) + for kk, vv := range v { + temp[kk] = vv + } + state.snapStorage[k] = temp + } + } return state } @@ -974,26 +910,24 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { // Thus, we can safely ignore it here continue } - if obj.selfDestructed || (deleteEmptyObjects && obj.empty()) { + if obj.suicided || (deleteEmptyObjects && obj.empty()) { obj.deleted = true // We need to maintain account deletions explicitly (will remain - // set indefinitely). Note only the first occurred self-destruct - // event is tracked. - if _, ok := s.stateObjectsDestruct[obj.address]; !ok { - s.stateObjectsDestruct[obj.address] = obj.origin - } + // set indefinitely). + s.stateObjectsDestruct[obj.address] = struct{}{} + + // If state snapshotting is active, also mark the destruction there. // Note, we can't do this only at the end of a block because multiple // transactions within the same block might self destruct and then // resurrect an account; but the snapshotter needs both events. - delete(s.accounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storages, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) - delete(s.accountsOrigin, obj.address) // Clear out any previously updated account data (may be recreated via a resurrect) - delete(s.storagesOrigin, obj.address) // Clear out any previously updated storage data (may be recreated via a resurrect) + if s.snap != nil { + delete(s.snapAccounts, obj.addrHash) // Clear out any previously updated account data (may be recreated via a resurrect) + delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a resurrect) + } } else { obj.finalise(true) // Prefetch slots in the background } - obj.created = false s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} @@ -1037,7 +971,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // to pull useful data from disk. 
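// (Each pending object recomputes its storage root at this point so the
// account-trie update below hashes against fresh per-account data.)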
for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { - obj.updateRoot() + obj.updateRoot(s.db) } } // Now we're about to start to write changes to the trie. The trie is so far @@ -1088,156 +1022,19 @@ func (s *StateDB) clearJournalAndRefund() { s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries } -// deleteStorage iterates the storage trie belongs to the account and mark all -// slots inside as deleted. -func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) { - start := time.Now() - tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root) - if err != nil { - return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err) - } - it, err := tr.NodeIterator(nil) - if err != nil { - return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err) - } - var ( - set = trienode.NewNodeSet(addrHash) - slots = make(map[common.Hash][]byte) - stateSize common.StorageSize - nodeSize common.StorageSize - ) - for it.Next(true) { - // arbitrary stateSize limit, make it configurable - if stateSize+nodeSize > 512*1024*1024 { - log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize) - if metrics.EnabledExpensive { - slotDeletionSkip.Inc(1) - } - return true, nil, nil, nil - } - if it.Leaf() { - slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob()) - stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob())) - continue - } - if it.Hash() == (common.Hash{}) { - continue - } - nodeSize += common.StorageSize(len(it.Path())) - set.AddNode(it.Path(), trienode.NewDeleted()) - } - if err := it.Error(); err != nil { - return false, nil, nil, err - } - if metrics.EnabledExpensive { - if int64(len(slots)) > slotDeletionMaxCount.Value() { - slotDeletionMaxCount.Update(int64(len(slots))) - } - if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() { - slotDeletionMaxSize.Update(int64(stateSize + nodeSize)) - } - slotDeletionTimer.UpdateSince(start) - slotDeletionCount.Mark(int64(len(slots))) - slotDeletionSize.Mark(int64(stateSize + nodeSize)) - } - return false, slots, set, nil -} - -// handleDestruction processes all destruction markers and deletes the account -// and associated storage slots if necessary. There are four possible situations -// here: -// -// - the account was not existent and be marked as destructed -// -// - the account was not existent and be marked as destructed, -// however, it's resurrected later in the same block. -// -// - the account was existent and be marked as destructed -// -// - the account was existent and be marked as destructed, -// however it's resurrected later in the same block. -// -// In case (a), nothing needs be deleted, nil to nil transition can be ignored. -// -// In case (b), nothing needs be deleted, nil is used as the original value for -// newly created account and storages -// -// In case (c), **original** account along with its storages should be deleted, -// with their values be tracked as original value. -// -// In case (d), **original** account along with its storages should be deleted, -// with their values be tracked as original value. 
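// (The whole case (a)-(d) destruction analysis above is dropped by this
// revert: the pre-1.12.2 snapshot journal only needs the per-block destruct
// set, not the original account and storage values.)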
-func (s *StateDB) handleDestruction(nodes *trienode.MergedNodeSet) (map[common.Address]struct{}, error) { - incomplete := make(map[common.Address]struct{}) - for addr, prev := range s.stateObjectsDestruct { - // The original account was non-existing, and it's marked as destructed - // in the scope of block. It can be case (a) or (b). - // - for (a), skip it without doing anything. - // - for (b), track account's original value as nil. It may overwrite - // the data cached in s.accountsOrigin set by 'updateStateObject'. - addrHash := crypto.Keccak256Hash(addr[:]) - if prev == nil { - if _, ok := s.accounts[addrHash]; ok { - s.accountsOrigin[addr] = nil // case (b) - } - continue - } - // It can overwrite the data in s.accountsOrigin set by 'updateStateObject'. - s.accountsOrigin[addr] = types.SlimAccountRLP(*prev) // case (c) or (d) - - // Short circuit if the storage was empty. - if prev.Root == types.EmptyRootHash { - continue - } - // Remove storage slots belong to the account. - aborted, slots, set, err := s.deleteStorage(addr, addrHash, prev.Root) - if err != nil { - return nil, fmt.Errorf("failed to delete storage, err: %w", err) - } - // The storage is too huge to handle, skip it but mark as incomplete. - // For case (d), the account is resurrected might with a few slots - // created. In this case, wipe the entire storage state diff because - // of aborted deletion. - if aborted { - incomplete[addr] = struct{}{} - delete(s.storagesOrigin, addr) - continue - } - if s.storagesOrigin[addr] == nil { - s.storagesOrigin[addr] = slots - } else { - // It can overwrite the data in s.storagesOrigin[addrHash] set by - // 'object.updateTrie'. - for key, val := range slots { - s.storagesOrigin[addr][key] = val - } - } - if err := nodes.Merge(set); err != nil { - return nil, err - } - } - return incomplete, nil -} - // Commit writes the state to the underlying in-memory trie database. -func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) { - return s.commit(block, deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot) +func (s *StateDB) Commit(deleteEmptyObjects bool, referenceRoot bool) (common.Hash, error) { + return s.commit(deleteEmptyObjects, nil, common.Hash{}, common.Hash{}, referenceRoot) } // CommitWithSnap writes the state to the underlying in-memory trie database and // generates a snapshot layer for the newly committed state. -func (s *StateDB) CommitWithSnap(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) { - return s.commit(block, deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot) +func (s *StateDB) CommitWithSnap(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) { + return s.commit(deleteEmptyObjects, snaps, blockHash, parentHash, referenceRoot) } -// Once the state is committed, tries cached in stateDB (including account -// trie, storage tries) will no longer be functional. A new state instance -// must be created with new root and updated database for accessing post- -// commit states. -// -// The associated block number of the state transition is also provided -// for more chain context. -func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) { +// Commit writes the state to the underlying in-memory trie database. 
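// (commit is the shared implementation behind Commit and CommitWithSnap;
// when snaps is non-nil, a fresh snapshot layer for blockHash is stacked
// on top of parentHash once the trie writes succeed.)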
+func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHash, parentHash common.Hash, referenceRoot bool) (common.Hash, error) { // Short circuit in case any database failure occurred earlier. if s.dbErr != nil { return common.Hash{}, fmt.Errorf("commit aborted due to earlier error: %v", s.dbErr) @@ -1254,38 +1051,37 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. nodes = trienode.NewMergedNodeSet() codeWriter = s.db.DiskDB().NewBatch() ) - // Handle all state deletions first - incomplete, err := s.handleDestruction(nodes) - if err != nil { - return common.Hash{}, err - } - // Handle all state updates afterwards for addr := range s.stateObjectsDirty { - obj := s.stateObjects[addr] - if obj.deleted { - continue - } - // Write any contract code associated with the state object - if obj.code != nil && obj.dirtyCode { - rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) - obj.dirtyCode = false - } - // Write any storage changes in the state object to its storage trie - set, err := obj.commit() - if err != nil { - return common.Hash{}, err - } - // Merge the dirty nodes of storage trie into global set. It is possible - // that the account was destructed and then resurrected in the same block. - // In this case, the node set is shared by both accounts. - if set != nil { - if err := nodes.Merge(set); err != nil { + if obj := s.stateObjects[addr]; !obj.deleted { + // Write any contract code associated with the state object + if obj.code != nil && obj.dirtyCode { + rawdb.WriteCode(codeWriter, common.BytesToHash(obj.CodeHash()), obj.code) + obj.dirtyCode = false + } + // Write any storage changes in the state object to its storage trie + set, err := obj.commitTrie(s.db) + if err != nil { return common.Hash{}, err } - updates, deleted := set.Size() - storageTrieNodesUpdated += updates - storageTrieNodesDeleted += deleted + // Merge the dirty nodes of storage trie into global set. + if set != nil { + if err := nodes.Merge(set); err != nil { + return common.Hash{}, err + } + updates, deleted := set.Size() + storageTrieNodesUpdated += updates + storageTrieNodesDeleted += deleted + } } + // If the contract is destructed, the storage is still left in the + // database as dangling data. Theoretically it's should be wiped from + // database as well, but in hash-based-scheme it's extremely hard to + // determine that if the trie nodes are also referenced by other storage, + // and in path-based-scheme some technical challenges are still unsolved. + // Although it won't affect the correctness but please fix it TODO(rjl493456442). + } + if len(s.stateObjectsDirty) > 0 { + s.stateObjectsDirty = make(map[common.Address]struct{}) } if codeWriter.ValueSize() > 0 { if err := codeWriter.Write(); err != nil { @@ -1297,10 +1093,7 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. if metrics.EnabledExpensive { start = time.Now() } - root, set, err := s.trie.Commit(true) - if err != nil { - return common.Hash{}, err - } + root, set := s.trie.Commit(true) // Merge the dirty nodes of account trie into global set if set != nil { if err := nodes.Merge(set); err != nil { @@ -1328,13 +1121,16 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. 
if s.snap == nil { log.Error(fmt.Sprintf("cannot commit with snaps without a pre-existing snap layer, parentHash: %s, blockHash: %s", parentHash, blockHash)) } - if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.accounts, s.storages); err != nil { + if err := snaps.Update(blockHash, root, parentHash, s.convertAccountSet(s.stateObjectsDestruct), s.snapAccounts, s.snapStorage); err != nil { log.Warn("Failed to update snapshot tree", "to", root, "err", err) } if metrics.EnabledExpensive { s.SnapshotCommits += time.Since(start) } - s.snap = nil + s.snap, s.snapAccounts, s.snapStorage = nil, nil, nil + } + if len(s.stateObjectsDestruct) > 0 { + s.stateObjectsDestruct = make(map[common.Address]struct{}) } if root == (common.Hash{}) { root = types.EmptyRootHash @@ -1346,11 +1142,11 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. if root != origin { start := time.Now() if referenceRoot { - if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { + if err := s.db.TrieDB().UpdateAndReferenceRoot(root, origin, nodes); err != nil { return common.Hash{}, err } } else { - if err := s.db.TrieDB().Update(root, origin, block, nodes, triestate.New(s.accountsOrigin, s.storagesOrigin, incomplete)); err != nil { + if err := s.db.TrieDB().Update(root, origin, nodes); err != nil { return common.Hash{}, err } } @@ -1359,13 +1155,6 @@ func (s *StateDB) commit(block uint64, deleteEmptyObjects bool, snaps *snapshot. s.TrieDBCommits += time.Since(start) } } - // Clear all internal flags at the end of commit operation. - s.accounts = make(map[common.Hash][]byte) - s.storages = make(map[common.Hash]map[common.Hash][]byte) - s.accountsOrigin = make(map[common.Address][]byte) - s.storagesOrigin = make(map[common.Address]map[common.Hash][]byte) - s.stateObjectsDirty = make(map[common.Address]struct{}) - s.stateObjectsDestruct = make(map[common.Address]*types.StateAccount) return root, nil } @@ -1474,7 +1263,7 @@ func (s *StateDB) GetPredicateStorageSlots(address common.Address, index int) ([ } // convertAccountSet converts a provided account set from address keyed to hash keyed. -func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { +func (s *StateDB) convertAccountSet(set map[common.Address]struct{}) map[common.Hash]struct{} { ret := make(map[common.Hash]struct{}, len(set)) for addr := range set { obj, exist := s.stateObjects[addr] @@ -1491,24 +1280,3 @@ func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) func (s *StateDB) SetPredicateStorageSlots(address common.Address, predicates [][]byte) { s.predicateStorageSlots[address] = predicates } - -// copySet returns a deep-copied set. -func copySet[k comparable](set map[k][]byte) map[k][]byte { - copied := make(map[k][]byte, len(set)) - for key, val := range set { - copied[key] = common.CopyBytes(val) - } - return copied -} - -// copy2DSet returns a two-dimensional deep-copied set. 
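// (copySet and copy2DSet are the generic deep-copy helpers introduced
// alongside the origin maps; the revert deletes them together with their
// callers in StateDB.Copy.)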
-func copy2DSet[k comparable](set map[k]map[common.Hash][]byte) map[k]map[common.Hash][]byte { - copied := make(map[k]map[common.Hash][]byte, len(set)) - for addr, subset := range set { - copied[addr] = make(map[common.Hash][]byte, len(subset)) - for key, val := range subset { - copied[addr][key] = common.CopyBytes(val) - } - } - return copied -} diff --git a/core/state/statedb_fuzz_test.go b/core/state/statedb_fuzz_test.go deleted file mode 100644 index 60c2d1df82..0000000000 --- a/core/state/statedb_fuzz_test.go +++ /dev/null @@ -1,386 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package state - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "math" - "math/big" - "math/rand" - "reflect" - "strings" - "testing" - "testing/quick" - - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" -) - -// A stateTest checks that the state changes are correctly captured. Instances -// of this test with pseudorandom content are created by Generate. -// -// The test works as follows: -// -// A list of states are created by applying actions. The state changes between -// each state instance are tracked and be verified. -type stateTest struct { - addrs []common.Address // all account addresses - actions [][]testAction // modifications to the state, grouped by block - chunk int // The number of actions per chunk - err error // failure details are reported through this field -} - -// newStateTestAction creates a random action that changes state. 
-func newStateTestAction(addr common.Address, r *rand.Rand, index int) testAction { - actions := []testAction{ - { - name: "SetBalance", - fn: func(a testAction, s *StateDB) { - s.SetBalance(addr, big.NewInt(a.args[0])) - }, - args: make([]int64, 1), - }, - { - name: "SetNonce", - fn: func(a testAction, s *StateDB) { - s.SetNonce(addr, uint64(a.args[0])) - }, - args: make([]int64, 1), - }, - { - name: "SetState", - fn: func(a testAction, s *StateDB) { - var key, val common.Hash - binary.BigEndian.PutUint16(key[:], uint16(a.args[0])) - binary.BigEndian.PutUint16(val[:], uint16(a.args[1])) - s.SetState(addr, key, val) - }, - args: make([]int64, 2), - }, - { - name: "SetCode", - fn: func(a testAction, s *StateDB) { - code := make([]byte, 16) - binary.BigEndian.PutUint64(code, uint64(a.args[0])) - binary.BigEndian.PutUint64(code[8:], uint64(a.args[1])) - s.SetCode(addr, code) - }, - args: make([]int64, 2), - }, - { - name: "CreateAccount", - fn: func(a testAction, s *StateDB) { - s.CreateAccount(addr) - }, - }, - { - name: "Selfdestruct", - fn: func(a testAction, s *StateDB) { - s.SelfDestruct(addr) - }, - }, - } - var nonRandom = index != -1 - if index == -1 { - index = r.Intn(len(actions)) - } - action := actions[index] - var names []string - if !action.noAddr { - names = append(names, addr.Hex()) - } - for i := range action.args { - if nonRandom { - action.args[i] = rand.Int63n(10000) + 1 // set balance to non-zero - } else { - action.args[i] = rand.Int63n(10000) - } - names = append(names, fmt.Sprint(action.args[i])) - } - action.name += " " + strings.Join(names, ", ") - return action -} - -// Generate returns a new snapshot test of the given size. All randomness is -// derived from r. -func (*stateTest) Generate(r *rand.Rand, size int) reflect.Value { - addrs := make([]common.Address, 5) - for i := range addrs { - addrs[i][0] = byte(i) - } - actions := make([][]testAction, rand.Intn(5)+1) - - for i := 0; i < len(actions); i++ { - actions[i] = make([]testAction, size) - for j := range actions[i] { - if j == 0 { - // Always include a set balance action to make sure - // the state changes are not empty. 
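The deleted fuzz test drives everything through testing/quick: *stateTest implements the quick.Generator interface, so quick.Check can synthesize arbitrarily many pseudorandom instances and evaluate a boolean property against each. A self-contained miniature of the same mechanism, with a toy type and a trivial property:

package main

import (
	"fmt"
	"math/rand"
	"reflect"
	"testing/quick"
)

// pair implements quick.Generator, so testing/quick can mint random instances
// of it, just as the deleted stateTest above does for whole action sequences.
type pair struct{ a, b int }

func (*pair) Generate(r *rand.Rand, size int) reflect.Value {
	return reflect.ValueOf(&pair{a: r.Intn(size + 1), b: r.Intn(size + 1)})
}

func main() {
	// Property under test: addition is commutative.
	prop := func(p *pair) bool { return p.a+p.b == p.b+p.a }
	fmt.Println(quick.Check(prop, &quick.Config{MaxCount: 100})) // <nil>
}

In the deleted test, Generate builds whole blocks of actions and run() replays them, committing at block boundaries and verifying every recorded state diff.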
- actions[i][j] = newStateTestAction(common.HexToAddress("0xdeadbeef"), r, 0) - continue - } - actions[i][j] = newStateTestAction(addrs[r.Intn(len(addrs))], r, -1) - } - } - chunk := int(math.Sqrt(float64(size))) - if size > 0 && chunk == 0 { - chunk = 1 - } - return reflect.ValueOf(&stateTest{ - addrs: addrs, - actions: actions, - chunk: chunk, - }) -} - -func (test *stateTest) String() string { - out := new(bytes.Buffer) - for i, actions := range test.actions { - fmt.Fprintf(out, "---- block %d ----\n", i) - for j, action := range actions { - if j%test.chunk == 0 { - fmt.Fprintf(out, "---- transaction %d ----\n", j/test.chunk) - } - fmt.Fprintf(out, "%4d: %s\n", j%test.chunk, action.name) - } - } - return out.String() -} - -func (test *stateTest) run() bool { - var ( - roots []common.Hash - accountList []map[common.Address][]byte - storageList []map[common.Address]map[common.Hash][]byte - onCommit = func(states *triestate.Set) { - accountList = append(accountList, copySet(states.Accounts)) - storageList = append(storageList, copy2DSet(states.Storages)) - } - disk = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabaseWithConfig(disk, &trie.Config{OnCommit: onCommit}) - sdb = NewDatabaseWithNodeDB(disk, tdb) - byzantium = rand.Intn(2) == 0 - ) - for i, actions := range test.actions { - root := types.EmptyRootHash - if i != 0 { - root = roots[len(roots)-1] - } - state, err := New(root, sdb, nil) - if err != nil { - panic(err) - } - for i, action := range actions { - if i%test.chunk == 0 && i != 0 { - if byzantium { - state.Finalise(true) // call finalise at the transaction boundary - } else { - state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary - } - } - action.fn(action, state) - } - if byzantium { - state.Finalise(true) // call finalise at the transaction boundary - } else { - state.IntermediateRoot(true) // call intermediateRoot at the transaction boundary - } - nroot, err := state.Commit(0, true, false) // call commit at the block boundary - if err != nil { - panic(err) - } - if nroot == root { - return true // filter out non-change state transition - } - roots = append(roots, nroot) - } - for i := 0; i < len(test.actions); i++ { - root := types.EmptyRootHash - if i != 0 { - root = roots[i-1] - } - test.err = test.verify(root, roots[i], tdb, accountList[i], storageList[i]) - if test.err != nil { - return false - } - } - return true -} - -// verifyAccountCreation this function is called once the state diff says that -// specific account was not present. 
A serial of checks will be performed to -// ensure the state diff is correct, includes: -// -// - the account was indeed not present in trie -// - the account is present in new trie, nil->nil is regarded as invalid -// - the slots transition is correct -func (test *stateTest) verifyAccountCreation(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, slots map[common.Hash][]byte) error { - // Verify account change - addrHash := crypto.Keccak256Hash(addr.Bytes()) - oBlob, err := otr.Get(addrHash.Bytes()) - if err != nil { - return err - } - nBlob, err := ntr.Get(addrHash.Bytes()) - if err != nil { - return err - } - if len(oBlob) != 0 { - return fmt.Errorf("unexpected account in old trie, %x", addrHash) - } - if len(nBlob) == 0 { - return fmt.Errorf("missing account in new trie, %x", addrHash) - } - - // Verify storage changes - var nAcct types.StateAccount - if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil { - return err - } - // Account has no slot, empty slot set is expected - if nAcct.Root == types.EmptyRootHash { - if len(slots) != 0 { - return fmt.Errorf("unexpected slot changes %x", addrHash) - } - return nil - } - // Account has slots, ensure all new slots are contained - st, err := trie.New(trie.StorageTrieID(next, addrHash, nAcct.Root), db) - if err != nil { - return err - } - for key, val := range slots { - st.Update(key.Bytes(), val) - } - if st.Hash() != types.EmptyRootHash { - return errors.New("invalid slot changes") - } - return nil -} - -// verifyAccountUpdate this function is called once the state diff says that -// specific account was present. A serial of checks will be performed to -// ensure the state diff is correct, includes: -// -// - the account was indeed present in trie -// - the account in old trie matches the provided value -// - the slots transition is correct -func (test *stateTest) verifyAccountUpdate(next common.Hash, db *trie.Database, otr, ntr *trie.Trie, addr common.Address, origin []byte, slots map[common.Hash][]byte) error { - // Verify account change - addrHash := crypto.Keccak256Hash(addr.Bytes()) - oBlob, err := otr.Get(addrHash.Bytes()) - if err != nil { - return err - } - nBlob, err := ntr.Get(addrHash.Bytes()) - if err != nil { - return err - } - if len(oBlob) == 0 { - return fmt.Errorf("missing account in old trie, %x", addrHash) - } - full, err := types.FullAccountRLP(origin) - if err != nil { - return err - } - if !bytes.Equal(full, oBlob) { - return fmt.Errorf("account value is not matched, %x", addrHash) - } - - // Decode accounts - var ( - oAcct types.StateAccount - nAcct types.StateAccount - nRoot common.Hash - ) - if err := rlp.DecodeBytes(oBlob, &oAcct); err != nil { - return err - } - if len(nBlob) == 0 { - nRoot = types.EmptyRootHash - } else { - if err := rlp.DecodeBytes(nBlob, &nAcct); err != nil { - return err - } - nRoot = nAcct.Root - } - - // Verify storage - st, err := trie.New(trie.StorageTrieID(next, addrHash, nRoot), db) - if err != nil { - return err - } - for key, val := range slots { - st.Update(key.Bytes(), val) - } - if st.Hash() != oAcct.Root { - return errors.New("invalid slot changes") - } - return nil -} - -func (test *stateTest) verify(root common.Hash, next common.Hash, db *trie.Database, accountsOrigin map[common.Address][]byte, storagesOrigin map[common.Address]map[common.Hash][]byte) error { - otr, err := trie.New(trie.StateTrieID(root), db) - if err != nil { - return err - } - ntr, err := trie.New(trie.StateTrieID(next), db) - if err != nil { - return err - } - for addr, account := 
range accountsOrigin { - var err error - if len(account) == 0 { - err = test.verifyAccountCreation(next, db, otr, ntr, addr, storagesOrigin[addr]) - } else { - err = test.verifyAccountUpdate(next, db, otr, ntr, addr, accountsOrigin[addr], storagesOrigin[addr]) - } - if err != nil { - return err - } - } - return nil -} - -func TestStateChanges(t *testing.T) { - config := &quick.Config{MaxCount: 1000} - err := quick.Check((*stateTest).run, config) - if cerr, ok := err.(*quick.CheckError); ok { - test := cerr.In[0].(*stateTest) - t.Errorf("%v:\n%s", test.err, test) - } else if err != nil { - t.Error(err) - } -} diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index b999a6274a..c1240ba46e 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -29,7 +29,6 @@ package state import ( "bytes" "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -41,11 +40,8 @@ import ( "testing/quick" "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/state/snapshot" "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" ) // Tests that updating a state trie does not leak any database writes prior to @@ -116,7 +112,7 @@ func TestIntermediateLeaks(t *testing.T) { } // Commit and cross check the databases. - transRoot, err := transState.Commit(0, false, false) + transRoot, err := transState.Commit(false, false) if err != nil { t.Fatalf("failed to commit transition state: %v", err) } @@ -124,7 +120,7 @@ func TestIntermediateLeaks(t *testing.T) { t.Errorf("can not commit trie %v to persistent database", transRoot.Hex()) } - finalRoot, err := finalState.Commit(0, false, false) + finalRoot, err := finalState.Commit(false, false) if err != nil { t.Fatalf("failed to commit final state: %v", err) } @@ -311,9 +307,9 @@ func newTestAction(addr common.Address, r *rand.Rand) testAction { }, }, { - name: "SelfDestruct", + name: "Suicide", fn: func(a testAction, s *StateDB) { - s.SelfDestruct(addr) + s.Suicide(addr) }, }, { @@ -463,7 +459,7 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { } // Check basic accessor methods. checkeq("Exist", state.Exist(addr), checkstate.Exist(addr)) - checkeq("HasSelfdestructed", state.HasSelfDestructed(addr), checkstate.HasSelfDestructed(addr)) + checkeq("HasSuicided", state.HasSuicided(addr), checkstate.HasSuicided(addr)) checkeq("GetBalance", state.GetBalance(addr), checkstate.GetBalance(addr)) checkeq("GetNonce", state.GetNonce(addr), checkstate.GetNonce(addr)) checkeq("GetCode", state.GetCode(addr), checkstate.GetCode(addr)) @@ -495,9 +491,9 @@ func (test *snapshotTest) checkEqual(state, checkstate *StateDB) error { } func TestTouchDelete(t *testing.T) { - s := newStateEnv() + s := newStateTest() s.state.GetOrNewStateObject(common.Address{}) - root, _ := s.state.Commit(0, false, false) + root, _ := s.state.Commit(false, false) s.state, _ = NewWithSnapshot(root, s.state.db, s.state.snap) snapshot := s.state.Snapshot() @@ -532,8 +528,7 @@ func TestCopyOfCopy(t *testing.T) { // // See https://github.com/ethereum/go-ethereum/issues/20106. 
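The copy/commit tests that follow all hinge on one contract: GetState sees pending, uncommitted writes, GetCommittedState sees only what the last commit persisted, and Commit promotes the former into the latter. A toy model of just that distinction (it deliberately reflects none of the real StateDB internals):

package main

import "fmt"

// toyState keeps one account's storage as a committed layer plus a dirty
// overlay, enough to illustrate GetState vs GetCommittedState.
type toyState struct {
	committed map[string]string
	dirty     map[string]string
}

func (s *toyState) SetState(k, v string) { s.dirty[k] = v }

// GetState prefers pending writes over committed data.
func (s *toyState) GetState(k string) string {
	if v, ok := s.dirty[k]; ok {
		return v
	}
	return s.committed[k]
}

// GetCommittedState ignores the dirty overlay entirely.
func (s *toyState) GetCommittedState(k string) string { return s.committed[k] }

// Commit flushes the overlay into the committed layer.
func (s *toyState) Commit() {
	for k, v := range s.dirty {
		s.committed[k] = v
	}
	s.dirty = map[string]string{}
}

func main() {
	s := &toyState{committed: map[string]string{}, dirty: map[string]string{}}
	s.SetState("slot", "val")
	fmt.Println(s.GetState("slot"), s.GetCommittedState("slot") == "") // val true
	s.Commit()
	fmt.Println(s.GetCommittedState("slot")) // val
}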
func TestCopyCommitCopy(t *testing.T) { - tdb := NewDatabase(rawdb.NewMemoryDatabase()) - state, _ := New(types.EmptyRootHash, tdb, nil) + state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) // Create an account and check if the retrieved balance is correct addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") @@ -570,6 +565,20 @@ func TestCopyCommitCopy(t *testing.T) { if val := copyOne.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("first copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } + + copyOne.Commit(false, false) + if balance := copyOne.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("first copy post-commit balance mismatch: have %v, want %v", balance, 42) + } + if code := copyOne.GetCode(addr); !bytes.Equal(code, []byte("hello")) { + t.Fatalf("first copy post-commit code mismatch: have %x, want %x", code, []byte("hello")) + } + if val := copyOne.GetState(addr, skey); val != sval { + t.Fatalf("first copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) + } + if val := copyOne.GetCommittedState(addr, skey); val != sval { + t.Fatalf("first copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) + } // Copy the copy and check the balance once more copyTwo := copyOne.Copy() if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { @@ -581,23 +590,8 @@ func TestCopyCommitCopy(t *testing.T) { if val := copyTwo.GetState(addr, skey); val != sval { t.Fatalf("second copy non-committed storage slot mismatch: have %x, want %x", val, sval) } - if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("second copy committed storage slot mismatch: have %x, want %x", val, sval) - } - // Commit state, ensure states can be loaded from disk - root, _ := state.Commit(0, false, false) - state, _ = New(root, tdb, nil) - if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("state post-commit balance mismatch: have %v, want %v", balance, 42) - } - if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) { - t.Fatalf("state post-commit code mismatch: have %x, want %x", code, []byte("hello")) - } - if val := state.GetState(addr, skey); val != sval { - t.Fatalf("state post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) - } - if val := state.GetCommittedState(addr, skey); val != sval { - t.Fatalf("state post-commit committed storage slot mismatch: have %x, want %x", val, sval) + if val := copyTwo.GetCommittedState(addr, skey); val != sval { + t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) } } @@ -657,6 +651,19 @@ func TestCopyCopyCommitCopy(t *testing.T) { if val := copyTwo.GetCommittedState(addr, skey); val != (common.Hash{}) { t.Fatalf("second copy pre-commit committed storage slot mismatch: have %x, want %x", val, common.Hash{}) } + copyTwo.Commit(false, false) + if balance := copyTwo.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { + t.Fatalf("second copy post-commit balance mismatch: have %v, want %v", balance, 42) + } + if code := copyTwo.GetCode(addr); !bytes.Equal(code, []byte("hello")) { + t.Fatalf("second copy post-commit code mismatch: have %x, want %x", code, []byte("hello")) + } + if val := copyTwo.GetState(addr, skey); val != sval { + t.Fatalf("second copy post-commit non-committed storage slot mismatch: have %x, want %x", val, sval) + } + if val := 
copyTwo.GetCommittedState(addr, skey); val != sval { + t.Fatalf("second copy post-commit committed storage slot mismatch: have %x, want %x", val, sval) + } // Copy the copy-copy and check the balance once more copyThree := copyTwo.Copy() if balance := copyThree.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { @@ -668,56 +675,11 @@ func TestCopyCopyCommitCopy(t *testing.T) { if val := copyThree.GetState(addr, skey); val != sval { t.Fatalf("third copy non-committed storage slot mismatch: have %x, want %x", val, sval) } - if val := copyThree.GetCommittedState(addr, skey); val != (common.Hash{}) { + if val := copyThree.GetCommittedState(addr, skey); val != sval { t.Fatalf("third copy committed storage slot mismatch: have %x, want %x", val, sval) } } -// TestCommitCopy tests the copy from a committed state is not functional. -func TestCommitCopy(t *testing.T) { - state, _ := New(types.EmptyRootHash, NewDatabase(rawdb.NewMemoryDatabase()), nil) - - // Create an account and check if the retrieved balance is correct - addr := common.HexToAddress("0xaffeaffeaffeaffeaffeaffeaffeaffeaffeaffe") - skey := common.HexToHash("aaa") - sval := common.HexToHash("bbb") - - state.SetBalance(addr, big.NewInt(42)) // Change the account trie - state.SetCode(addr, []byte("hello")) // Change an external metadata - state.SetState(addr, skey, sval) // Change the storage trie - - if balance := state.GetBalance(addr); balance.Cmp(big.NewInt(42)) != 0 { - t.Fatalf("initial balance mismatch: have %v, want %v", balance, 42) - } - if code := state.GetCode(addr); !bytes.Equal(code, []byte("hello")) { - t.Fatalf("initial code mismatch: have %x, want %x", code, []byte("hello")) - } - if val := state.GetState(addr, skey); val != sval { - t.Fatalf("initial non-committed storage slot mismatch: have %x, want %x", val, sval) - } - if val := state.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("initial committed storage slot mismatch: have %x, want %x", val, common.Hash{}) - } - // Copy the committed state database, the copied one is not functional. - state.Commit(0, true, false) - copied := state.Copy() - if balance := copied.GetBalance(addr); balance.Cmp(big.NewInt(0)) != 0 { - t.Fatalf("unexpected balance: have %v", balance) - } - if code := copied.GetCode(addr); code != nil { - t.Fatalf("unexpected code: have %x", code) - } - if val := copied.GetState(addr, skey); val != (common.Hash{}) { - t.Fatalf("unexpected storage slot: have %x", val) - } - if val := copied.GetCommittedState(addr, skey); val != (common.Hash{}) { - t.Fatalf("unexpected storage slot: have %x", val) - } - if !errors.Is(copied.Error(), trie.ErrCommitted) { - t.Fatalf("unexpected state error, %v", copied.Error()) - } -} - // TestDeleteCreateRevert tests a weird state transition corner case that we hit // while changing the internals of StateDB. 
The workflow is that a contract is // self-destructed, then in a follow-up transaction (but same block) it's created @@ -733,11 +695,11 @@ func TestDeleteCreateRevert(t *testing.T) { addr := common.BytesToAddress([]byte("so")) state.SetBalance(addr, big.NewInt(1)) - root, _ := state.Commit(0, false, false) + root, _ := state.Commit(false, false) state, _ = NewWithSnapshot(root, state.db, state.snap) // Simulate self-destructing in one transaction, then create-reverting in another - state.SelfDestruct(addr) + state.Suicide(addr) state.Finalise(true) id := state.Snapshot() @@ -745,7 +707,7 @@ func TestDeleteCreateRevert(t *testing.T) { state.RevertToSnapshot(id) // Commit the entire state and make sure we don't crash and have the correct state - root, _ = state.Commit(0, true, false) + root, _ = state.Commit(true, false) state, _ = NewWithSnapshot(root, state.db, state.snap) if state.getStateObject(addr) != nil { @@ -769,7 +731,7 @@ func TestMissingTrieNodes(t *testing.T) { a2 := common.BytesToAddress([]byte("another")) state.SetBalance(a2, big.NewInt(100)) state.SetCode(a2, []byte{1, 2, 4}) - root, _ = state.Commit(0, false, false) + root, _ = state.Commit(false, false) t.Logf("root: %x", root) // force-flush state.Database().TrieDB().Cap(0) @@ -793,7 +755,7 @@ func TestMissingTrieNodes(t *testing.T) { } // Modify the state state.SetBalance(addr, big.NewInt(2)) - root, err := state.Commit(0, false, false) + root, err := state.Commit(false, false) if err == nil { t.Fatalf("expected error, got root :%x", root) } @@ -988,7 +950,7 @@ func TestFlushOrderDataLoss(t *testing.T) { state.SetState(common.Address{a}, common.Hash{a, s}, common.Hash{a, s}) } } - root, err := state.Commit(0, false, false) + root, err := state.Commit(false, false) if err != nil { t.Fatalf("failed to commit state trie: %v", err) } @@ -1046,37 +1008,3 @@ func TestStateDBTransientStorage(t *testing.T) { t.Fatalf("transient storage mismatch: have %x, want %x", got, value) } } - -func TestResetObject(t *testing.T) { - var ( - disk = rawdb.NewMemoryDatabase() - tdb = trie.NewDatabase(disk) - db = NewDatabaseWithNodeDB(disk, tdb) - snaps, _ = snapshot.New(snapshot.Config{CacheSize: 10}, disk, tdb, common.Hash{}, types.EmptyRootHash) - state, _ = New(types.EmptyRootHash, db, snaps) - addr = common.HexToAddress("0x1") - slotA = common.HexToHash("0x1") - slotB = common.HexToHash("0x2") - ) - // Initialize account with balance and storage in first transaction. 
- state.SetBalance(addr, big.NewInt(1)) - state.SetState(addr, slotA, common.BytesToHash([]byte{0x1})) - state.IntermediateRoot(true) - - // Reset account and mutate balance and storages - state.CreateAccount(addr) - state.SetBalance(addr, big.NewInt(2)) - state.SetState(addr, slotB, common.BytesToHash([]byte{0x2})) - root, _ := state.CommitWithSnap(0, true, snaps, common.Hash{}, common.Hash{}, false) - - // Ensure the original account is wiped properly - snap := snaps.Snapshot(root) - slot, _ := snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotA.Bytes())) - if len(slot) != 0 { - t.Fatalf("Unexpected storage slot") - } - slot, _ = snap.Storage(crypto.Keccak256Hash(addr.Bytes()), crypto.Keccak256Hash(slotB.Bytes())) - if !bytes.Equal(slot, []byte{0x2}) { - t.Fatalf("Unexpected storage slot value %v", slot) - } -} diff --git a/core/state/sync_test.go b/core/state/sync_test.go index 96912962fc..5a83a9ac91 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -21,7 +21,6 @@ import ( "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" @@ -39,7 +38,7 @@ type testAccount struct { func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { // Create an empty state db := rawdb.NewMemoryDatabase() - sdb := NewDatabaseWithConfig(db, &trie.Config{Preimages: true}) + sdb := NewDatabase(db) state, _ := New(types.EmptyRootHash, sdb, nil) // Fill it with some arbitrary data @@ -61,13 +60,13 @@ func makeTestState() (ethdb.Database, Database, common.Hash, []*testAccount) { if i%5 == 0 { for j := byte(0); j < 5; j++ { hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}) - obj.SetState(hash, hash) + obj.SetState(sdb, hash, hash) } } state.updateStateObject(obj) accounts = append(accounts, acc) } - root, _ := state.Commit(0, false, false) + root, _ := state.Commit(false, false) // Return the generated state return db, sdb, root, accounts diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go index 3a3c29baa3..7c65fd12c4 100644 --- a/core/state/trie_prefetcher.go +++ b/core/state/trie_prefetcher.go @@ -424,7 +424,7 @@ func newTrieOrchestrator(sf *subfetcher) *trieOrchestrator { return nil } } else { - base, err = sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root) + base, err = sf.db.OpenStorageTrie(sf.state, sf.owner, sf.root) if err != nil { log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) return nil diff --git a/core/state_processor.go b/core/state_processor.go index b0c308a3e3..240764da38 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -169,7 +169,7 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, blockContext return nil, err } // Create a new context to be used in the EVM environment - vmenv := vm.NewEVM(blockContext, vm.TxContext{BlobHashes: tx.BlobHashes()}, statedb, config, cfg) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv) } @@ -190,7 +190,7 @@ func ApplyPrecompileActivations(c *params.ChainConfig, parentTimestamp *uint64, // (or deconfigure it if it is being disabled.) 
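The state_processor.go hunk just below restores the pre-rename Suicide/Finalise sequence for disabling a precompile: Suicide marks the account self-destructed, and the immediate Finalise actually wipes it, so the same address can be re-configured later in the same block. A sketch of that sequence against an in-memory StateDB; it assumes the post-revert subnet-evm API at this commit (Suicide rather than SelfDestruct), and the precompile address is made up for illustration:

package main

import (
	"fmt"
	"math/big"

	"github.com/ava-labs/subnet-evm/core/rawdb"
	"github.com/ava-labs/subnet-evm/core/state"
	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	addr := common.HexToAddress("0x0100000000000000000000000000000000000000") // hypothetical precompile address

	// "Configure": give the address some state.
	statedb.SetBalance(addr, big.NewInt(1))
	statedb.SetState(addr, common.Hash{1}, common.Hash{1})

	// "Disable": mark the account self-destructed, then Finalise immediately
	// so the wipe takes effect within the current block.
	statedb.Suicide(addr)
	statedb.Finalise(true)

	fmt.Println(statedb.GetState(addr, common.Hash{1}) == (common.Hash{})) // true: state wiped
}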
if activatingConfig.IsDisabled() { log.Info("Disabling precompile", "name", module.ConfigKey) - statedb.SelfDestruct(module.Address) + statedb.Suicide(module.Address) // Calling Finalise here effectively commits Suicide call and wipes the contract state. // This enables re-configuration of the same contract state in the same block. // Without an immediate Finalise call after the Suicide, a reconfigured precompiled state can be wiped out diff --git a/core/state_processor_test.go b/core/state_processor_test.go index a0645bdba2..d3776e7288 100644 --- a/core/state_processor_test.go +++ b/core/state_processor_test.go @@ -27,13 +27,11 @@ package core import ( - "crypto/ecdsa" "math/big" "testing" "github.com/ava-labs/subnet-evm/consensus" "github.com/ava-labs/subnet-evm/consensus/dummy" - "github.com/ava-labs/subnet-evm/consensus/misc/eip4844" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" @@ -43,10 +41,45 @@ import ( "github.com/ava-labs/subnet-evm/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/holiman/uint256" "golang.org/x/crypto/sha3" ) +var ( + cpcfg = *params.TestChainConfig + config = &cpcfg + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") +) + +func makeTx(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey) + return tx +} + +func mkDynamicTx(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { + tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: nonce, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Gas: gasLimit, + To: &to, + Value: big.NewInt(0), + }), signer, testKey) + return tx +} + +func mkDynamicCreationTx(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: nonce, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Gas: gasLimit, + Value: big.NewInt(0), + Data: data, + }), signer, testKey) + return tx +} + func u64(val uint64) *uint64 { return &val } // TestStateProcessorErrors tests the output from the 'core' errors @@ -54,57 +87,7 @@ func u64(val uint64) *uint64 { return &val } // blockchain imports bad blocks, meaning blocks which have valid headers but // contain invalid transactions func TestStateProcessorErrors(t *testing.T) { - cpcfg := *params.TestChainConfig - config := &cpcfg - config.CancunTime = u64(0) config.FeeConfig.MinBaseFee = big.NewInt(params.TestMaxBaseFee) - - var ( - signer = types.LatestSigner(config) - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - ) - var makeTx = func(key *ecdsa.PrivateKey, nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, key) - return tx - } - var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { - tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - To: &to, - Value: big.NewInt(0), - }), signer, key1) 
- return tx - } - var mkDynamicCreationTx = func(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - Value: big.NewInt(0), - Data: data, - }), signer, key1) - return tx - } - var mkBlobTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int, hashes []common.Hash) *types.Transaction { - tx, err := types.SignTx(types.NewTx(&types.BlobTx{ - Nonce: nonce, - GasTipCap: uint256.MustFromBig(gasTipCap), - GasFeeCap: uint256.MustFromBig(gasFeeCap), - Gas: gasLimit, - To: to, - BlobHashes: hashes, - Value: new(uint256.Int), - }), signer, key1) - if err != nil { - t.Fatal(err) - } - return tx - } - { // Tests against a 'recent' chain definition var ( db = rawdb.NewMemoryDatabase() @@ -118,10 +101,8 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.TestChainConfig.FeeConfig.GasLimit.Uint64(), } - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) - tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) ) - defer blockchain.Stop() bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) tooBigNumber := new(big.Int).Set(bigNumber) @@ -132,32 +113,32 @@ func TestStateProcessorErrors(t *testing.T) { }{ { // ErrNonceTooLow txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), }, want: "could not apply tx 1 [0x734d821c990099c6ae42d78072aadd3931c35328cf03ef4cf5b2a4ac9c398522]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", }, { // ErrNonceTooHigh txs: []*types.Transaction{ - makeTx(key1, 100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), + makeTx(100, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x0df36254cfbef8ed6961b38fc68aecc777177166144c8a56bc8919e23a559bf4]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0", }, { // ErrGasLimitReached txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), 15000001, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), 15000001, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x1354370681d2ab68247073d889736f8be4a8d87e35956f0c02658d3670803a66]: gas limit reached", }, { // ErrInsufficientFundsForTransfer txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(4000000000000000000), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(4000000000000000000), params.TxGas, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x1632f2bffcce84a5c91dd8ab2016128fccdbcfbe0485d2c67457e1c793c72a4b]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 4004725000000000000", }, { // 
ErrInsufficientFunds txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), }, want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 18900000000000000000000", }, @@ -167,13 +148,13 @@ func TestStateProcessorErrors(t *testing.T) { // multiplication len(data) +gas_per_byte overflows uint64. Not testable at the moment { // ErrIntrinsicGas txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x2fc3e3b5cc26917d413e26983fe189475f47d4f0757e32aaa5561fcb9c9dc432]: intrinsic gas too low: have 20000, want 21000", }, { // ErrGasLimitReached txs: []*types.Transaction{ - makeTx(key1, 0, common.Address{}, big.NewInt(0), params.TxGas*762, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas*762, big.NewInt(225000000000), nil), }, want: "could not apply tx 0 [0x76c07cc2b32007eb1a9c3fa066d579a3d77ec4ecb79bbc266624a601d7b08e46]: gas limit reached", }, @@ -218,24 +199,6 @@ func TestStateProcessorErrors(t *testing.T) { }, want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", }, - { // ErrMaxInitCodeSizeExceeded - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.TestInitialBaseFee), tooBigInitCode[:]), - }, - want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152", - }, - { // ErrIntrinsicGas: Not enough gas to cover init code - txs: []*types.Transaction{ - mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.TestInitialBaseFee), make([]byte, 320)), - }, - want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300", - }, - { // ErrBlobFeeCapTooLow - txs: []*types.Transaction{ - mkBlobTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(1), []common.Hash{(common.Hash{1})}), - }, - want: "could not apply tx 0 [0x6c11015985ce82db691d7b2d017acda296db88b811c3c60dc71449c76256c716]: max fee per gas less than block base fee: address 0x71562b71999873DB5b286dF957af199Ec94617F7, maxFeePerGas: 1 baseFee: 225000000000", - }, } { block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) @@ -338,6 +301,69 @@ func TestStateProcessorErrors(t *testing.T) { } } } + + // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (Durango/EIP-3860) enabled. 
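The two expected values in the restored cases below follow directly from the EIP-3860 (Durango/Shanghai) rules: MaxInitCodeSize is 2 * 24576 = 49152, hence the "code size 49153 limit 49152" failure, and the 54300 intrinsic gas is 21000 (base tx) + 32000 (creation) + 320 zero bytes * 4 + 10 init-code words * 2. A quick check with the constants inlined (the real code reads them from the params package):

package main

import "fmt"

// creationIntrinsicGas computes the intrinsic gas of a contract-creation
// transaction under Shanghai/Durango rules (EIP-2028 data costs plus the
// EIP-3860 init-code word cost). Constants are inlined here for illustration.
func creationIntrinsicGas(data []byte) uint64 {
	const (
		txGas           = 21000 // base cost of any transaction
		txCreateGas     = 32000 // surcharge for contract creation
		zeroByteGas     = 4
		nonZeroByteGas  = 16 // EIP-2028
		initCodeWordGas = 2  // EIP-3860
	)
	gas := uint64(txGas + txCreateGas)
	for _, b := range data {
		if b == 0 {
			gas += zeroByteGas
		} else {
			gas += nonZeroByteGas
		}
	}
	words := uint64((len(data) + 31) / 32)
	return gas + words*initCodeWordGas
}

func main() {
	fmt.Println(creationIntrinsicGas(make([]byte, 320))) // 54300 = 53000 + 1280 + 20
}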
+ { + var ( + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + MandatoryNetworkUpgrades: params.MandatoryNetworkUpgrades{ + SubnetEVMTimestamp: utils.NewUint64(0), + DurangoTimestamp: utils.NewUint64(0), + }, + FeeConfig: params.DefaultFeeConfig, + }, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + GasLimit: params.DefaultFeeConfig.GasLimit.Uint64(), + } + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, common.Hash{}, false) + tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} + smallInitCode = [320]byte{} + ) + defer blockchain.Stop() + for i, tt := range []struct { + txs []*types.Transaction + want string + }{ + { // ErrMaxInitCodeSizeExceeded + txs: []*types.Transaction{ + mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.TestInitialBaseFee), tooBigInitCode[:]), + }, + want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152", + }, + { // ErrIntrinsicGas: Not enough gas to cover init code + txs: []*types.Transaction{ + mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.TestInitialBaseFee), smallInitCode[:]), + }, + want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300", + }, + } { + block := GenerateBadBlock(gspec.ToBlock(), dummy.NewCoinbaseFaker(), tt.txs, gspec.Config) + _, err := blockchain.InsertChain(types.Blocks{block}) + if err == nil { + t.Fatal("block imported without errors") + } + if have, want := err.Error(), tt.want; have != want { + t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + } + } + } } // TestBadTxAllowListBlock tests the output generated when the @@ -449,7 +475,6 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr hasher := sha3.NewLegacyKeccak256() hasher.Write(header.Number.Bytes()) var cumulativeGas uint64 - var nBlobs int for _, tx := range txs { txh := tx.Hash() hasher.Write(txh[:]) @@ -458,20 +483,8 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr receipt.GasUsed = tx.Gas() receipts = append(receipts, receipt) cumulativeGas += tx.Gas() - nBlobs += len(tx.BlobHashes()) } header.Root = common.BytesToHash(hasher.Sum(nil)) - if config.IsCancun(header.Number, header.Time) { - var pExcess, pUsed = uint64(0), uint64(0) - if parent.ExcessBlobGas() != nil { - pExcess = *parent.ExcessBlobGas() - pUsed = *parent.BlobGasUsed() - } - excess := eip4844.CalcExcessBlobGas(pExcess, pUsed) - used := uint64(nBlobs * params.BlobTxBlobGasPerBlob) - header.ExcessBlobGas = &excess - header.BlobGasUsed = &used - } // Assemble and return the final block for sealing return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) } diff --git a/core/state_transition.go b/core/state_transition.go index 80c2013056..74b17f53e7 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -27,12 +27,10 @@ package core import ( - "errors" "fmt" 
"math" "math/big" - "github.com/ava-labs/subnet-evm/consensus/misc/eip4844" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" "github.com/ava-labs/subnet-evm/params" @@ -186,18 +184,16 @@ func toWordSize(size uint64) uint64 { // A Message contains the data derived from a single transaction that is relevant to state // processing. type Message struct { - To *common.Address - From common.Address - Nonce uint64 - Value *big.Int - GasLimit uint64 - GasPrice *big.Int - GasFeeCap *big.Int - GasTipCap *big.Int - Data []byte - AccessList types.AccessList - BlobGasFeeCap *big.Int - BlobHashes []common.Hash + To *common.Address + From common.Address + Nonce uint64 + Value *big.Int + GasLimit uint64 + GasPrice *big.Int + GasFeeCap *big.Int + GasTipCap *big.Int + Data []byte + AccessList types.AccessList // When SkipAccountChecks is true, the message nonce is not checked against the // account nonce in state. It also disables checking that the sender is an EOA. @@ -218,8 +214,6 @@ func TransactionToMessage(tx *types.Transaction, s types.Signer, baseFee *big.In Data: tx.Data(), AccessList: tx.AccessList(), SkipAccountChecks: false, - BlobHashes: tx.BlobHashes(), - BlobGasFeeCap: tx.BlobGasFeeCap(), } // If baseFee provided, set gasPrice to effectiveGasPrice. if baseFee != nil { @@ -293,24 +287,12 @@ func (st *StateTransition) to() common.Address { func (st *StateTransition) buyGas() error { mgval := new(big.Int).SetUint64(st.msg.GasLimit) mgval = mgval.Mul(mgval, st.msg.GasPrice) - balanceCheck := new(big.Int).Set(mgval) + balanceCheck := mgval if st.msg.GasFeeCap != nil { - balanceCheck.SetUint64(st.msg.GasLimit) - balanceCheck = balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) + balanceCheck = new(big.Int).SetUint64(st.msg.GasLimit) + balanceCheck.Mul(balanceCheck, st.msg.GasFeeCap) balanceCheck.Add(balanceCheck, st.msg.Value) } - if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { - if blobGas := st.blobGasUsed(); blobGas > 0 { - // Check that the user has enough funds to cover blobGasUsed * tx.BlobGasFeeCap - blobBalanceCheck := new(big.Int).SetUint64(blobGas) - blobBalanceCheck.Mul(blobBalanceCheck, st.msg.BlobGasFeeCap) - balanceCheck.Add(balanceCheck, blobBalanceCheck) - // Pay for blobGasUsed * actual blob fee - blobFee := new(big.Int).SetUint64(blobGas) - blobFee.Mul(blobFee, eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas)) - mgval.Add(mgval, blobFee) - } - } if have, want := st.state.GetBalance(st.msg.From), balanceCheck; have.Cmp(want) < 0 { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From.Hex(), have, want) } @@ -384,29 +366,6 @@ func (st *StateTransition) preCheck() error { } } } - // Check the blob version validity - if msg.BlobHashes != nil { - if len(msg.BlobHashes) == 0 { - return errors.New("blob transaction missing blob hashes") - } - for i, hash := range msg.BlobHashes { - if hash[0] != params.BlobTxHashVersion { - return fmt.Errorf("blob %d hash version mismatch (have %d, supported %d)", - i, hash[0], params.BlobTxHashVersion) - } - } - } - - if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time) { - if st.blobGasUsed() > 0 { - // Check that the user is paying at least the current blob fee - blobFee := eip4844.CalcBlobFee(*st.evm.Context.ExcessBlobGas) - if st.msg.BlobGasFeeCap.Cmp(blobFee) < 0 { - return fmt.Errorf("%w: address %v have %v want %v", ErrBlobFeeCapTooLow, st.msg.From.Hex(), st.msg.BlobGasFeeCap, blobFee) - } - } - } - return 
st.buyGas() } @@ -447,7 +406,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { var ( msg = st.msg sender = vm.AccountRef(msg.From) - rules = st.evm.ChainConfig().Rules(st.evm.Context.BlockNumber, st.evm.Context.Time) + rules = st.evm.ChainConfig().AvalancheRules(st.evm.Context.BlockNumber, st.evm.Context.Time) contractCreation = msg.To == nil ) @@ -521,8 +480,3 @@ func (st *StateTransition) refundGas(subnetEVM bool) { func (st *StateTransition) gasUsed() uint64 { return st.initialGas - st.gasRemaining } - -// blobGasUsed returns the amount of blob gas used by the message. -func (st *StateTransition) blobGasUsed() uint64 { - return uint64(len(st.msg.BlobHashes) * params.BlobTxBlobGasPerBlob) -} diff --git a/core/trie_stress_bench_test.go b/core/trie_stress_bench_test.go index faaea2ca10..8f7c0b9ce2 100644 --- a/core/trie_stress_bench_test.go +++ b/core/trie_stress_bench_test.go @@ -32,7 +32,6 @@ import ( "testing" "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -52,10 +51,6 @@ func BenchmarkTrie(t *testing.B) { func stressTestTrieDb(t *testing.B, numContracts int, callsPerBlock int, elements int64, gasTxLimit uint64) func(int, *BlockGen) { require := require.New(t) - config := params.TestChainConfig - signer := types.LatestSigner(config) - testKey, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - contractAddr := make([]common.Address, numContracts) contractTxs := make([]*types.Transaction, numContracts) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go deleted file mode 100644 index e4ab5dbfd7..0000000000 --- a/core/txpool/blobpool/blobpool.go +++ /dev/null @@ -1,1649 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package blobpool implements the EIP-4844 blob transaction pool. 
-package blobpool - -import ( - "container/heap" - "fmt" - "math" - "math/big" - "os" - "path/filepath" - "sort" - "sync" - "time" - - "github.com/ava-labs/subnet-evm/consensus/dummy" - "github.com/ava-labs/subnet-evm/consensus/misc/eip4844" - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/metrics" - "github.com/ava-labs/subnet-evm/params" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/holiman/billy" - "github.com/holiman/uint256" -) - -const ( - // blobSize is the protocol constrained byte size of a single blob in a - // transaction. There can be multiple of these embedded into a single tx. - blobSize = params.BlobTxFieldElementsPerBlob * params.BlobTxBytesPerFieldElement - - // maxBlobsPerTransaction is the maximum number of blobs a single transaction - // is allowed to contain. Whilst the spec states it's unlimited, the block - // data slots are protocol bound, which implicitly also limit this. - maxBlobsPerTransaction = params.BlobTxMaxBlobGasPerBlock / params.BlobTxBlobGasPerBlob - - // txAvgSize is an approximate byte size of a transaction metadata to avoid - // tiny overflows causing all txs to move a shelf higher, wasting disk space. - txAvgSize = 4 * 1024 - - // txMaxSize is the maximum size a single transaction can have, outside - // the included blobs. Since blob transactions are pulled instead of pushed, - // and only a small metadata is kept in ram, the rest is on disk, there is - // no critical limit that should be enforced. Still, capping it to some sane - // limit can never hurt. - txMaxSize = 1024 * 1024 - - // maxTxsPerAccount is the maximum number of blob transactions admitted from - // a single account. The limit is enforced to minimize the DoS potential of - // a private tx cancelling publicly propagated blobs. - // - // Note, transactions resurrected by a reorg are also subject to this limit, - // so pushing it down too aggressively might make resurrections non-functional. - maxTxsPerAccount = 16 - - // pendingTransactionStore is the subfolder containing the currently queued - // blob transactions. - pendingTransactionStore = "queue" - - // limboedTransactionStore is the subfolder containing the currently included - // but not yet finalized transaction blobs. - limboedTransactionStore = "limbo" -) - -// blobTx is a wrapper around types.BlobTx which also contains the literal blob -// data along with all the transaction metadata. -type blobTx struct { - Tx *types.Transaction - - Blobs []kzg4844.Blob - Commits []kzg4844.Commitment - Proofs []kzg4844.Proof -} - -// blobTxMeta is the minimal subset of types.BlobTx necessary to validate and -// schedule the blob transactions into the following blocks. Only ever add the -// bare minimum needed fields to keep the size down (and thus number of entries -// larger with the same memory consumption). 
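For orientation on the const block above: a blob is 4096 field elements of 32 bytes, so blobSize works out to 131072 bytes (128 KiB); with Cancun's 786432 blob-gas cap per block and 131072 blob gas charged per blob, at most 6 blobs fit in a block. The arithmetic, inlined rather than read from params as the deleted code does:

package main

import "fmt"

func main() {
	const (
		fieldElementsPerBlob = 4096   // EIP-4844 blob width
		bytesPerFieldElement = 32     // bytes per field element
		blobGasPerBlob       = 131072 // blob gas charged per blob
		maxBlobGasPerBlock   = 786432 // Cancun per-block blob-gas cap
	)
	fmt.Println(fieldElementsPerBlob * bytesPerFieldElement) // 131072 bytes, i.e. 128 KiB per blob
	fmt.Println(maxBlobGasPerBlock / blobGasPerBlob)         // 6 blobs per block at most
}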
-type blobTxMeta struct { - hash common.Hash // Transaction hash to maintain the lookup table - id uint64 // Storage ID in the pool's persistent store - size uint32 // Byte size in the pool's persistent store - - nonce uint64 // Needed to prioritize inclusion order within an account - costCap *uint256.Int // Needed to validate cumulative balance sufficiency - execTipCap *uint256.Int // Needed to prioritize inclusion order across accounts and validate replacement price bump - execFeeCap *uint256.Int // Needed to validate replacement price bump - blobFeeCap *uint256.Int // Needed to validate replacement price bump - - basefeeJumps float64 // Absolute number of 1559 fee adjustments needed to reach the tx's fee cap - blobfeeJumps float64 // Absolute number of 4844 fee adjustments needed to reach the tx's blob fee cap - - evictionExecTip *uint256.Int // Worst gas tip across all previous nonces - evictionExecFeeJumps float64 // Worst base fee (converted to fee jumps) across all previous nonces - evictionBlobFeeJumps float64 // Worse blob fee (converted to fee jumps) across all previous nonces -} - -// newBlobTxMeta retrieves the indexed metadata fields from a blob transaction -// and assembles a helper struct to track in memory. -func newBlobTxMeta(id uint64, size uint32, tx *types.Transaction) *blobTxMeta { - meta := &blobTxMeta{ - hash: tx.Hash(), - id: id, - size: size, - nonce: tx.Nonce(), - costCap: uint256.MustFromBig(tx.Cost()), - execTipCap: uint256.MustFromBig(tx.GasTipCap()), - execFeeCap: uint256.MustFromBig(tx.GasFeeCap()), - blobFeeCap: uint256.MustFromBig(tx.BlobGasFeeCap()), - } - meta.basefeeJumps = dynamicFeeJumps(meta.execFeeCap) - meta.blobfeeJumps = dynamicFeeJumps(meta.blobFeeCap) - - return meta -} - -// BlobPool is the transaction pool dedicated to EIP-4844 blob transactions. -// -// Blob transactions are special snowflakes that are designed for a very specific -// purpose (rollups) and are expected to adhere to that specific use case. These -// behavioural expectations allow us to design a transaction pool that is more robust -// (i.e. resending issues) and more resilient to DoS attacks (e.g. replace-flush -// attacks) than the generic tx pool. These improvements will also mean, however, -// that we enforce a significantly more aggressive strategy on entering and exiting -// the pool: -// -// - Blob transactions are large. With the initial design aiming for 128KB blobs, -// we must ensure that these only traverse the network the absolute minimum -// number of times. Broadcasting to sqrt(peers) is out of the question, rather -// these should only ever be announced and the remote side should request it if -// it wants to. -// -// - Block blob-space is limited. With blocks being capped to a few blob txs, we -// can make use of the very low expected churn rate within the pool. Notably, -// we should be able to use a persistent disk backend for the pool, solving -// the tx resend issue that plagues the generic tx pool, as long as there's no -// artificial churn (i.e. pool wars). -// -// - Purpose of blobs are layer-2s. Layer-2s are meant to use blob transactions to -// commit to their own current state, which is independent of Ethereum mainnet -// (state, txs). This means that there's no reason for blob tx cancellation or -// replacement, apart from a potential basefee / miner tip adjustment. -// -// - Replacements are expensive. Given their size, propagating a replacement -// blob transaction to an existing one should be aggressively discouraged. 
-// Whilst generic transactions can start at 1 Wei gas cost and require a 10% -// fee bump to replace, we suggest requiring a higher min cost (e.g. 1 gwei) -// and a more aggressive bump (100%). -// -// - Cancellation is prohibitive. Evicting an already propagated blob tx is a huge -// DoS vector. As such, a) replacement (higher-fee) blob txs mustn't invalidate -// already propagated (future) blob txs (cumulative fee); b) nonce-gapped blob -// txs are disallowed; c) the presence of blob transactions exclude non-blob -// transactions. -// -// - Malicious cancellations are possible. Although the pool might prevent txs -// that cancel blobs, blocks might contain such transaction (malicious miner -// or flashbotter). The pool should cap the total number of blob transactions -// per account as to prevent propagating too much data before cancelling it -// via a normal transaction. It should nonetheless be high enough to support -// resurrecting reorged transactions. Perhaps 4-16. -// -// - Local txs are meaningless. Mining pools historically used local transactions -// for payouts or for backdoor deals. With 1559 in place, the basefee usually -// dominates the final price, so 0 or non-0 tip doesn't change much. Blob txs -// retain the 1559 2D gas pricing (and introduce on top a dynamic blob gas fee), -// so locality is moot. With a disk backed blob pool avoiding the resend issue, -// there's also no need to save own transactions for later. -// -// - No-blob blob-txs are bad. Theoretically there's no strong reason to disallow -// blob txs containing 0 blobs. In practice, admitting such txs into the pool -// breaks the low-churn invariant as blob constraints don't apply anymore. Even -// though we could accept blocks containing such txs, a reorg would require moving -// them back into the blob pool, which can break invariants. -// -// - Dropping blobs needs delay. When normal transactions are included, they -// are immediately evicted from the pool since they are contained in the -// including block. Blobs however are not included in the execution chain, -// so a mini reorg cannot re-pool "lost" blob transactions. To support reorgs, -// blobs are retained on disk until they are finalised. -// -// - Blobs can arrive via flashbots. Blocks might contain blob transactions we -// have never seen on the network. Since we cannot recover them from blocks -// either, the engine_newPayload needs to give them to us, and we cache them -// until finality to support reorgs without tx losses. -// -// Whilst some constraints above might sound overly aggressive, the general idea is -// that the blob pool should work robustly for its intended use case and whilst -// anyone is free to use blob transactions for arbitrary non-rollup use cases, -// they should not be allowed to run amok the network. -// -// Implementation wise there are a few interesting design choices: -// -// - Adding a transaction to the pool blocks until persisted to disk. This is -// viable because TPS is low (2-4 blobs per block initially, maybe 8-16 at -// peak), so natural churn is a couple MB per block. Replacements doing O(n) -// updates are forbidden and transaction propagation is pull based (i.e. no -// pileup of pending data). -// -// - When transactions are chosen for inclusion, the primary criteria is the -// signer tip (and having a basefee/data fee high enough of course). However, -// same-tip transactions will be split by their basefee/datafee, preferring -// those that are closer to the current network limits. 
The idea being that
-// very relaxed ones can be included even if the fees go up, when the closer
-// ones could already be invalid.
-//
-// When the pool eventually reaches saturation, some old transactions - that may
-// never execute - will need to be evicted in favor of newer ones. The eviction
-// strategy is quite complex:
-//
-// - Exceeding capacity evicts the highest-nonce of the account with the lowest
-// paying blob transaction anywhere in the pooled nonce-sequence, as that tx
-// would be executed the furthest in the future and is thus blocking anything
-// after it. The smallest is deliberately not evicted to avoid a nonce-gap.
-//
-// - Analogously, if the pool is full, the consideration price of a new tx for
-// evicting an old one is the smallest price in the entire nonce-sequence of
-// the account. This avoids malicious users DoSing the pool with seemingly
-// high paying transactions hidden behind a low-paying blocked one.
-//
-// - Since blob transactions have 3 price parameters: execution tip, execution
-// fee cap and data fee cap, there's no singular parameter to create a total
-// price ordering on. What's more, since the base fee and blob fee can move
-// independently of one another, there's no pre-defined way to combine them
-// into a stable order either. This leads to a multi-dimensional problem to
-// solve after every block.
-//
-// - The first observation is that comparing 1559 base fees or 4844 blob fees
-// needs to happen in the context of their dynamism. Since these fees jump
-// up or down in ~1.125 multipliers (at max) across blocks, comparing fees
-// in two transactions should be based on log1.125(fee) to eliminate noise.
-//
-// - The second observation is that the basefee and blobfee move independently,
-// so there's no way to split mixed txs on their own (A has higher base fee,
-// B has higher blob fee). Rather than look at the absolute fees, the useful
-// metric is the max time it can take to exceed the transaction's fee caps.
-// Specifically, we're interested in the number of jumps needed to go from
-// the current fee to the transaction's cap:
-//
-// jumps = log1.125(txfee) - log1.125(basefee)
-//
-// - The third observation is that the base fee tends to hover around rather
-// than swing wildly. The number of jumps needed from the current fee starts
-// to get less relevant the higher it is. To remove the noise here too, the
-// pool will use log(jumps) as the delta for comparing transactions.
-//
-// delta = sign(jumps) * log(abs(jumps))
-//
-// - To establish a total order, we need to reduce the dimensionality of the
-// two base fees (log jumps) to a single value. The interesting aspect from
-// the pool's perspective is how fast a tx will get executable (fees going
-// down, crossing the smaller negative jump counter) or non-executable (fees
-// going up, crossing the smaller positive jump counter). As such, the pool
-// cares only about the min of the two delta values for eviction priority.
-//
-// priority = min(delta-basefee, delta-blobfee)
-//
-// - The above very aggressive dimensionality and noise reduction should result
-// in transactions being grouped into a small number of buckets, the further
-// the fees the larger the buckets. This is good because it allows us to use
-// the miner tip meaningfully as a splitter.
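To make the jump arithmetic concrete, here is a minimal, self-contained Go sketch of the computation described above. The helper names and the float64 shapes are illustrative assumptions, deliberately simpler than the pool's actual dynamicFeeJumps/eviction helpers:

package main

import (
	"fmt"
	"math"
)

// feeJumps returns log1.125(fee): the number of maximal fee adjustments
// separating one wei from the given fee.
func feeJumps(fee float64) float64 {
	return math.Log(fee) / math.Log(1.125)
}

// delta computes sign(jumps) * log(abs(jumps)) for the number of jumps needed
// to move the current fee up to a transaction's cap.
func delta(current, txCap float64) float64 {
	jumps := feeJumps(txCap) - feeJumps(current)
	if jumps == 0 {
		return 0
	}
	d := math.Log(math.Abs(jumps))
	if jumps < 0 {
		d = -d
	}
	return d
}

func main() {
	basefee, blobfee := 10e9, 1e7 // assumed current network fees, in wei
	execCap, blobCap := 40e9, 4e7 // assumed transaction fee caps
	// priority = min(delta-basefee, delta-blobfee); positive values are further
	// clamped to zero, as the next bullet describes.
	priority := math.Min(delta(basefee, execCap), delta(blobfee, blobCap))
	fmt.Println(math.Min(priority, 0))
}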
-// -// - For the scenario where the pool does not contain non-executable blob txs -// anymore, it does not make sense to grant a later eviction priority to txs -// with high fee caps since it could enable pool wars. As such, any positive -// priority will be grouped together. -// -// priority = min(delta-basefee, delta-blobfee, 0) -// -// Optimisation tradeoffs: -// -// - Eviction relies on 3 fee minimums per account (exec tip, exec cap and blob -// cap). Maintaining these values across all transactions from the account is -// problematic as each transaction replacement or inclusion would require a -// rescan of all other transactions to recalculate the minimum. Instead, the -// pool maintains a rolling minimum across the nonce range. Updating all the -// minimums will need to be done only starting at the swapped in/out nonce -// and leading up to the first no-change. -type BlobPool struct { - config Config // Pool configuration - reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools - - store billy.Database // Persistent data store for the tx metadata and blobs - stored uint64 // Useful data size of all transactions on disk - limbo *limbo // Persistent data store for the non-finalized blobs - - signer types.Signer // Transaction signer to use for sender recovery - chain BlockChain // Chain object to access the state through - - head *types.Header // Current head of the chain - state *state.StateDB // Current state at the head of the chain - gasTip *uint256.Int // Currently accepted minimum gas tip - - lookup map[common.Hash]uint64 // Lookup table mapping hashes to tx billy entries - index map[common.Address][]*blobTxMeta // Blob transactions grouped by accounts, sorted by nonce - spent map[common.Address]*uint256.Int // Expenditure tracking for individual accounts - evict *evictHeap // Heap of cheapest accounts for eviction when full - - eventFeed event.Feed // Event feed to send out new tx events on pool inclusion - eventScope event.SubscriptionScope // Event scope to track and mass unsubscribe on termination - - lock sync.RWMutex // Mutex protecting the pool during reorg handling -} - -// New creates a new blob transaction pool to gather, sort and filter inbound -// blob transactions from the network. -func New(config Config, chain BlockChain) *BlobPool { - // Sanitize the input to ensure no vulnerable gas prices are set - config = (&config).sanitize() - - // Create the transaction pool with its initial settings - return &BlobPool{ - config: config, - signer: types.LatestSigner(chain.Config()), - chain: chain, - lookup: make(map[common.Hash]uint64), - index: make(map[common.Address][]*blobTxMeta), - spent: make(map[common.Address]*uint256.Int), - } -} - -// Filter returns whether the given transaction can be consumed by the blob pool. -func (p *BlobPool) Filter(tx *types.Transaction) bool { - return tx.Type() == types.BlobTxType -} - -// Init sets the gas price needed to keep a transaction in the pool and the chain -// head to allow balance / nonce checks. The transaction journal will be loaded -// from disk and filtered based on the provided starting settings. 
-func (p *BlobPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error {
- p.reserve = reserve
-
- var (
- queuedir string
- limbodir string
- )
- if p.config.Datadir != "" {
- queuedir = filepath.Join(p.config.Datadir, pendingTransactionStore)
- if err := os.MkdirAll(queuedir, 0700); err != nil {
- return err
- }
- limbodir = filepath.Join(p.config.Datadir, limboedTransactionStore)
- if err := os.MkdirAll(limbodir, 0700); err != nil {
- return err
- }
- }
- state, err := p.chain.StateAt(head.Root)
- if err != nil {
- return err
- }
- p.head, p.state = head, state
-
- // Index all transactions on disk and delete anything unprocessable
- var fails []uint64
- index := func(id uint64, size uint32, blob []byte) {
- if p.parseTransaction(id, size, blob) != nil {
- fails = append(fails, id)
- }
- }
- store, err := billy.Open(billy.Options{Path: queuedir}, newSlotter(), index)
- if err != nil {
- return err
- }
- p.store = store
-
- if len(fails) > 0 {
- log.Warn("Dropping invalidated blob transactions", "ids", fails)
- for _, id := range fails {
- if err := p.store.Delete(id); err != nil {
- p.Close()
- return err
- }
- }
- }
- // Sort the indexed transactions by nonce and delete anything gapped, create
- // the eviction heap of anyone still standing
- for addr := range p.index {
- p.recheck(addr, nil)
- }
- feeConfig, _, err := p.chain.GetFeeConfigAt(p.head)
- if err != nil {
- p.Close()
- return err
- }
- _, baseFee, err := dummy.EstimateNextBaseFee(
- p.chain.Config(),
- feeConfig,
- p.head,
- uint64(time.Now().Unix()),
- )
- if err != nil {
- p.Close()
- return err
- }
- var (
- // basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), p.head))
- basefee = uint256.MustFromBig(baseFee)
- blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
- )
- if p.head.ExcessBlobGas != nil {
- blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*p.head.ExcessBlobGas))
- }
- p.evict = newPriceHeap(basefee, blobfee, &p.index)
-
- // Pool initialized, attach the blob limbo to it to track blobs included
- // recently but not yet finalized
- p.limbo, err = newLimbo(limbodir)
- if err != nil {
- p.Close()
- return err
- }
- // Set the configured gas tip, triggering a filtering of anything just loaded
- basefeeGauge.Update(int64(basefee.Uint64()))
- blobfeeGauge.Update(int64(blobfee.Uint64()))
-
- p.SetGasTip(gasTip)
-
- // Since the user might have modified their pool's capacity, evict anything
- // above the current allowance
- for p.stored > p.config.Datacap {
- p.drop()
- }
- // Update the metrics and return the constructed pool
- datacapGauge.Update(int64(p.config.Datacap))
- p.updateStorageMetrics()
- return nil
-}
-
-// Close closes down the underlying persistent store.
-func (p *BlobPool) Close() error {
- var errs []error
- if p.limbo != nil {
- if err := p.limbo.Close(); err != nil {
- errs = append(errs, err)
- }
- }
- if err := p.store.Close(); err != nil {
- errs = append(errs, err)
- }
- p.eventScope.Close()
-
- switch {
- case errs == nil:
- return nil
- case len(errs) == 1:
- return errs[0]
- default:
- return fmt.Errorf("%v", errs)
- }
-}
-
-// parseTransaction is a callback method on pool creation that gets called for
-// each transaction on disk to create the in-memory metadata index.
-func (p *BlobPool) parseTransaction(id uint64, size uint32, blob []byte) error {
- item := new(blobTx)
- if err := rlp.DecodeBytes(blob, item); err != nil {
- // This path is impossible unless the disk data representation changes
- // across restarts. For that ever improbable case, recover gracefully
- // by ignoring this data entry.
- log.Error("Failed to decode blob pool entry", "id", id, "err", err)
- return err
- }
- meta := newBlobTxMeta(id, size, item.Tx)
-
- sender, err := p.signer.Sender(item.Tx)
- if err != nil {
- // This path is impossible unless the signature validity changes across
- // restarts. For that ever improbable case, recover gracefully by ignoring
- // this data entry.
- log.Error("Failed to recover blob tx sender", "id", id, "hash", item.Tx.Hash(), "err", err)
- return err
- }
- if _, ok := p.index[sender]; !ok {
- if err := p.reserve(sender, true); err != nil {
- return err
- }
- p.index[sender] = []*blobTxMeta{}
- p.spent[sender] = new(uint256.Int)
- }
- p.index[sender] = append(p.index[sender], meta)
- p.spent[sender] = new(uint256.Int).Add(p.spent[sender], meta.costCap)
-
- p.lookup[meta.hash] = meta.id
- p.stored += uint64(meta.size)
-
- return nil
-}
-
-// recheck verifies the pool's content for a specific account and drops anything
-// that does not fit anymore (dangling or filled nonce, overdraft).
-func (p *BlobPool) recheck(addr common.Address, inclusions map[common.Hash]uint64) {
- // Sort the transactions belonging to the account so reinjects can be simpler
- txs := p.index[addr]
- if inclusions != nil && txs == nil { // during reorgs, we might find new accounts
- return
- }
- sort.Slice(txs, func(i, j int) bool {
- return txs[i].nonce < txs[j].nonce
- })
- // If there is a gap between the chain state and the blob pool, drop
- // all the transactions as they are non-executable. Similarly, if the
- // entire tx range was included, drop all.
- var (
- next = p.state.GetNonce(addr)
- gapped = txs[0].nonce > next
- filled = txs[len(txs)-1].nonce < next
- )
- if gapped || filled {
- var (
- ids []uint64
- nonces []uint64
- )
- for i := 0; i < len(txs); i++ {
- ids = append(ids, txs[i].id)
- nonces = append(nonces, txs[i].nonce)
-
- p.stored -= uint64(txs[i].size)
- delete(p.lookup, txs[i].hash)
-
- // Included transactions' blobs need to be moved to the limbo
- if filled && inclusions != nil {
- p.offload(addr, txs[i].nonce, txs[i].id, inclusions)
- }
- }
- delete(p.index, addr)
- delete(p.spent, addr)
- if inclusions != nil { // the heap is only initialized during reorgs
- heap.Remove(p.evict, p.evict.index[addr])
- }
- p.reserve(addr, false)
-
- if gapped {
- log.Warn("Dropping dangling blob transactions", "from", addr, "missing", next, "drop", nonces, "ids", ids)
- } else {
- log.Trace("Dropping filled blob transactions", "from", addr, "filled", nonces, "ids", ids)
- }
- for _, id := range ids {
- if err := p.store.Delete(id); err != nil {
- log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
- }
- }
- return
- }
- // If there is overlap between the chain state and the blob pool, drop
- // anything below the current state
- if txs[0].nonce < next {
- var (
- ids []uint64
- nonces []uint64
- )
- for txs[0].nonce < next {
- ids = append(ids, txs[0].id)
- nonces = append(nonces, txs[0].nonce)
-
- p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[0].costCap)
- p.stored -= uint64(txs[0].size)
- delete(p.lookup, txs[0].hash)
-
- // Included transactions' blobs need to be moved to the limbo
- if inclusions != nil {
- p.offload(addr, txs[0].nonce, txs[0].id, inclusions)
- }
- txs = txs[1:]
- }
- log.Trace("Dropping overlapped blob transactions", "from", addr, "overlapped", nonces, "ids", ids, "left", len(txs))
- for _, id := range ids {
- if err := p.store.Delete(id); err != nil {
- log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
- }
- }
- p.index[addr] = txs
- }
- // Iterate over the transactions to initialize their eviction thresholds
- // and to detect any nonce gaps
- txs[0].evictionExecTip = txs[0].execTipCap
- txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
- txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
-
- for i := 1; i < len(txs); i++ {
- // If there's no nonce gap, initialize the eviction thresholds as the
- // minimum between the cumulative thresholds and the current tx fees
- if txs[i].nonce == txs[i-1].nonce+1 {
- txs[i].evictionExecTip = txs[i-1].evictionExecTip
- if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
- txs[i].evictionExecTip = txs[i].execTipCap
- }
- txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
- if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
- txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
- }
- txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
- if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
- txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
- }
- continue
- }
- // Sanity check that there's no double nonce. This case would be a coding
- // error, but better know about it
- if txs[i].nonce == txs[i-1].nonce {
- log.Error("Duplicate nonce blob transaction", "from", addr, "nonce", txs[i].nonce)
- }
- // Otherwise if there's a nonce gap evict all later transactions
- var (
- ids []uint64
- nonces []uint64
- )
- for j := i; j < len(txs); j++ {
- ids = append(ids, txs[j].id)
- nonces = append(nonces, txs[j].nonce)
-
- p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[j].costCap)
- p.stored -= uint64(txs[j].size)
- delete(p.lookup, txs[j].hash)
- }
- txs = txs[:i]
-
- log.Error("Dropping gapped blob transactions", "from", addr, "missing", txs[i-1].nonce+1, "drop", nonces, "ids", ids)
- for _, id := range ids {
- if err := p.store.Delete(id); err != nil {
- log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
- }
- }
- p.index[addr] = txs
- break
- }
- // Ensure that there's no over-draft, this is expected to happen when some
- // transactions get included without publishing on the network
- var (
- balance = uint256.MustFromBig(p.state.GetBalance(addr))
- spent = p.spent[addr]
- )
- if spent.Cmp(balance) > 0 {
- // Evict the highest nonce transactions until the pending set falls under
- // the account's available balance
- var (
- ids []uint64
- nonces []uint64
- )
- for p.spent[addr].Cmp(balance) > 0 {
- last := txs[len(txs)-1]
- txs[len(txs)-1] = nil
- txs = txs[:len(txs)-1]
-
- ids = append(ids, last.id)
- nonces = append(nonces, last.nonce)
-
- p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
- p.stored -= uint64(last.size)
- delete(p.lookup, last.hash)
- }
- if len(txs) == 0 {
- delete(p.index, addr)
- delete(p.spent, addr)
- if inclusions != nil { // the heap is only initialized during reorgs
- heap.Remove(p.evict, p.evict.index[addr])
- }
- p.reserve(addr, false)
- } else {
- p.index[addr] = txs
- }
- log.Warn("Dropping overdrafted blob transactions", "from", addr, "balance", balance, "spent", spent, "drop", nonces, "ids", ids)
- for _, id := range ids {
- if err := p.store.Delete(id); err != nil {
- log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
- }
- }
- }
- // Sanity check that no account can have more queued transactions than the
- // DoS protection threshold.
- if len(txs) > maxTxsPerAccount {
- // Evict the highest nonce transactions until the pending set falls under
- // the account's transaction cap
- var (
- ids []uint64
- nonces []uint64
- )
- for len(txs) > maxTxsPerAccount {
- last := txs[len(txs)-1]
- txs[len(txs)-1] = nil
- txs = txs[:len(txs)-1]
-
- ids = append(ids, last.id)
- nonces = append(nonces, last.nonce)
-
- p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], last.costCap)
- p.stored -= uint64(last.size)
- delete(p.lookup, last.hash)
- }
- p.index[addr] = txs
-
- log.Warn("Dropping overcapped blob transactions", "from", addr, "kept", len(txs), "drop", nonces, "ids", ids)
- for _, id := range ids {
- if err := p.store.Delete(id); err != nil {
- log.Error("Failed to delete blob transaction", "from", addr, "id", id, "err", err)
- }
- }
- }
- // Included cheap transactions might have left the remaining ones better from
- // an eviction point of view; fix any potential issues in the heap.
- if _, ok := p.index[addr]; ok && inclusions != nil {
- heap.Fix(p.evict, p.evict.index[addr])
- }
-}
-
-// offload removes a tracked blob transaction from the pool and moves it into the
-// limbo for tracking until finality.
-//
-// The method may log errors for various unexpected scenarios but will not return
-// any of them since there's no clear error case. Some errors may be due to coding
-// issues, others caused by signers mining MEV stuff or swapping transactions. In
-// all cases, the pool needs to continue operating.
-func (p *BlobPool) offload(addr common.Address, nonce uint64, id uint64, inclusions map[common.Hash]uint64) {
- data, err := p.store.Get(id)
- if err != nil {
- log.Error("Blobs missing for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
- return
- }
- item := new(blobTx)
- if err = rlp.DecodeBytes(data, item); err != nil {
- log.Error("Blobs corrupted for included transaction", "from", addr, "nonce", nonce, "id", id, "err", err)
- return
- }
- block, ok := inclusions[item.Tx.Hash()]
- if !ok {
- log.Warn("Blob transaction swapped out by signer", "from", addr, "nonce", nonce, "id", id)
- return
- }
- if err := p.limbo.push(item.Tx.Hash(), block, item.Blobs, item.Commits, item.Proofs); err != nil {
- log.Warn("Failed to offload blob tx into limbo", "err", err)
- return
- }
-}
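The overdraft and cap checks above share one shape: trim from the highest nonce downward until an invariant holds again. A minimal, self-contained sketch of that trimming loop, over assumed simplified stand-in types rather than the pool's own:

package main

import (
	"fmt"
	"math/big"
)

// txMeta is a simplified stand-in for the pool's per-transaction metadata.
type txMeta struct {
	nonce   uint64
	costCap *big.Int
}

// trimOverdraft drops transactions from the tail (highest nonces first) until
// the cumulative cost no longer exceeds the account's balance, returning the
// kept prefix and the dropped nonces.
func trimOverdraft(txs []txMeta, spent, balance *big.Int) ([]txMeta, []uint64) {
	spent = new(big.Int).Set(spent)
	var dropped []uint64
	for spent.Cmp(balance) > 0 && len(txs) > 0 {
		last := txs[len(txs)-1]
		txs = txs[:len(txs)-1]
		spent.Sub(spent, last.costCap)
		dropped = append(dropped, last.nonce)
	}
	return txs, dropped
}

func main() {
	txs := []txMeta{{0, big.NewInt(50)}, {1, big.NewInt(50)}, {2, big.NewInt(50)}}
	kept, dropped := trimOverdraft(txs, big.NewInt(150), big.NewInt(100))
	fmt.Println(len(kept), dropped) // 2 [2]
}

-
-// Reset implements txpool.SubPool, allowing the blob pool's internal state to be
-// kept in sync with the main transaction pool's internal state.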
-func (p *BlobPool) Reset(oldHead, newHead *types.Header) {
- waitStart := time.Now()
- p.lock.Lock()
- resetwaitHist.Update(time.Since(waitStart).Nanoseconds())
- defer p.lock.Unlock()
-
- defer func(start time.Time) {
- resettimeHist.Update(time.Since(start).Nanoseconds())
- }(time.Now())
-
- statedb, err := p.chain.StateAt(newHead.Root)
- if err != nil {
- log.Error("Failed to reset blobpool state", "err", err)
- return
- }
- p.head = newHead
- p.state = statedb
-
- // Run the reorg between the old and new head and figure out which accounts
- // need to be rechecked and which transactions need to be readded
- if reinject, inclusions := p.reorg(oldHead, newHead); reinject != nil {
- for addr, txs := range reinject {
- // Blindly push all the lost transactions back into the pool
- for _, tx := range txs {
- p.reinject(addr, tx)
- }
- // Recheck the account's pooled transactions to drop included and
- // invalidated ones
- p.recheck(addr, inclusions)
- }
- }
- // Flush out any blobs from limbo that are older than the latest finality
- if p.chain.Config().IsCancun(p.head.Number, p.head.Time) {
- p.limbo.finalize(p.chain.CurrentFinalBlock())
- }
- feeConfig, _, err := p.chain.GetFeeConfigAt(p.head)
- if err != nil {
- log.Error("Failed to get fee config to reset blobpool fees", "err", err)
- return
- }
- _, baseFee, err := dummy.EstimateNextBaseFee(
- p.chain.Config(),
- feeConfig,
- p.head,
- uint64(time.Now().Unix()),
- )
- if err != nil {
- log.Error("Failed to estimate next base fee to reset blobpool fees", "err", err)
- return
- }
- // Reset the price heap for the new set of basefee/blobfee pairs
- var (
- // basefee = uint256.MustFromBig(eip1559.CalcBaseFee(p.chain.Config(), newHead))
- basefee = uint256.MustFromBig(baseFee)
- blobfee = uint256.MustFromBig(big.NewInt(params.BlobTxMinBlobGasprice))
- )
- if newHead.ExcessBlobGas != nil {
- blobfee = uint256.MustFromBig(eip4844.CalcBlobFee(*newHead.ExcessBlobGas))
- }
- p.evict.reinit(basefee, blobfee, false)
-
- basefeeGauge.Update(int64(basefee.Uint64()))
- blobfeeGauge.Update(int64(blobfee.Uint64()))
- p.updateStorageMetrics()
-}
-
-// reorg assembles all the transactors and missing transactions between an old
-// and new head to figure out which account's tx set needs to be rechecked and
-// which transactions need to be requeued.
-//
-// The transaction block-inclusion infos are also returned to allow tracking any
-// just-included blocks by block number in the limbo.
-func (p *BlobPool) reorg(oldHead, newHead *types.Header) (map[common.Address][]*types.Transaction, map[common.Hash]uint64) { - // If the pool was not yet initialized, don't do anything - if oldHead == nil { - return nil, nil - } - // If the reorg is too deep, avoid doing it (will happen during snap sync) - oldNum := oldHead.Number.Uint64() - newNum := newHead.Number.Uint64() - - if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { - return nil, nil - } - // Reorg seems shallow enough to pull in all transactions into memory - var ( - transactors = make(map[common.Address]struct{}) - discarded = make(map[common.Address][]*types.Transaction) - included = make(map[common.Address][]*types.Transaction) - inclusions = make(map[common.Hash]uint64) - - rem = p.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) - add = p.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) - ) - if add == nil { - // if the new head is nil, it means that something happened between - // the firing of newhead-event and _now_: most likely a - // reorg caused by sync-reversion or explicit sethead back to an - // earlier block. - log.Warn("Blobpool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash()) - return nil, nil - } - if rem == nil { - // This can happen if a setHead is performed, where we simply discard - // the old head from the chain. If that is the case, we don't have the - // lost transactions anymore, and there's nothing to add. - if newNum >= oldNum { - // If we reorged to a same or higher number, then it's not a case - // of setHead - log.Warn("Blobpool reset with missing old head", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - return nil, nil - } - // If the reorg ended up on a lower number, it's indicative of setHead - // being the cause - log.Debug("Skipping blobpool reset caused by setHead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - return nil, nil - } - // Both old and new blocks exist, traverse through the progression chain - // and accumulate the transactors and transactions - for rem.NumberU64() > add.NumberU64() { - for _, tx := range rem.Transactions() { - from, _ := p.signer.Sender(tx) - - discarded[from] = append(discarded[from], tx) - transactors[from] = struct{}{} - } - if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash()) - return nil, nil - } - } - for add.NumberU64() > rem.NumberU64() { - for _, tx := range add.Transactions() { - from, _ := p.signer.Sender(tx) - - included[from] = append(included[from], tx) - inclusions[tx.Hash()] = add.NumberU64() - transactors[from] = struct{}{} - } - if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash()) - return nil, nil - } - } - for rem.Hash() != add.Hash() { - for _, tx := range rem.Transactions() { - from, _ := p.signer.Sender(tx) - - discarded[from] = append(discarded[from], tx) - transactors[from] = struct{}{} - } - if rem = p.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by blobpool", "block", oldHead.Number, "hash", oldHead.Hash()) - return nil, nil - } - for _, tx := range add.Transactions() { - from, _ := p.signer.Sender(tx) - - included[from] = append(included[from], tx) - inclusions[tx.Hash()] = 
add.NumberU64()
- transactors[from] = struct{}{}
- }
- if add = p.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
- log.Error("Unrooted new chain seen by blobpool", "block", newHead.Number, "hash", newHead.Hash())
- return nil, nil
- }
- }
- // Generate the set of transactions per address to pull back into the pool,
- // also updating the rest along the way
- reinject := make(map[common.Address][]*types.Transaction)
- for addr := range transactors {
- // Generate the set that was lost to reinject into the pool
- lost := make([]*types.Transaction, 0, len(discarded[addr]))
- for _, tx := range types.TxDifference(discarded[addr], included[addr]) {
- if p.Filter(tx) {
- lost = append(lost, tx)
- }
- }
- reinject[addr] = lost
-
- // Update the set that was already reincluded to track the blocks in limbo
- for _, tx := range types.TxDifference(included[addr], discarded[addr]) {
- if p.Filter(tx) {
- p.limbo.update(tx.Hash(), inclusions[tx.Hash()])
- }
- }
- }
- return reinject, inclusions
-}
-
-// reinject blindly pushes a transaction previously included in the chain - and
-// just reorged out - into the pool. The transaction is assumed valid (having
-// been in the chain), thus the only validation needed is nonce sorting and over-
-// draft checks after injection.
-//
-// Note, the method will not initialize the eviction cache values as those will
-// be done once for all transactions belonging to an account after all individual
-// transactions are injected back into the pool.
-func (p *BlobPool) reinject(addr common.Address, tx *types.Transaction) {
- // Retrieve the associated blob from the limbo. Without the blobs, we cannot
- // add the transaction back into the pool as it is not mineable.
- blobs, commits, proofs, err := p.limbo.pull(tx.Hash())
- if err != nil {
- log.Error("Blobs unavailable, dropping reorged tx", "err", err)
- return
- }
- // Serialize the transaction back into the primary datastore
- blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs})
- if err != nil {
- log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err)
- return
- }
- id, err := p.store.Put(blob)
- if err != nil {
- log.Error("Failed to write transaction into storage", "hash", tx.Hash(), "err", err)
- return
- }
- // Update the indices and metrics
- meta := newBlobTxMeta(id, p.store.Size(id), tx)
-
- if _, ok := p.index[addr]; !ok {
- if err := p.reserve(addr, true); err != nil {
- log.Warn("Failed to reserve account for blob pool", "tx", tx.Hash(), "from", addr, "err", err)
- return
- }
- p.index[addr] = []*blobTxMeta{meta}
- p.spent[addr] = meta.costCap
- p.evict.Push(addr)
- } else {
- p.index[addr] = append(p.index[addr], meta)
- p.spent[addr] = new(uint256.Int).Add(p.spent[addr], meta.costCap)
- }
- p.lookup[meta.hash] = meta.id
- p.stored += uint64(meta.size)
-}
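The reinjection bookkeeping above hinges on a set difference between the discarded and included transaction lists. A small stand-in sketch of that step, with string IDs standing in for transaction hashes (types.TxDifference is the real helper):

package main

import "fmt"

// txDifference returns the entries of a that are not in b; string IDs stand in
// for the transaction hashes keyed on by the real types.TxDifference.
func txDifference(a, b []string) []string {
	keep := make(map[string]struct{}, len(b))
	for _, tx := range b {
		keep[tx] = struct{}{}
	}
	var diff []string
	for _, tx := range a {
		if _, ok := keep[tx]; !ok {
			diff = append(diff, tx)
		}
	}
	return diff
}

func main() {
	discarded := []string{"a", "b", "c"} // txs only on the old branch
	included := []string{"b", "d"}       // txs on the new branch
	fmt.Println(txDifference(discarded, included)) // [a c]: reinject into the pool
	fmt.Println(txDifference(included, discarded)) // [d]: track in the limbo
}

-
-// SetGasTip implements txpool.SubPool, allowing the blob pool's gas requirements
-// to be kept in sync with the main transaction pool's gas requirements.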
-func (p *BlobPool) SetGasTip(tip *big.Int) { - p.lock.Lock() - defer p.lock.Unlock() - - // Store the new minimum gas tip - old := p.gasTip - p.gasTip = uint256.MustFromBig(tip) - - // If the min miner fee increased, remove transactions below the new threshold - if old == nil || p.gasTip.Cmp(old) > 0 { - for addr, txs := range p.index { - for i, tx := range txs { - if tx.execTipCap.Cmp(p.gasTip) < 0 { - // Drop the offending transaction - var ( - ids = []uint64{tx.id} - nonces = []uint64{tx.nonce} - ) - p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], txs[i].costCap) - p.stored -= uint64(tx.size) - delete(p.lookup, tx.hash) - txs[i] = nil - - // Drop everything afterwards, no gaps allowed - for j, tx := range txs[i+1:] { - ids = append(ids, tx.id) - nonces = append(nonces, tx.nonce) - - p.spent[addr] = new(uint256.Int).Sub(p.spent[addr], tx.costCap) - p.stored -= uint64(tx.size) - delete(p.lookup, tx.hash) - txs[i+1+j] = nil - } - // Clear out the dropped transactions from the index - if i > 0 { - p.index[addr] = txs[:i] - heap.Fix(p.evict, p.evict.index[addr]) - } else { - delete(p.index, addr) - delete(p.spent, addr) - - heap.Remove(p.evict, p.evict.index[addr]) - p.reserve(addr, false) - } - // Clear out the transactions from the data store - log.Warn("Dropping underpriced blob transaction", "from", addr, "rejected", tx.nonce, "tip", tx.execTipCap, "want", tip, "drop", nonces, "ids", ids) - for _, id := range ids { - if err := p.store.Delete(id); err != nil { - log.Error("Failed to delete dropped transaction", "id", id, "err", err) - } - } - break - } - } - } - } - log.Debug("Blobpool tip threshold updated", "tip", tip) - pooltipGauge.Update(tip.Int64()) - p.updateStorageMetrics() -} - -// validateTx checks whether a transaction is valid according to the consensus -// rules and adheres to some heuristic limits of the local node (price and size). -func (p *BlobPool) validateTx(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error { - // Ensure the transaction adheres to basic pool filters (type, size, tip) and - // consensus rules - baseOpts := &txpool.ValidationOptions{ - Config: p.chain.Config(), - Accept: 1 << types.BlobTxType, - MaxSize: txMaxSize, - MinTip: p.gasTip.ToBig(), - } - if err := txpool.ValidateTransaction(tx, blobs, commits, proofs, p.head, p.signer, baseOpts); err != nil { - return err - } - // Ensure the transaction adheres to the stateful pool filters (nonce, balance) - stateOpts := &txpool.ValidationOptionsWithState{ - State: p.state, - - FirstNonceGap: func(addr common.Address) uint64 { - // Nonce gaps are not permitted in the blob pool, the first gap will - // be the next nonce shifted by however many transactions we already - // have pooled. 
- return p.state.GetNonce(addr) + uint64(len(p.index[addr]))
- },
- UsedAndLeftSlots: func(addr common.Address) (int, int) {
- have := len(p.index[addr])
- if have >= maxTxsPerAccount {
- return have, 0
- }
- return have, maxTxsPerAccount - have
- },
- ExistingExpenditure: func(addr common.Address) *big.Int {
- if spent := p.spent[addr]; spent != nil {
- return spent.ToBig()
- }
- return new(big.Int)
- },
- ExistingCost: func(addr common.Address, nonce uint64) *big.Int {
- next := p.state.GetNonce(addr)
- if uint64(len(p.index[addr])) > nonce-next {
- return p.index[addr][int(tx.Nonce()-next)].costCap.ToBig()
- }
- return nil
- },
- }
- if err := txpool.ValidateTransactionWithState(tx, p.signer, stateOpts); err != nil {
- return err
- }
- // If the transaction replaces an existing one, ensure that price bumps are
- // adhered to.
- var (
- from, _ = p.signer.Sender(tx) // already validated above
- next = p.state.GetNonce(from)
- )
- if uint64(len(p.index[from])) > tx.Nonce()-next {
- // Account can support the replacement, but the price bump must also be met
- prev := p.index[from][int(tx.Nonce()-next)]
- switch {
- case tx.GasFeeCapIntCmp(prev.execFeeCap.ToBig()) <= 0:
- return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap)
- case tx.GasTipCapIntCmp(prev.execTipCap.ToBig()) <= 0:
- return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap)
- case tx.BlobGasFeeCapIntCmp(prev.blobFeeCap.ToBig()) <= 0:
- return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap)
- }
- var (
- multiplier = uint256.NewInt(100 + p.config.PriceBump)
- onehundred = uint256.NewInt(100)
-
- minGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execFeeCap), onehundred)
- minGasTipCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.execTipCap), onehundred)
- minBlobGasFeeCap = new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev.blobFeeCap), onehundred)
- )
- switch {
- case tx.GasFeeCapIntCmp(minGasFeeCap.ToBig()) < 0:
- return fmt.Errorf("%w: new tx gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasFeeCap(), prev.execFeeCap, p.config.PriceBump)
- case tx.GasTipCapIntCmp(minGasTipCap.ToBig()) < 0:
- return fmt.Errorf("%w: new tx gas tip cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.GasTipCap(), prev.execTipCap, p.config.PriceBump)
- case tx.BlobGasFeeCapIntCmp(minBlobGasFeeCap.ToBig()) < 0:
- return fmt.Errorf("%w: new tx blob gas fee cap %v <= %v queued + %d%% replacement penalty", txpool.ErrReplaceUnderpriced, tx.BlobGasFeeCap(), prev.blobFeeCap, p.config.PriceBump)
- }
- }
- return nil
-}
-
-// Has returns an indicator of whether the subpool has a transaction cached with
-// the given hash.
-func (p *BlobPool) Has(hash common.Hash) bool {
- p.lock.RLock()
- defer p.lock.RUnlock()
-
- _, ok := p.lookup[hash]
- return ok
-}
-
-func (p *BlobPool) HasLocal(hash common.Hash) bool {
- // TODO: add support to check local transactions
- return p.Has(hash)
-}
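The price-bump arithmetic above computes each minimum acceptable replacement cap as prev * (100 + PriceBump) / 100 in integer math. A self-contained sketch of just that step; the helper name is illustrative, not part of the pool's API:

package main

import (
	"fmt"

	"github.com/holiman/uint256"
)

// minReplacement mirrors the integer math above: the smallest acceptable
// replacement for a fee field is prev * (100 + bump) / 100.
func minReplacement(prev *uint256.Int, bump uint64) *uint256.Int {
	multiplier := uint256.NewInt(100 + bump)
	onehundred := uint256.NewInt(100)
	return new(uint256.Int).Div(new(uint256.Int).Mul(multiplier, prev), onehundred)
}

func main() {
	prev := uint256.NewInt(10_000_000_000) // assumed previous fee cap: 10 gwei
	fmt.Println(minReplacement(prev, 100)) // a 100% bump demands at least 20 gwei
}

-
-// Get returns a transaction if it is contained in the pool, or nil otherwise.
-func (p *BlobPool) Get(hash common.Hash) *txpool.Transaction {
- // Track the amount of time waiting to retrieve a fully resolved blob tx from
- // the pool and the amount of time actually spent on pulling the data from disk.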
- getStart := time.Now()
- p.lock.RLock()
- getwaitHist.Update(time.Since(getStart).Nanoseconds())
- defer p.lock.RUnlock()
-
- defer func(start time.Time) {
- gettimeHist.Update(time.Since(start).Nanoseconds())
- }(time.Now())
-
- // Pull the blob from disk and return an assembled response
- id, ok := p.lookup[hash]
- if !ok {
- return nil
- }
- data, err := p.store.Get(id)
- if err != nil {
- log.Error("Tracked blob transaction missing from store", "hash", hash, "id", id, "err", err)
- return nil
- }
- item := new(blobTx)
- if err = rlp.DecodeBytes(data, item); err != nil {
- log.Error("Blobs corrupted for tracked transaction", "hash", hash, "id", id, "err", err)
- return nil
- }
- return &txpool.Transaction{
- Tx: item.Tx,
- BlobTxBlobs: item.Blobs,
- BlobTxCommits: item.Commits,
- BlobTxProofs: item.Proofs,
- }
-}
-
-// Add inserts a set of blob transactions into the pool if they pass validation (both
-// consensus validity and pool restrictions).
-func (p *BlobPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
- errs := make([]error, len(txs))
- for i, tx := range txs {
- errs[i] = p.add(tx.Tx, tx.BlobTxBlobs, tx.BlobTxCommits, tx.BlobTxProofs)
- }
- return errs
-}
-
-// add inserts a new blob transaction into the pool if it passes validation (both
-// consensus validity and pool restrictions).
-func (p *BlobPool) add(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) (err error) {
- // The blob pool blocks on adding a transaction. This is because blob txs are
- // only ever pulled from the network, so this method will act as the overload
- // protection for fetches.
- waitStart := time.Now()
- p.lock.Lock()
- addwaitHist.Update(time.Since(waitStart).Nanoseconds())
- defer p.lock.Unlock()
-
- defer func(start time.Time) {
- addtimeHist.Update(time.Since(start).Nanoseconds())
- }(time.Now())
-
- // Ensure the transaction is valid from all perspectives
- if err := p.validateTx(tx, blobs, commits, proofs); err != nil {
- log.Trace("Transaction validation failed", "hash", tx.Hash(), "err", err)
- return err
- }
- // If the address is not yet known, request exclusivity to track the account
- // only by this subpool until all transactions are evicted
- from, _ := types.Sender(p.signer, tx) // already validated above
- if _, ok := p.index[from]; !ok {
- if err := p.reserve(from, true); err != nil {
- return err
- }
- defer func() {
- // If the transaction is rejected by some post-validation check, remove
- // the lock on the reservation set.
- //
- // Note, `err` here is the named error return, which will be initialized
- // by a return statement before running deferred methods. Take care with
- // removing or subscoping err as it will break this clause.
- if err != nil { - p.reserve(from, false) - } - }() - } - // Transaction permitted into the pool from a nonce and cost perspective, - // insert it into the database and update the indices - blob, err := rlp.EncodeToBytes(&blobTx{Tx: tx, Blobs: blobs, Commits: commits, Proofs: proofs}) - if err != nil { - log.Error("Failed to encode transaction for storage", "hash", tx.Hash(), "err", err) - return err - } - id, err := p.store.Put(blob) - if err != nil { - return err - } - meta := newBlobTxMeta(id, p.store.Size(id), tx) - - var ( - next = p.state.GetNonce(from) - offset = int(tx.Nonce() - next) - newacc = false - ) - var oldEvictionExecFeeJumps, oldEvictionBlobFeeJumps float64 - if txs, ok := p.index[from]; ok { - oldEvictionExecFeeJumps = txs[len(txs)-1].evictionExecFeeJumps - oldEvictionBlobFeeJumps = txs[len(txs)-1].evictionBlobFeeJumps - } - if len(p.index[from]) > offset { - // Transaction replaces a previously queued one - prev := p.index[from][offset] - if err := p.store.Delete(prev.id); err != nil { - // Shitty situation, but try to recover gracefully instead of going boom - log.Error("Failed to delete replaced transaction", "id", prev.id, "err", err) - } - // Update the transaction index - p.index[from][offset] = meta - p.spent[from] = new(uint256.Int).Sub(p.spent[from], prev.costCap) - p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap) - - delete(p.lookup, prev.hash) - p.lookup[meta.hash] = meta.id - p.stored += uint64(meta.size) - uint64(prev.size) - } else { - // Transaction extends previously scheduled ones - p.index[from] = append(p.index[from], meta) - if _, ok := p.spent[from]; !ok { - p.spent[from] = new(uint256.Int) - newacc = true - } - p.spent[from] = new(uint256.Int).Add(p.spent[from], meta.costCap) - p.lookup[meta.hash] = meta.id - p.stored += uint64(meta.size) - } - // Recompute the rolling eviction fields. In case of a replacement, this will - // recompute all subsequent fields. In case of an append, this will only do - // the fresh calculation. 
- txs := p.index[from]
-
- for i := offset; i < len(txs); i++ {
- // The first transaction will always use itself
- if i == 0 {
- txs[0].evictionExecTip = txs[0].execTipCap
- txs[0].evictionExecFeeJumps = txs[0].basefeeJumps
- txs[0].evictionBlobFeeJumps = txs[0].blobfeeJumps
-
- continue
- }
- // Subsequent transactions will use a rolling calculation
- txs[i].evictionExecTip = txs[i-1].evictionExecTip
- if txs[i].evictionExecTip.Cmp(txs[i].execTipCap) > 0 {
- txs[i].evictionExecTip = txs[i].execTipCap
- }
- txs[i].evictionExecFeeJumps = txs[i-1].evictionExecFeeJumps
- if txs[i].evictionExecFeeJumps > txs[i].basefeeJumps {
- txs[i].evictionExecFeeJumps = txs[i].basefeeJumps
- }
- txs[i].evictionBlobFeeJumps = txs[i-1].evictionBlobFeeJumps
- if txs[i].evictionBlobFeeJumps > txs[i].blobfeeJumps {
- txs[i].evictionBlobFeeJumps = txs[i].blobfeeJumps
- }
- }
- // Update the eviction heap with the new information:
- // - If the transaction is from a new account, add it to the heap
- // - If the account had a singleton tx replaced, update the heap (new price caps)
- // - If the account has a transaction replaced or appended, update the heap if significantly changed
- switch {
- case newacc:
- heap.Push(p.evict, from)
-
- case len(txs) == 1: // 1 tx and not a new acc, must be replacement
- heap.Fix(p.evict, p.evict.index[from])
-
- default: // replacement or new append
- evictionExecFeeDiff := oldEvictionExecFeeJumps - txs[len(txs)-1].evictionExecFeeJumps
- evictionBlobFeeDiff := oldEvictionBlobFeeJumps - txs[len(txs)-1].evictionBlobFeeJumps
-
- if math.Abs(evictionExecFeeDiff) > 0.001 || math.Abs(evictionBlobFeeDiff) > 0.001 { // need math.Abs, can go up and down
- heap.Fix(p.evict, p.evict.index[from])
- }
- }
- // If the pool went over the allowed data limit, evict transactions until
- // we're again below the threshold
- for p.stored > p.config.Datacap {
- p.drop()
- }
- p.updateStorageMetrics()
-
- return nil
-}
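The recomputation above is a rolling minimum over the nonce-ordered list, restarted from the touched offset. A one-field sketch of the same shape (the pool tracks three such fields, not one):

package main

import "fmt"

// rollingMin fills each position with the minimum of all values up to and
// including it - the shape of the threshold recomputation above, shown for a
// single float64 field instead of the three the pool tracks.
func rollingMin(values []float64) []float64 {
	out := make([]float64, len(values))
	for i, v := range values {
		out[i] = v
		if i > 0 && out[i-1] < v {
			out[i] = out[i-1]
		}
	}
	return out
}

func main() {
	jumps := []float64{5, 7, 3, 4} // per-transaction basefee jumps by nonce
	fmt.Println(rollingMin(jumps)) // [5 5 3 3]
}

-
-// drop removes the worst transaction from the pool. It is primarily used when a
-// freshly added transaction overflows the pool and needs to evict something. The
-// method is also called on startup if the user resizes their storage, might be an
-// expensive run but it should be fine-ish.
-func (p *BlobPool) drop() {
- // Peek at the account with the worst transaction set to evict from (Go's heap
- // stores the minimum at index zero of the heap slice) and retrieve its last
- // transaction.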
- var (
- from = p.evict.addrs[0] // cannot call drop on empty pool
-
- txs = p.index[from]
- drop = txs[len(txs)-1]
- last = len(txs) == 1
- )
- // Remove the transaction from the pool's index
- if last {
- delete(p.index, from)
- delete(p.spent, from)
- p.reserve(from, false)
- } else {
- txs[len(txs)-1] = nil
- txs = txs[:len(txs)-1]
-
- p.index[from] = txs
- p.spent[from] = new(uint256.Int).Sub(p.spent[from], drop.costCap)
- }
- p.stored -= uint64(drop.size)
- delete(p.lookup, drop.hash)
-
- // Remove the transaction from the pool's eviction heap:
- // - If the entire account was dropped, pop off the address
- // - Otherwise, if the new tail has better eviction caps, fix the heap
- if last {
- heap.Pop(p.evict)
- } else {
- tail := txs[len(txs)-1] // new tail, surely exists
-
- evictionExecFeeDiff := tail.evictionExecFeeJumps - drop.evictionExecFeeJumps
- evictionBlobFeeDiff := tail.evictionBlobFeeJumps - drop.evictionBlobFeeJumps
-
- if evictionExecFeeDiff > 0.001 || evictionBlobFeeDiff > 0.001 { // no need for math.Abs, monotonic decreasing
- heap.Fix(p.evict, 0)
- }
- }
- // Remove the transaction from the data store
- log.Warn("Evicting overflown blob transaction", "from", from, "evicted", drop.nonce, "id", drop.id)
- if err := p.store.Delete(drop.id); err != nil {
- log.Error("Failed to drop evicted transaction", "id", drop.id, "err", err)
- }
-}
-
-// Pending retrieves all currently processable transactions, grouped by origin
-// account and sorted by nonce.
-func (p *BlobPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
- // Track the amount of time waiting to retrieve the list of pending blob txs
- // from the pool and the amount of time actually spent on assembling the data.
- // The latter will be pretty much moot, but we've kept it to have symmetry
- // across all user operations.
- pendStart := time.Now()
- p.lock.RLock()
- pendwaitHist.Update(time.Since(pendStart).Nanoseconds())
- defer p.lock.RUnlock()
-
- defer func(start time.Time) {
- pendtimeHist.Update(time.Since(start).Nanoseconds())
- }(time.Now())
-
- pending := make(map[common.Address][]*txpool.LazyTransaction)
- for addr, txs := range p.index {
- var lazies []*txpool.LazyTransaction
- for _, tx := range txs {
- lazies = append(lazies, &txpool.LazyTransaction{
- Pool: p,
- Hash: tx.hash,
- Time: time.Now(), // TODO(karalabe): Maybe save these and use that?
- GasFeeCap: tx.execFeeCap.ToBig(),
- GasTipCap: tx.execTipCap.ToBig(),
- })
- }
- if len(lazies) > 0 {
- pending[addr] = lazies
- }
- }
- return pending
-}
-
-func (p *BlobPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*txpool.LazyTransaction {
- return p.Pending(enforceTips)
-}
-
-// PendingFrom returns the same set of transactions that would be returned from Pending restricted to only
-// transactions from [addrs].
-func (p *BlobPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*txpool.LazyTransaction {
- // Track the amount of time waiting to retrieve the list of pending blob txs
- // from the pool and the amount of time actually spent on assembling the data.
- // The latter will be pretty much moot, but we've kept it to have symmetry
- // across all user operations.
- pendStart := time.Now() - p.lock.RLock() - pendwaitHist.Update(time.Since(pendStart).Nanoseconds()) - defer p.lock.RUnlock() - - defer func(start time.Time) { - pendtimeHist.Update(time.Since(start).Nanoseconds()) - }(time.Now()) - - pending := make(map[common.Address][]*txpool.LazyTransaction) - for _, addr := range addrs { - txs, ok := p.index[addr] - if !ok { - continue - } - var lazies []*txpool.LazyTransaction - for _, tx := range txs { - lazies = append(lazies, &txpool.LazyTransaction{ - Pool: p, - Hash: tx.hash, - Time: time.Now(), // TODO(karalabe): Maybe save these and use that? - GasFeeCap: tx.execFeeCap.ToBig(), - GasTipCap: tx.execTipCap.ToBig(), - }) - } - if len(lazies) > 0 { - pending[addr] = lazies - } - } - return pending -} - -// IteratePending iterates over [pool.pending] until [f] returns false. -// The caller must not modify [tx]. Returns false if iteration was interrupted. -func (pool *BlobPool) IteratePending(f func(tx *txpool.Transaction) bool) bool { - pool.lock.RLock() - defer pool.lock.RUnlock() - - for _, list := range pool.index { - for _, txId := range list { - tx := pool.Get(txId.hash) - if tx == nil { - continue - } - if !f(tx) { - return false - } - } - } - return true -} - -func (p *BlobPool) SetMinFee(minFee *big.Int) {} - -// updateStorageMetrics retrieves a bunch of stats from the data store and pushes -// them out as metrics. -func (p *BlobPool) updateStorageMetrics() { - stats := p.store.Infos() - - var ( - dataused uint64 - datareal uint64 - slotused uint64 - - oversizedDataused uint64 - oversizedDatagaps uint64 - oversizedSlotused uint64 - oversizedSlotgaps uint64 - ) - for _, shelf := range stats.Shelves { - slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize) - slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize) - - dataused += slotDataused - datareal += slotDataused + slotDatagaps - slotused += shelf.FilledSlots - - metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused)) - metrics.GetOrRegisterGauge(fmt.Sprintf(shelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps)) - metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots)) - metrics.GetOrRegisterGauge(fmt.Sprintf(shelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots)) - - if shelf.SlotSize/blobSize > maxBlobsPerTransaction { - oversizedDataused += slotDataused - oversizedDatagaps += slotDatagaps - oversizedSlotused += shelf.FilledSlots - oversizedSlotgaps += shelf.GappedSlots - } - } - datausedGauge.Update(int64(dataused)) - datarealGauge.Update(int64(datareal)) - slotusedGauge.Update(int64(slotused)) - - oversizedDatausedGauge.Update(int64(oversizedDataused)) - oversizedDatagapsGauge.Update(int64(oversizedDatagaps)) - oversizedSlotusedGauge.Update(int64(oversizedSlotused)) - oversizedSlotgapsGauge.Update(int64(oversizedSlotgaps)) - - p.updateLimboMetrics() -} - -// updateLimboMetrics retrieves a bunch of stats from the limbo store and pushes -// // them out as metrics. 
-func (p *BlobPool) updateLimboMetrics() { - stats := p.limbo.store.Infos() - - var ( - dataused uint64 - datareal uint64 - slotused uint64 - ) - for _, shelf := range stats.Shelves { - slotDataused := shelf.FilledSlots * uint64(shelf.SlotSize) - slotDatagaps := shelf.GappedSlots * uint64(shelf.SlotSize) - - dataused += slotDataused - datareal += slotDataused + slotDatagaps - slotused += shelf.FilledSlots - - metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatausedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDataused)) - metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfDatagapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(slotDatagaps)) - metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotusedGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.FilledSlots)) - metrics.GetOrRegisterGauge(fmt.Sprintf(limboShelfSlotgapsGaugeName, shelf.SlotSize/blobSize), nil).Update(int64(shelf.GappedSlots)) - } - limboDatausedGauge.Update(int64(dataused)) - limboDatarealGauge.Update(int64(datareal)) - limboSlotusedGauge.Update(int64(slotused)) -} - -// SubscribeTransactions registers a subscription of NewTxsEvent and -// starts sending event to the given channel. -func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription { - return p.eventScope.Track(p.eventFeed.Subscribe(ch)) -} - -// Nonce returns the next nonce of an account, with all transactions executable -// by the pool already applied on top. -func (p *BlobPool) Nonce(addr common.Address) uint64 { - p.lock.Lock() - defer p.lock.Unlock() - - if txs, ok := p.index[addr]; ok { - return txs[len(txs)-1].nonce + 1 - } - return p.state.GetNonce(addr) -} - -// Stats retrieves the current pool stats, namely the number of pending and the -// number of queued (non-executable) transactions. -func (p *BlobPool) Stats() (int, int) { - p.lock.Lock() - defer p.lock.Unlock() - - var pending int - for _, txs := range p.index { - pending += len(txs) - } - return pending, 0 // No non-executable txs in the blob pool -} - -// Content retrieves the data content of the transaction pool, returning all the -// pending as well as queued transactions, grouped by account and sorted by nonce. -// -// For the blob pool, this method will return nothing for now. -// TODO(karalabe): Abstract out the returned metadata. -func (p *BlobPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { - return make(map[common.Address][]*types.Transaction), make(map[common.Address][]*types.Transaction) -} - -// ContentFrom retrieves the data content of the transaction pool, returning the -// pending as well as queued transactions of this address, grouped by nonce. -// -// For the blob pool, this method will return nothing for now. -// TODO(karalabe): Abstract out the returned metadata. -func (p *BlobPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { - return []*types.Transaction{}, []*types.Transaction{} -} - -// Locals retrieves the accounts currently considered local by the pool. -// -// There is no notion of local accounts in the blob pool. -func (p *BlobPool) Locals() []common.Address { - return []common.Address{} -} - -// Status returns the known status (unknown/pending/queued) of a transaction -// identified by their hashes. 
-func (p *BlobPool) Status(hash common.Hash) txpool.TxStatus { - if p.Has(hash) { - return txpool.TxStatusPending - } - return txpool.TxStatusUnknown -} diff --git a/core/txpool/blobpool/blobpool_test.go b/core/txpool/blobpool/blobpool_test.go deleted file mode 100644 index 07422590c8..0000000000 --- a/core/txpool/blobpool/blobpool_test.go +++ /dev/null @@ -1,1273 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import ( - "bytes" - "crypto/ecdsa" - "crypto/sha256" - "errors" - "math" - "math/big" - "os" - "path/filepath" - "sync" - "testing" - "time" - - "github.com/ava-labs/subnet-evm/commontype" - "github.com/ava-labs/subnet-evm/consensus/dummy" - "github.com/ava-labs/subnet-evm/consensus/misc/eip4844" - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/params" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/ethdb/memorydb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/holiman/billy" - "github.com/holiman/uint256" -) - -var ( - emptyBlob = kzg4844.Blob{} - emptyBlobCommit, _ = kzg4844.BlobToCommitment(emptyBlob) - emptyBlobProof, _ = kzg4844.ComputeBlobProof(emptyBlob, emptyBlobCommit) - emptyBlobVHash = blobHash(emptyBlobCommit) -) - -func blobHash(commit kzg4844.Commitment) common.Hash { - hasher := sha256.New() - hasher.Write(commit[:]) - hash := hasher.Sum(nil) - - var vhash common.Hash - vhash[0] = params.BlobTxHashVersion - copy(vhash[1:], hash[1:]) - - return vhash -} - -// Chain configuration with Cancun enabled. -// -// TODO(karalabe): replace with params.MainnetChainConfig after Cancun. -var testChainConfig *params.ChainConfig - -func init() { - testChainConfig = new(params.ChainConfig) - *testChainConfig = *params.TestChainConfig - testChainConfig.FeeConfig.MinBaseFee = new(big.Int).SetUint64(1) - - testChainConfig.CancunTime = new(uint64) - *testChainConfig.CancunTime = uint64(time.Now().Unix()) -} - -// testBlockChain is a mock of the live chain for testing the pool. 
-type testBlockChain struct {
- config *params.ChainConfig
- basefee *uint256.Int
- blobfee *uint256.Int
- statedb *state.StateDB
-}
-
-func (bc *testBlockChain) Config() *params.ChainConfig {
- return bc.config
-}
-
-func (bc *testBlockChain) CurrentBlock() *types.Header {
- // Yolo, life is too short to invert dummy.CalcBaseFee and eip4844.CalcBlobFee,
- // just binary search them.
-
- // A base fee of 5714 ETH, at the 21000 base gas, costs more than the total
- // mainnet ether in existence; use that as a cap for the tests.
- var (
- blockNumber = big.NewInt(1) // Note: London fork is not based on number in Avalanche
- blockTime = *bc.config.CancunTime + 1
- gasLimit = uint64(30_000_000)
- )
- lo := new(big.Int)
- hi := new(big.Int).Mul(big.NewInt(5714), new(big.Int).Exp(big.NewInt(10), big.NewInt(18), nil))
-
- for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
- mid := new(big.Int).Add(lo, hi)
- mid.Div(mid, big.NewInt(2))
- parent := &types.Header{
- Number: blockNumber,
- Time: blockTime,
- GasLimit: gasLimit,
- GasUsed: 0,
- BaseFee: mid,
- Extra: make([]byte, params.DynamicFeeExtraDataSize),
- }
- _, baseFee, err := dummy.CalcBaseFee(
- bc.config, bc.config.FeeConfig, parent, blockTime,
- )
- if err != nil {
- panic(err)
- }
- if baseFee.Cmp(bc.basefee.ToBig()) > 0 {
- hi = mid
- } else {
- lo = mid
- }
- }
- baseFee := lo
-
- // An excess blob gas of 2^27 translates into a blob fee higher than the total
- // mainnet ether in existence; use that as a cap for the tests.
- lo = new(big.Int)
- hi = new(big.Int).Exp(big.NewInt(2), big.NewInt(27), nil)
-
- for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
- mid := new(big.Int).Add(lo, hi)
- mid.Div(mid, big.NewInt(2))
-
- if eip4844.CalcBlobFee(mid.Uint64()).Cmp(bc.blobfee.ToBig()) > 0 {
- hi = mid
- } else {
- lo = mid
- }
- }
- excessBlobGas := lo.Uint64()
-
- return &types.Header{
- Number: blockNumber,
- Time: blockTime,
- GasLimit: gasLimit,
- BaseFee: baseFee,
- ExcessBlobGas: &excessBlobGas,
- Extra: make([]byte, params.DynamicFeeExtraDataSize),
- }
-}
-
-func (bc *testBlockChain) CurrentFinalBlock() *types.Header {
- return &types.Header{
- Number: big.NewInt(0),
- }
-}
-
-func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
- return nil
-}
-
-func (bc *testBlockChain) StateAt(common.Hash) (*state.StateDB, error) {
- return bc.statedb, nil
-}
-
-func (bc *testBlockChain) GetFeeConfigAt(header *types.Header) (commontype.FeeConfig, *big.Int, error) {
- return bc.config.FeeConfig, nil, nil
-}
-
-// makeAddressReserver is a utility method to sanity check that accounts are
-// properly reserved by the blobpool (no duplicate reserves or unreserves).
-func makeAddressReserver() txpool.AddressReserver {
- var (
- reserved = make(map[common.Address]struct{})
- lock sync.Mutex
- )
- return func(addr common.Address, reserve bool) error {
- lock.Lock()
- defer lock.Unlock()
-
- _, exists := reserved[addr]
- if reserve {
- if exists {
- panic("already reserved")
- }
- reserved[addr] = struct{}{}
- return nil
- }
- if !exists {
- panic("not reserved")
- }
- delete(reserved, addr)
- return nil
- }
-}
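Both searches above invert a monotonically increasing fee function by bisection. A generic sketch of that pattern; the function and bounds here are placeholders, not the mock's actual inputs:

package main

import (
	"fmt"
	"math/big"
)

// bisect returns the largest x in [lo, hi) for which f(x) <= target, assuming
// f is monotonically increasing - the same loop the mock uses to recover a
// parent base fee or excess blob gas from a desired fee.
func bisect(f func(*big.Int) *big.Int, target, lo, hi *big.Int) *big.Int {
	lo, hi = new(big.Int).Set(lo), new(big.Int).Set(hi)
	for new(big.Int).Add(lo, big.NewInt(1)).Cmp(hi) != 0 {
		mid := new(big.Int).Add(lo, hi)
		mid.Div(mid, big.NewInt(2))
		if f(mid).Cmp(target) > 0 {
			hi = mid
		} else {
			lo = mid
		}
	}
	return lo
}

func main() {
	square := func(x *big.Int) *big.Int { return new(big.Int).Mul(x, x) }
	// The largest x with x*x <= 1000 is 31.
	fmt.Println(bisect(square, big.NewInt(1000), big.NewInt(0), big.NewInt(1<<20)))
}

-
-// makeTx is a utility method to construct a random blob transaction and sign it
-// with a valid key, only setting the interesting fields from the perspective of
-// the blob pool.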
-// makeTx is a utility method to construct a random blob transaction and sign it
-// with a valid key, only setting the interesting fields from the perspective of
-// the blob pool.
-func makeTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64, key *ecdsa.PrivateKey) *types.Transaction {
-	tx, _ := types.SignNewTx(key, types.LatestSigner(testChainConfig), makeUnsignedTx(nonce, gasTipCap, gasFeeCap, blobFeeCap))
-	return tx
-}
-
-// makeUnsignedTx is a utility method to construct a random blob transaction
-// without signing it.
-func makeUnsignedTx(nonce uint64, gasTipCap uint64, gasFeeCap uint64, blobFeeCap uint64) *types.BlobTx {
-	return &types.BlobTx{
-		ChainID:    uint256.MustFromBig(testChainConfig.ChainID),
-		Nonce:      nonce,
-		GasTipCap:  uint256.NewInt(gasTipCap),
-		GasFeeCap:  uint256.NewInt(gasFeeCap),
-		Gas:        21000,
-		BlobFeeCap: uint256.NewInt(blobFeeCap),
-		BlobHashes: []common.Hash{emptyBlobVHash},
-		Value:      uint256.NewInt(100),
-	}
-}
-
-// verifyPoolInternals iterates over all the transactions in the pool and checks
-// that sort orders, calculated fields and cumulative fields are correct.
-func verifyPoolInternals(t *testing.T, pool *BlobPool) {
-	// Mark this method as a helper to remove from stack traces
-	t.Helper()
-
-	// Verify that all items in the index are present in the lookup and nothing more
-	seen := make(map[common.Hash]struct{})
-	for addr, txs := range pool.index {
-		for _, tx := range txs {
-			if _, ok := seen[tx.hash]; ok {
-				t.Errorf("duplicate hash #%x in transaction index: address %s, nonce %d", tx.hash, addr, tx.nonce)
-			}
-			seen[tx.hash] = struct{}{}
-		}
-	}
-	for hash, id := range pool.lookup {
-		if _, ok := seen[hash]; !ok {
-			t.Errorf("lookup entry missing from transaction index: hash #%x, id %d", hash, id)
-		}
-		delete(seen, hash)
-	}
-	for hash := range seen {
-		t.Errorf("indexed transaction hash #%x missing from lookup table", hash)
-	}
-	// Verify that transactions are sorted per account and contain no nonce gaps
-	for addr, txs := range pool.index {
-		for i := 1; i < len(txs); i++ {
-			if txs[i].nonce != txs[i-1].nonce+1 {
-				t.Errorf("addr %v, tx %d nonce mismatch: have %d, want %d", addr, i, txs[i].nonce, txs[i-1].nonce+1)
-			}
-		}
-	}
-	// Verify that calculated eviction thresholds are correct
-	for addr, txs := range pool.index {
-		if !txs[0].evictionExecTip.Eq(txs[0].execTipCap) {
-			t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, 0, txs[0].evictionExecTip, txs[0].execTipCap)
-		}
-		if math.Abs(txs[0].evictionExecFeeJumps-txs[0].basefeeJumps) > 0.001 {
-			t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionExecFeeJumps, txs[0].basefeeJumps)
-		}
-		if math.Abs(txs[0].evictionBlobFeeJumps-txs[0].blobfeeJumps) > 0.001 {
-			t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, 0, txs[0].evictionBlobFeeJumps, txs[0].blobfeeJumps)
-		}
-		for i := 1; i < len(txs); i++ {
-			wantExecTip := txs[i-1].evictionExecTip
-			if wantExecTip.Gt(txs[i].execTipCap) {
-				wantExecTip = txs[i].execTipCap
-			}
-			if !txs[i].evictionExecTip.Eq(wantExecTip) {
-				t.Errorf("addr %v, tx %d eviction execution tip mismatch: have %d, want %d", addr, i, txs[i].evictionExecTip, wantExecTip)
-			}
-
-			wantExecFeeJumps := txs[i-1].evictionExecFeeJumps
-			if wantExecFeeJumps > txs[i].basefeeJumps {
-				wantExecFeeJumps = txs[i].basefeeJumps
-			}
-			if math.Abs(txs[i].evictionExecFeeJumps-wantExecFeeJumps) > 0.001 {
-				t.Errorf("addr %v, tx %d eviction execution fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionExecFeeJumps, wantExecFeeJumps)
-			}
-
-			wantBlobFeeJumps := txs[i-1].evictionBlobFeeJumps
-			if wantBlobFeeJumps >
txs[i].blobfeeJumps { - wantBlobFeeJumps = txs[i].blobfeeJumps - } - if math.Abs(txs[i].evictionBlobFeeJumps-wantBlobFeeJumps) > 0.001 { - t.Errorf("addr %v, tx %d eviction blob fee jumps mismatch: have %f, want %f", addr, i, txs[i].evictionBlobFeeJumps, wantBlobFeeJumps) - } - } - } - // Verify that account balance accumulations are correct - for addr, txs := range pool.index { - spent := new(uint256.Int) - for _, tx := range txs { - spent.Add(spent, tx.costCap) - } - if !pool.spent[addr].Eq(spent) { - t.Errorf("addr %v expenditure mismatch: have %d, want %d", addr, pool.spent[addr], spent) - } - } - // Verify that pool storage size is correct - var stored uint64 - for _, txs := range pool.index { - for _, tx := range txs { - stored += uint64(tx.size) - } - } - if pool.stored != stored { - t.Errorf("pool storage mismatch: have %d, want %d", pool.stored, stored) - } - // Verify the price heap internals - verifyHeapInternals(t, pool.evict) -} - -// Tests that transactions can be loaded from disk on startup and that they are -// correctly discarded if invalid. -// -// - 1. A transaction that cannot be decoded must be dropped -// - 2. A transaction that cannot be recovered (bad signature) must be dropped -// - 3. All transactions after a nonce gap must be dropped -// - 4. All transactions after an underpriced one (including it) must be dropped -func TestOpenDrops(t *testing.T) { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - - // Create a temporary folder for the persistent backend - storage, _ := os.MkdirTemp("", "blobpool-") - defer os.RemoveAll(storage) - - os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) - - // Insert a malformed transaction to verify that decoding errors (or format - // changes) are handled gracefully (case 1) - malformed, _ := store.Put([]byte("this is a badly encoded transaction")) - - // Insert a transaction with a bad signature to verify that stale junk after - // potential hard-forks can get evicted (case 2) - tx := types.NewTx(&types.BlobTx{ - ChainID: uint256.MustFromBig(testChainConfig.ChainID), - GasTipCap: new(uint256.Int), - GasFeeCap: new(uint256.Int), - Gas: 0, - Value: new(uint256.Int), - Data: nil, - BlobFeeCap: new(uint256.Int), - V: new(uint256.Int), - R: new(uint256.Int), - S: new(uint256.Int), - }) - blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx}) - badsig, _ := store.Put(blob) - - // Insert a sequence of transactions with a nonce gap in between to verify - // that anything gapped will get evicted (case 3) - var ( - gapper, _ = crypto.GenerateKey() - - valids = make(map[uint64]struct{}) - gapped = make(map[uint64]struct{}) - ) - for _, nonce := range []uint64{0, 1, 3, 4, 6, 7} { // first gap at #2, another at #5 - tx := makeTx(nonce, 1, 1, 1, gapper) - blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx}) - - id, _ := store.Put(blob) - if nonce < 2 { - valids[id] = struct{}{} - } else { - gapped[id] = struct{}{} - } - } - // Insert a sequence of transactions with a gapped starting nonce to verify - // that the entire set will get dropped. 
- var (
-		dangler, _ = crypto.GenerateKey()
-		dangling   = make(map[uint64]struct{})
-	)
-	for _, nonce := range []uint64{1, 2, 3} { // first gap at #0, all set dangling
-		tx := makeTx(nonce, 1, 1, 1, dangler)
-		blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
-
-		id, _ := store.Put(blob)
-		dangling[id] = struct{}{}
-	}
-	// Insert a sequence of transactions with already passed nonces to verify
-	// that the entire set will get dropped.
-	var (
-		filler, _ = crypto.GenerateKey()
-		filled    = make(map[uint64]struct{})
-	)
-	for _, nonce := range []uint64{0, 1, 2} { // account nonce at 3, all set filled
-		tx := makeTx(nonce, 1, 1, 1, filler)
-		blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
-
-		id, _ := store.Put(blob)
-		filled[id] = struct{}{}
-	}
-	// Insert a sequence of transactions with partially passed nonces to verify
-	// that the included part of the set will get dropped
-	var (
-		overlapper, _ = crypto.GenerateKey()
-		overlapped    = make(map[uint64]struct{})
-	)
-	for _, nonce := range []uint64{0, 1, 2, 3} { // account nonce at 2, half filled
-		tx := makeTx(nonce, 1, 1, 1, overlapper)
-		blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
-
-		id, _ := store.Put(blob)
-		if nonce >= 2 {
-			valids[id] = struct{}{}
-		} else {
-			overlapped[id] = struct{}{}
-		}
-	}
-	// Insert a sequence of transactions with an underpriced first one to verify
-	// that the entire set will get dropped (case 4).
-	var (
-		underpayer, _ = crypto.GenerateKey()
-		underpaid     = make(map[uint64]struct{})
-	)
-	for i := 0; i < 5; i++ { // make #0 underpriced
-		var tx *types.Transaction
-		if i == 0 {
-			tx = makeTx(uint64(i), 0, 0, 0, underpayer)
-		} else {
-			tx = makeTx(uint64(i), 1, 1, 1, underpayer)
-		}
-		blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
-
-		id, _ := store.Put(blob)
-		underpaid[id] = struct{}{}
-	}
-
-	// Insert a sequence of transactions with an underpriced one in between to verify
-	// that it and anything newly gapped will get evicted (case 4).
-	var (
-		outpricer, _ = crypto.GenerateKey()
-		outpriced    = make(map[uint64]struct{})
-	)
-	for i := 0; i < 5; i++ { // make #2 underpriced
-		var tx *types.Transaction
-		if i == 2 {
-			tx = makeTx(uint64(i), 0, 0, 0, outpricer)
-		} else {
-			tx = makeTx(uint64(i), 1, 1, 1, outpricer)
-		}
-		blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
-
-		id, _ := store.Put(blob)
-		if i < 2 {
-			valids[id] = struct{}{}
-		} else {
-			outpriced[id] = struct{}{}
-		}
-	}
-	// Insert a sequence of transactions fully overdrafted to verify that the
-	// entire set will get invalidated.
-	var (
-		exceeder, _ = crypto.GenerateKey()
-		exceeded    = make(map[uint64]struct{})
-	)
-	for _, nonce := range []uint64{0, 1, 2} { // nonce 0 overdrafts the account
-		var tx *types.Transaction
-		if nonce == 0 {
-			tx = makeTx(nonce, 1, 100, 1, exceeder)
-		} else {
-			tx = makeTx(nonce, 1, 1, 1, exceeder)
-		}
-		blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx})
-
-		id, _ := store.Put(blob)
-		exceeded[id] = struct{}{}
-	}
-	// Insert a sequence of transactions partially overdrafted to verify that part
-	// of the set will get invalidated.
- var ( - overdrafter, _ = crypto.GenerateKey() - overdrafted = make(map[uint64]struct{}) - ) - for _, nonce := range []uint64{0, 1, 2} { // nonce 1 overdrafts the account - var tx *types.Transaction - if nonce == 1 { - tx = makeTx(nonce, 1, 100, 1, overdrafter) - } else { - tx = makeTx(nonce, 1, 1, 1, overdrafter) - } - blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx}) - - id, _ := store.Put(blob) - if nonce < 1 { - valids[id] = struct{}{} - } else { - overdrafted[id] = struct{}{} - } - } - // Insert a sequence of transactions overflowing the account cap to verify - // that part of the set will get invalidated. - var ( - overcapper, _ = crypto.GenerateKey() - overcapped = make(map[uint64]struct{}) - ) - for nonce := uint64(0); nonce < maxTxsPerAccount+3; nonce++ { - blob, _ := rlp.EncodeToBytes(&blobTx{Tx: makeTx(nonce, 1, 1, 1, overcapper)}) - - id, _ := store.Put(blob) - if nonce < maxTxsPerAccount { - valids[id] = struct{}{} - } else { - overcapped[id] = struct{}{} - } - } - store.Close() - - // Create a blob pool out of the pre-seeded data - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil) - statedb.AddBalance(crypto.PubkeyToAddress(gapper.PublicKey), big.NewInt(1000000)) - statedb.AddBalance(crypto.PubkeyToAddress(dangler.PublicKey), big.NewInt(1000000)) - statedb.AddBalance(crypto.PubkeyToAddress(filler.PublicKey), big.NewInt(1000000)) - statedb.SetNonce(crypto.PubkeyToAddress(filler.PublicKey), 3) - statedb.AddBalance(crypto.PubkeyToAddress(overlapper.PublicKey), big.NewInt(1000000)) - statedb.SetNonce(crypto.PubkeyToAddress(overlapper.PublicKey), 2) - statedb.AddBalance(crypto.PubkeyToAddress(underpayer.PublicKey), big.NewInt(1000000)) - statedb.AddBalance(crypto.PubkeyToAddress(outpricer.PublicKey), big.NewInt(1000000)) - statedb.AddBalance(crypto.PubkeyToAddress(exceeder.PublicKey), big.NewInt(1000000)) - statedb.AddBalance(crypto.PubkeyToAddress(overdrafter.PublicKey), big.NewInt(1000000)) - statedb.AddBalance(crypto.PubkeyToAddress(overcapper.PublicKey), big.NewInt(10000000)) - statedb.Commit(0, true, false) - - chain := &testBlockChain{ - config: testChainConfig, - basefee: uint256.NewInt(uint64(params.TestInitialBaseFee)), - blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice), - statedb: statedb, - } - pool := New(Config{Datadir: storage}, chain) - if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { - t.Fatalf("failed to create blob pool: %v", err) - } - defer pool.Close() - - // Verify that the malformed (case 1), badly signed (case 2) and gapped (case - // 3) txs have been deleted from the pool - alive := make(map[uint64]struct{}) - for _, txs := range pool.index { - for _, tx := range txs { - switch tx.id { - case malformed: - t.Errorf("malformed RLP transaction remained in storage") - case badsig: - t.Errorf("invalidly signed transaction remained in storage") - default: - if _, ok := dangling[tx.id]; ok { - t.Errorf("dangling transaction remained in storage: %d", tx.id) - } else if _, ok := filled[tx.id]; ok { - t.Errorf("filled transaction remained in storage: %d", tx.id) - } else if _, ok := overlapped[tx.id]; ok { - t.Errorf("overlapped transaction remained in storage: %d", tx.id) - } else if _, ok := gapped[tx.id]; ok { - t.Errorf("gapped transaction remained in storage: %d", tx.id) - } else if _, ok := underpaid[tx.id]; ok { - t.Errorf("underpaid transaction remained in storage: %d", tx.id) - } else if _, ok := outpriced[tx.id]; ok { - t.Errorf("outpriced transaction 
remained in storage: %d", tx.id)
-				} else if _, ok := exceeded[tx.id]; ok {
-					t.Errorf("fully overdrafted transaction remained in storage: %d", tx.id)
-				} else if _, ok := overdrafted[tx.id]; ok {
-					t.Errorf("partially overdrafted transaction remained in storage: %d", tx.id)
-				} else if _, ok := overcapped[tx.id]; ok {
-					t.Errorf("overcapped transaction remained in storage: %d", tx.id)
-				} else {
-					alive[tx.id] = struct{}{}
-				}
-			}
-		}
-	}
-	// Verify that the rest of the transactions remained alive
-	if len(alive) != len(valids) {
-		t.Errorf("valid transaction count mismatch: have %d, want %d", len(alive), len(valids))
-	}
-	for id := range alive {
-		if _, ok := valids[id]; !ok {
-			t.Errorf("extra transaction %d", id)
-		}
-	}
-	for id := range valids {
-		if _, ok := alive[id]; !ok {
-			t.Errorf("missing transaction %d", id)
-		}
-	}
-	// Verify all the calculated pool internals. Interestingly, this is **not**
-	// a duplication of the above checks, this actually validates the verifier
-	// using the above already hard coded checks.
-	//
-	// Do not remove this, nor alter the above to be generic.
-	verifyPoolInternals(t, pool)
-}
-
-// Tests that transactions loaded from disk are indexed correctly.
-//
-// - 1. Transactions must be grouped by sender, sorted by nonce
-// - 2. Eviction thresholds are calculated correctly for the sequences
-// - 3. Balance usage of an account is totaled across all transactions
-func TestOpenIndex(t *testing.T) {
-	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-
-	// Create a temporary folder for the persistent backend
-	storage, _ := os.MkdirTemp("", "blobpool-")
-	defer os.RemoveAll(storage)
-
-	os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
-	store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
-
-	// Insert a sequence of transactions with varying price points to check that
-	// the cumulative minimums will be maintained.
- var ( - key, _ = crypto.GenerateKey() - addr = crypto.PubkeyToAddress(key.PublicKey) - - txExecTipCaps = []uint64{10, 25, 5, 7, 1, 100} - txExecFeeCaps = []uint64{100, 90, 200, 10, 80, 300} - txBlobFeeCaps = []uint64{55, 66, 77, 33, 22, 11} - - //basefeeJumps = []float64{39.098, 38.204, 44.983, 19.549, 37.204, 48.426} // log 1.125 (exec fee cap) - //blobfeeJumps = []float64{34.023, 35.570, 36.879, 29.686, 26.243, 20.358} // log 1.125 (blob fee cap) - - evictExecTipCaps = []uint64{10, 10, 5, 5, 1, 1} - evictExecFeeJumps = []float64{39.098, 38.204, 38.204, 19.549, 19.549, 19.549} // min(log 1.125 (exec fee cap)) - evictBlobFeeJumps = []float64{34.023, 34.023, 34.023, 29.686, 26.243, 20.358} // min(log 1.125 (blob fee cap)) - - totalSpent = uint256.NewInt(21000*(100+90+200+10+80+300) + blobSize*(55+66+77+33+22+11) + 100*6) // 21000 gas x price + 128KB x blobprice + value - ) - for _, i := range []int{5, 3, 4, 2, 0, 1} { // Randomize the tx insertion order to force sorting on load - tx := makeTx(uint64(i), txExecTipCaps[i], txExecFeeCaps[i], txBlobFeeCaps[i], key) - blob, _ := rlp.EncodeToBytes(&blobTx{Tx: tx}) - store.Put(blob) - } - store.Close() - - // Create a blob pool out of the pre-seeded data - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil) - statedb.AddBalance(addr, big.NewInt(1_000_000_000)) - statedb.Commit(0, true, false) - - chain := &testBlockChain{ - config: testChainConfig, - basefee: uint256.NewInt(uint64(params.TestInitialBaseFee)), - blobfee: uint256.NewInt(params.BlobTxMinBlobGasprice), - statedb: statedb, - } - pool := New(Config{Datadir: storage}, chain) - if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { - t.Fatalf("failed to create blob pool: %v", err) - } - defer pool.Close() - - // Verify that the transactions have been sorted by nonce (case 1) - for i := 0; i < len(pool.index[addr]); i++ { - if pool.index[addr][i].nonce != uint64(i) { - t.Errorf("tx %d nonce mismatch: have %d, want %d", i, pool.index[addr][i].nonce, uint64(i)) - } - } - // Verify that the cumulative fee minimums have been correctly calculated (case 2) - for i, cap := range evictExecTipCaps { - if !pool.index[addr][i].evictionExecTip.Eq(uint256.NewInt(cap)) { - t.Errorf("eviction tip cap %d mismatch: have %d, want %d", i, pool.index[addr][i].evictionExecTip, cap) - } - } - for i, jumps := range evictExecFeeJumps { - if math.Abs(pool.index[addr][i].evictionExecFeeJumps-jumps) > 0.001 { - t.Errorf("eviction fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionExecFeeJumps, jumps) - } - } - for i, jumps := range evictBlobFeeJumps { - if math.Abs(pool.index[addr][i].evictionBlobFeeJumps-jumps) > 0.001 { - t.Errorf("eviction blob fee cap jumps %d mismatch: have %f, want %f", i, pool.index[addr][i].evictionBlobFeeJumps, jumps) - } - } - // Verify that the balance usage has been correctly calculated (case 3) - if !pool.spent[addr].Eq(totalSpent) { - t.Errorf("expenditure mismatch: have %d, want %d", pool.spent[addr], totalSpent) - } - // Verify all the calculated pool internals. Interestingly, this is **not** - // a duplication of the above checks, this actually validates the verifier - // using the above already hard coded checks. - // - // Do not remove this, nor alter the above to be generic. - verifyPoolInternals(t, pool) -} - -// Tests that after indexing all the loaded transactions from disk, a price heap -// is correctly constructed based on the head basefee and blobfee. 
-func TestOpenHeap(t *testing.T) { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - - // Create a temporary folder for the persistent backend - storage, _ := os.MkdirTemp("", "blobpool-") - defer os.RemoveAll(storage) - - os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) - - // Insert a few transactions from a few accounts. To remove randomness from - // the heap initialization, use a deterministic account/tx/priority ordering. - var ( - key1, _ = crypto.GenerateKey() - key2, _ = crypto.GenerateKey() - key3, _ = crypto.GenerateKey() - - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - addr3 = crypto.PubkeyToAddress(key3.PublicKey) - ) - if bytes.Compare(addr1[:], addr2[:]) > 0 { - key1, addr1, key2, addr2 = key2, addr2, key1, addr1 - } - if bytes.Compare(addr1[:], addr3[:]) > 0 { - key1, addr1, key3, addr3 = key3, addr3, key1, addr1 - } - if bytes.Compare(addr2[:], addr3[:]) > 0 { - key2, addr2, key3, addr3 = key3, addr3, key2, addr2 - } - var ( - tx1 = makeTx(0, 1, 1000, 90, key1) - tx2 = makeTx(0, 1, 800, 70, key2) - tx3 = makeTx(0, 1, 1500, 110, key3) - - blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1}) - blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2}) - blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3}) - - heapOrder = []common.Address{addr2, addr1, addr3} - heapIndex = map[common.Address]int{addr2: 0, addr1: 1, addr3: 2} - ) - store.Put(blob1) - store.Put(blob2) - store.Put(blob3) - store.Close() - - // Create a blob pool out of the pre-seeded data - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil) - statedb.AddBalance(addr1, big.NewInt(1_000_000_000)) - statedb.AddBalance(addr2, big.NewInt(1_000_000_000)) - statedb.AddBalance(addr3, big.NewInt(1_000_000_000)) - statedb.Commit(0, true, false) - - chain := &testBlockChain{ - config: testChainConfig, - basefee: uint256.NewInt(1050), - blobfee: uint256.NewInt(105), - statedb: statedb, - } - pool := New(Config{Datadir: storage}, chain) - if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { - t.Fatalf("failed to create blob pool: %v", err) - } - defer pool.Close() - - // Verify that the heap's internal state matches the expectations - for i, addr := range pool.evict.addrs { - if addr != heapOrder[i] { - t.Errorf("slot %d mismatch: have %v, want %v", i, addr, heapOrder[i]) - } - } - for addr, i := range pool.evict.index { - if i != heapIndex[addr] { - t.Errorf("index for %v mismatch: have %d, want %d", addr, i, heapIndex[addr]) - } - } - // Verify all the calculated pool internals. Interestingly, this is **not** - // a duplication of the above checks, this actually validates the verifier - // using the above already hard coded checks. - // - // Do not remove this, nor alter the above to be generic. - verifyPoolInternals(t, pool) -} - -// Tests that after the pool's previous state is loaded back, any transactions -// over the new storage cap will get dropped. 
-func TestOpenCap(t *testing.T) { - log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - - // Create a temporary folder for the persistent backend - storage, _ := os.MkdirTemp("", "blobpool-") - defer os.RemoveAll(storage) - - os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700) - store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil) - - // Insert a few transactions from a few accounts - var ( - key1, _ = crypto.GenerateKey() - key2, _ = crypto.GenerateKey() - key3, _ = crypto.GenerateKey() - - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - addr2 = crypto.PubkeyToAddress(key2.PublicKey) - addr3 = crypto.PubkeyToAddress(key3.PublicKey) - - tx1 = makeTx(0, 1, 1000, 100, key1) - tx2 = makeTx(0, 1, 800, 70, key2) - tx3 = makeTx(0, 1, 1500, 110, key3) - - blob1, _ = rlp.EncodeToBytes(&blobTx{Tx: tx1, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}}) - blob2, _ = rlp.EncodeToBytes(&blobTx{Tx: tx2, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}}) - blob3, _ = rlp.EncodeToBytes(&blobTx{Tx: tx3, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}}) - - keep = []common.Address{addr1, addr3} - drop = []common.Address{addr2} - size = uint64(2 * (txAvgSize + blobSize)) - ) - store.Put(blob1) - store.Put(blob2) - store.Put(blob3) - store.Close() - - // Verify pool capping twice: first by reducing the data cap, then restarting - // with a high cap to ensure everything was persisted previously - for _, datacap := range []uint64{2 * (txAvgSize + blobSize), 100 * (txAvgSize + blobSize)} { - // Create a blob pool out of the pre-seeded data, but cap it to 2 blob transaction - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil) - statedb.AddBalance(addr1, big.NewInt(1_000_000_000)) - statedb.AddBalance(addr2, big.NewInt(1_000_000_000)) - statedb.AddBalance(addr3, big.NewInt(1_000_000_000)) - statedb.Commit(0, true, false) - - chain := &testBlockChain{ - config: testChainConfig, - basefee: uint256.NewInt(1050), - blobfee: uint256.NewInt(105), - statedb: statedb, - } - pool := New(Config{Datadir: storage, Datacap: datacap}, chain) - if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil { - t.Fatalf("failed to create blob pool: %v", err) - } - // Verify that enough transactions have been dropped to get the pool's size - // under the requested limit - if len(pool.index) != len(keep) { - t.Errorf("tracked account count mismatch: have %d, want %d", len(pool.index), len(keep)) - } - for _, addr := range keep { - if _, ok := pool.index[addr]; !ok { - t.Errorf("expected account %v missing from pool", addr) - } - } - for _, addr := range drop { - if _, ok := pool.index[addr]; ok { - t.Errorf("unexpected account %v present in pool", addr) - } - } - if pool.stored != size { - t.Errorf("pool stored size mismatch: have %v, want %v", pool.stored, size) - } - // Verify all the calculated pool internals. Interestingly, this is **not** - // a duplication of the above checks, this actually validates the verifier - // using the above already hard coded checks. - // - // Do not remove this, nor alter the above to be generic. 
- verifyPoolInternals(t, pool)
-
-		pool.Close()
-	}
-}
-
-// Tests that adding a transaction will correctly store it in the persistent
-// store and update all the indices.
-//
-// Note, this test mostly checks the pool transaction shuffling logic or things
-// specific to the blob pool. It does not do an exhaustive transaction validity
-// check.
-func TestAdd(t *testing.T) {
-	log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true))))
-
-	// seed is a helper tuple to seed an initial state db and pool
-	type seed struct {
-		balance uint64
-		nonce   uint64
-		txs     []*types.BlobTx
-	}
-
-	// addtx is a helper sender/tx tuple to represent a new tx addition
-	type addtx struct {
-		from string
-		tx   *types.BlobTx
-		err  error
-	}
-
-	tests := []struct {
-		seeds map[string]seed
-		adds  []addtx
-	}{
-		// Transactions from new accounts should be accepted if their initial
-		// nonce matches the expected one from the statedb. Higher or lower must
-		// be rejected.
-		{
-			seeds: map[string]seed{
-				"alice":  {balance: 21100 + blobSize},
-				"bob":    {balance: 21100 + blobSize, nonce: 1},
-				"claire": {balance: 21100 + blobSize},
-				"dave":   {balance: 21100 + blobSize, nonce: 1},
-			},
-			adds: []addtx{
-				{ // New account, no previous txs: accept nonce 0
-					from: "alice",
-					tx:   makeUnsignedTx(0, 1, 1, 1),
-					err:  nil,
-				},
-				{ // Old account, 1 tx in chain, 0 pending: accept nonce 1
-					from: "bob",
-					tx:   makeUnsignedTx(1, 1, 1, 1),
-					err:  nil,
-				},
-				{ // New account, no previous txs: reject nonce 1
-					from: "claire",
-					tx:   makeUnsignedTx(1, 1, 1, 1),
-					err:  core.ErrNonceTooHigh,
-				},
-				{ // Old account, 1 tx in chain, 0 pending: reject nonce 0
-					from: "dave",
-					tx:   makeUnsignedTx(0, 1, 1, 1),
-					err:  core.ErrNonceTooLow,
-				},
-				{ // Old account, 1 tx in chain, 0 pending: reject nonce 2
-					from: "dave",
-					tx:   makeUnsignedTx(2, 1, 1, 1),
-					err:  core.ErrNonceTooHigh,
-				},
-			},
-		},
-		// Transactions from already pooled accounts should only be accepted if
-		// the nonces are contiguous (ignore prices for now, will check later)
-		{
-			seeds: map[string]seed{
-				"alice": {
-					balance: 1000000,
-					txs: []*types.BlobTx{
-						makeUnsignedTx(0, 1, 1, 1),
-					},
-				},
-				"bob": {
-					balance: 1000000,
-					nonce:   1,
-					txs: []*types.BlobTx{
-						makeUnsignedTx(1, 1, 1, 1),
-					},
-				},
-			},
-			adds: []addtx{
-				{ // New account, 1 tx pending: reject replacement nonce 0 (ignore price for now)
-					from: "alice",
-					tx:   makeUnsignedTx(0, 1, 1, 1),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 1 tx pending: accept nonce 1
-					from: "alice",
-					tx:   makeUnsignedTx(1, 1, 1, 1),
-					err:  nil,
-				},
-				{ // New account, 2 txs pending: reject nonce 3
-					from: "alice",
-					tx:   makeUnsignedTx(3, 1, 1, 1),
-					err:  core.ErrNonceTooHigh,
-				},
-				{ // New account, 2 txs pending: accept nonce 2
-					from: "alice",
-					tx:   makeUnsignedTx(2, 1, 1, 1),
-					err:  nil,
-				},
-				{ // New account, 3 txs pending: accept nonce 3 now
-					from: "alice",
-					tx:   makeUnsignedTx(3, 1, 1, 1),
-					err:  nil,
-				},
-				{ // Old account, 1 tx in chain, 1 tx pending: reject replacement nonce 1 (ignore price for now)
-					from: "bob",
-					tx:   makeUnsignedTx(1, 1, 1, 1),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // Old account, 1 tx in chain, 1 tx pending: accept nonce 2 (ignore price for now)
-					from: "bob",
-					tx:   makeUnsignedTx(2, 1, 1, 1),
-					err:  nil,
-				},
-			},
-		},
-		// Transactions should only be accepted into the pool if the cumulative
-		// expenditure doesn't overflow the account balance
-		{
-			seeds: map[string]seed{
-				"alice": {balance: 63299 +
3*blobSize}, // 3 tx - 1 wei - }, - adds: []addtx{ - { // New account, no previous txs: accept nonce 0 with 21100 wei spend - from: "alice", - tx: makeUnsignedTx(0, 1, 1, 1), - err: nil, - }, - { // New account, 1 pooled tx with 21100 wei spent: accept nonce 1 with 21100 wei spend - from: "alice", - tx: makeUnsignedTx(1, 1, 1, 1), - err: nil, - }, - { // New account, 2 pooled tx with 42200 wei spent: reject nonce 2 with 21100 wei spend (1 wei overflow) - from: "alice", - tx: makeUnsignedTx(2, 1, 1, 1), - err: core.ErrInsufficientFunds, - }, - }, - }, - // Transactions should only be accepted into the pool if the total count - // from the same account doesn't overflow the pool limits - { - seeds: map[string]seed{ - "alice": {balance: 10000000}, - }, - adds: []addtx{ - { // New account, no previous txs, 16 slots left: accept nonce 0 - from: "alice", - tx: makeUnsignedTx(0, 1, 1, 1), - err: nil, - }, - { // New account, 1 pooled tx, 15 slots left: accept nonce 1 - from: "alice", - tx: makeUnsignedTx(1, 1, 1, 1), - err: nil, - }, - { // New account, 2 pooled tx, 14 slots left: accept nonce 2 - from: "alice", - tx: makeUnsignedTx(2, 1, 1, 1), - err: nil, - }, - { // New account, 3 pooled tx, 13 slots left: accept nonce 3 - from: "alice", - tx: makeUnsignedTx(3, 1, 1, 1), - err: nil, - }, - { // New account, 4 pooled tx, 12 slots left: accept nonce 4 - from: "alice", - tx: makeUnsignedTx(4, 1, 1, 1), - err: nil, - }, - { // New account, 5 pooled tx, 11 slots left: accept nonce 5 - from: "alice", - tx: makeUnsignedTx(5, 1, 1, 1), - err: nil, - }, - { // New account, 6 pooled tx, 10 slots left: accept nonce 6 - from: "alice", - tx: makeUnsignedTx(6, 1, 1, 1), - err: nil, - }, - { // New account, 7 pooled tx, 9 slots left: accept nonce 7 - from: "alice", - tx: makeUnsignedTx(7, 1, 1, 1), - err: nil, - }, - { // New account, 8 pooled tx, 8 slots left: accept nonce 8 - from: "alice", - tx: makeUnsignedTx(8, 1, 1, 1), - err: nil, - }, - { // New account, 9 pooled tx, 7 slots left: accept nonce 9 - from: "alice", - tx: makeUnsignedTx(9, 1, 1, 1), - err: nil, - }, - { // New account, 10 pooled tx, 6 slots left: accept nonce 10 - from: "alice", - tx: makeUnsignedTx(10, 1, 1, 1), - err: nil, - }, - { // New account, 11 pooled tx, 5 slots left: accept nonce 11 - from: "alice", - tx: makeUnsignedTx(11, 1, 1, 1), - err: nil, - }, - { // New account, 12 pooled tx, 4 slots left: accept nonce 12 - from: "alice", - tx: makeUnsignedTx(12, 1, 1, 1), - err: nil, - }, - { // New account, 13 pooled tx, 3 slots left: accept nonce 13 - from: "alice", - tx: makeUnsignedTx(13, 1, 1, 1), - err: nil, - }, - { // New account, 14 pooled tx, 2 slots left: accept nonce 14 - from: "alice", - tx: makeUnsignedTx(14, 1, 1, 1), - err: nil, - }, - { // New account, 15 pooled tx, 1 slots left: accept nonce 15 - from: "alice", - tx: makeUnsignedTx(15, 1, 1, 1), - err: nil, - }, - { // New account, 16 pooled tx, 0 slots left: accept nonce 15 replacement - from: "alice", - tx: makeUnsignedTx(15, 10, 10, 10), - err: nil, - }, - { // New account, 16 pooled tx, 0 slots left: reject nonce 16 with overcap - from: "alice", - tx: makeUnsignedTx(16, 1, 1, 1), - err: txpool.ErrAccountLimitExceeded, - }, - }, - }, - // Previously existing transactions should be allowed to be replaced iff - // the new cumulative expenditure can be covered by the account and the - // prices are bumped all around (no percentage check here). 
- {
-			seeds: map[string]seed{
-				"alice": {balance: 2*100 + 5*21000 + 3*blobSize},
-			},
-			adds: []addtx{
-				{ // New account, no previous txs: reject nonce 0 with 341172 wei spend
-					from: "alice",
-					tx:   makeUnsignedTx(0, 1, 20, 1),
-					err:  core.ErrInsufficientFunds,
-				},
-				{ // New account, no previous txs: accept nonce 0 with 173172 wei spend
-					from: "alice",
-					tx:   makeUnsignedTx(0, 1, 2, 1),
-					err:  nil,
-				},
-				{ // New account, 1 pooled tx with 173172 wei spent: accept nonce 1 with 152172 wei spend
-					from: "alice",
-					tx:   makeUnsignedTx(1, 1, 1, 1),
-					err:  nil,
-				},
-				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with 599684 wei spend (173072 extra) (would overflow balance at nonce 1)
-					from: "alice",
-					tx:   makeUnsignedTx(0, 2, 5, 2),
-					err:  core.ErrInsufficientFunds,
-				},
-				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gastip-bump
-					from: "alice",
-					tx:   makeUnsignedTx(0, 1, 3, 2),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-gascap-bump
-					from: "alice",
-					tx:   makeUnsignedTx(0, 2, 2, 2),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 2 pooled tx with 325344 wei spent: reject nonce 0 with no-blobcap-bump
-					from: "alice",
-					tx:   makeUnsignedTx(0, 2, 4, 1),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 2 pooled tx with 325344 wei spent: accept nonce 0 with 84100 wei spend (42000 extra)
-					from: "alice",
-					tx:   makeUnsignedTx(0, 2, 4, 2),
-					err:  nil,
-				},
-			},
-		},
-		// Previously existing transactions should be allowed to be replaced iff
-		// the new prices are bumped by a sufficient amount.
-		{
-			seeds: map[string]seed{
-				"alice": {balance: 100 + 8*21000 + 4*blobSize},
-			},
-			adds: []addtx{
-				{ // New account, no previous txs: accept nonce 0
-					from: "alice",
-					tx:   makeUnsignedTx(0, 2, 4, 2),
-					err:  nil,
-				},
-				{ // New account, 1 pooled tx: reject nonce 0 with low-gastip-bump
-					from: "alice",
-					tx:   makeUnsignedTx(0, 3, 8, 4),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 1 pooled tx: reject nonce 0 with low-gascap-bump
-					from: "alice",
-					tx:   makeUnsignedTx(0, 4, 6, 4),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 1 pooled tx: reject nonce 0 with low-blobcap-bump
-					from: "alice",
-					tx:   makeUnsignedTx(0, 4, 8, 3),
-					err:  txpool.ErrReplaceUnderpriced,
-				},
-				{ // New account, 1 pooled tx: accept nonce 0 with all-bumps
-					from: "alice",
-					tx:   makeUnsignedTx(0, 4, 8, 4),
-					err:  nil,
-				},
-			},
-		},
-	}
-	for i, tt := range tests {
-		// Create a temporary folder for the persistent backend
-		storage, _ := os.MkdirTemp("", "blobpool-")
-		defer os.RemoveAll(storage) // late defer, still ok
-
-		os.MkdirAll(filepath.Join(storage, pendingTransactionStore), 0700)
-		store, _ := billy.Open(billy.Options{Path: filepath.Join(storage, pendingTransactionStore)}, newSlotter(), nil)
-
-		// Insert the seed transactions for the pool startup
-		var (
-			keys  = make(map[string]*ecdsa.PrivateKey)
-			addrs = make(map[string]common.Address)
-		)
-		statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewDatabase(memorydb.New())), nil)
-		for acc, seed := range tt.seeds {
-			// Generate a new random key/address for the seed account
-			keys[acc], _ = crypto.GenerateKey()
-			addrs[acc] = crypto.PubkeyToAddress(keys[acc].PublicKey)
-
-			// Seed the state database with this account
-			statedb.AddBalance(addrs[acc], new(big.Int).SetUint64(seed.balance))
-			statedb.SetNonce(addrs[acc], seed.nonce)
-
-			// Sign the seed transactions and store
them in the data store
-			for _, tx := range seed.txs {
-				var (
-					signed, _ = types.SignNewTx(keys[acc], types.LatestSigner(testChainConfig), tx)
-					blob, _   = rlp.EncodeToBytes(&blobTx{Tx: signed, Blobs: []kzg4844.Blob{emptyBlob}, Commits: []kzg4844.Commitment{emptyBlobCommit}, Proofs: []kzg4844.Proof{emptyBlobProof}})
-				)
-				store.Put(blob)
-			}
-		}
-		statedb.Commit(0, true, false)
-		store.Close()
-
-		// Create a blob pool out of the pre-seeded data
-		chain := &testBlockChain{
-			config:  testChainConfig,
-			basefee: uint256.NewInt(1050),
-			blobfee: uint256.NewInt(105),
-			statedb: statedb,
-		}
-		pool := New(Config{Datadir: storage}, chain)
-		if err := pool.Init(big.NewInt(1), chain.CurrentBlock(), makeAddressReserver()); err != nil {
-			t.Fatalf("test %d: failed to create blob pool: %v", i, err)
-		}
-		verifyPoolInternals(t, pool)
-
-		// Add each transaction one by one, verifying the pool internals in between
-		for j, add := range tt.adds {
-			signed, _ := types.SignNewTx(keys[add.from], types.LatestSigner(testChainConfig), add.tx)
-			if err := pool.add(signed, []kzg4844.Blob{emptyBlob}, []kzg4844.Commitment{emptyBlobCommit}, []kzg4844.Proof{emptyBlobProof}); !errors.Is(err, add.err) {
-				t.Errorf("test %d, tx %d: adding transaction error mismatch: have %v, want %v", i, j, err, add.err)
-			}
-			verifyPoolInternals(t, pool)
-		}
-		// Verify the pool internals and close down the test
-		verifyPoolInternals(t, pool)
-		pool.Close()
-	}
-}
diff --git a/core/txpool/blobpool/config.go b/core/txpool/blobpool/config.go
deleted file mode 100644
index 6015b1baf6..0000000000
--- a/core/txpool/blobpool/config.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package blobpool
-
-import (
-	"github.com/ethereum/go-ethereum/log"
-)
-
-// Config are the configuration parameters of the blob transaction pool.
-type Config struct {
-	Datadir   string // Data directory containing the currently executable blobs
-	Datacap   uint64 // Soft-cap of database storage (hard cap is larger due to overhead)
-	PriceBump uint64 // Minimum price bump percentage to replace an already existing nonce
-}
-
-// DefaultConfig contains the default configurations for the transaction pool.
-var DefaultConfig = Config{
-	Datadir:   "blobpool",
-	Datacap:   10 * 1024 * 1024 * 1024,
-	PriceBump: 100, // either have patience or be aggressive, no mushy ground
-}
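The sanitize method below never rejects a configuration outright: out-of-range fields are replaced with their DefaultConfig values and a warning is logged. A minimal usage sketch (the directory path is purely illustrative):

cfg := Config{
	Datadir: "/tmp/blobpool", // hypothetical location
	Datacap: 0,               // invalid: sanitize resets it to DefaultConfig.Datacap
}
cfg = cfg.sanitize() // logs "Sanitizing invalid blobpool storage cap" and applies the default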
-// sanitize checks the provided user configurations and changes anything that's
-// unreasonable or unworkable.
-func (config *Config) sanitize() Config {
-	conf := *config
-	if conf.Datacap < 1 {
-		log.Warn("Sanitizing invalid blobpool storage cap", "provided", conf.Datacap, "updated", DefaultConfig.Datacap)
-		conf.Datacap = DefaultConfig.Datacap
-	}
-	if conf.PriceBump < 1 {
-		log.Warn("Sanitizing invalid blobpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump)
-		conf.PriceBump = DefaultConfig.PriceBump
-	}
-	return conf
-}
diff --git a/core/txpool/blobpool/evictheap.go b/core/txpool/blobpool/evictheap.go
deleted file mode 100644
index 13e1f1f6ef..0000000000
--- a/core/txpool/blobpool/evictheap.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package blobpool
-
-import (
-	"bytes"
-	"container/heap"
-	"math"
-	"sort"
-
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/holiman/uint256"
-)
-
-// evictHeap is a helper data structure to keep track of the cheapest bottleneck
-// transaction from each account to determine which account to evict from.
-//
-// The heap internally tracks a slice of cheapest transactions from each account
-// and a mapping from addresses to indices for direct removals/updates.
-//
-// The goal of the heap is to decide which account has the worst bottleneck to
-// evict transactions from.
-type evictHeap struct {
-	metas *map[common.Address][]*blobTxMeta // Pointer to the blob pool's index for price retrievals
-
-	basefeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the base fee
-	blobfeeJumps float64 // Pre-calculated absolute dynamic fee jumps for the blob fee
-
-	addrs []common.Address       // Heap of addresses to retrieve the cheapest out of
-	index map[common.Address]int // Indices into the heap for replacements
-}
-
-// newPriceHeap creates a new heap of the cheapest accounts in the blob pool to
-// evict from in case of oversaturation.
-func newPriceHeap(basefee *uint256.Int, blobfee *uint256.Int, index *map[common.Address][]*blobTxMeta) *evictHeap {
-	heap := &evictHeap{
-		metas: index,
-		index: make(map[common.Address]int),
-	}
-	// Populate the heap in account sort order. Not really needed in practice,
-	// but it makes the heap initialization deterministic and less annoying to
-	// test in unit tests.
- addrs := make([]common.Address, 0, len(*index)) - for addr := range *index { - addrs = append(addrs, addr) - } - sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i][:], addrs[j][:]) < 0 }) - - for _, addr := range addrs { - heap.index[addr] = len(heap.addrs) - heap.addrs = append(heap.addrs, addr) - } - heap.reinit(basefee, blobfee, true) - return heap -} - -// reinit updates the pre-calculated dynamic fee jumps in the price heap and runs -// the sorting algorithm from scratch on the entire heap. -func (h *evictHeap) reinit(basefee *uint256.Int, blobfee *uint256.Int, force bool) { - // If the update is mostly the same as the old, don't sort pointlessly - basefeeJumps := dynamicFeeJumps(basefee) - blobfeeJumps := dynamicFeeJumps(blobfee) - - if !force && math.Abs(h.basefeeJumps-basefeeJumps) < 0.01 && math.Abs(h.blobfeeJumps-blobfeeJumps) < 0.01 { // TODO(karalabe): 0.01 enough, maybe should be smaller? Maybe this optimization is moot? - return - } - // One or both of the dynamic fees jumped, resort the pool - h.basefeeJumps = basefeeJumps - h.blobfeeJumps = blobfeeJumps - - heap.Init(h) -} - -// Len implements sort.Interface as part of heap.Interface, returning the number -// of accounts in the pool which can be considered for eviction. -func (h *evictHeap) Len() int { - return len(h.addrs) -} - -// Less implements sort.Interface as part of heap.Interface, returning which of -// the two requested accounts has a cheaper bottleneck. -func (h *evictHeap) Less(i, j int) bool { - txsI := (*(h.metas))[h.addrs[i]] - txsJ := (*(h.metas))[h.addrs[j]] - - lastI := txsI[len(txsI)-1] - lastJ := txsJ[len(txsJ)-1] - - prioI := evictionPriority(h.basefeeJumps, lastI.evictionExecFeeJumps, h.blobfeeJumps, lastI.evictionBlobFeeJumps) - if prioI > 0 { - prioI = 0 - } - prioJ := evictionPriority(h.basefeeJumps, lastJ.evictionExecFeeJumps, h.blobfeeJumps, lastJ.evictionBlobFeeJumps) - if prioJ > 0 { - prioJ = 0 - } - if prioI == prioJ { - return lastI.evictionExecTip.Lt(lastJ.evictionExecTip) - } - return prioI < prioJ -} - -// Swap implements sort.Interface as part of heap.Interface, maintaining both the -// order of the accounts according to the heap, and the account->item slot mapping -// for replacements. -func (h *evictHeap) Swap(i, j int) { - h.index[h.addrs[i]], h.index[h.addrs[j]] = h.index[h.addrs[j]], h.index[h.addrs[i]] - h.addrs[i], h.addrs[j] = h.addrs[j], h.addrs[i] -} - -// Push implements heap.Interface, appending an item to the end of the account -// ordering as well as the address to item slot mapping. -func (h *evictHeap) Push(x any) { - h.index[x.(common.Address)] = len(h.addrs) - h.addrs = append(h.addrs, x.(common.Address)) -} - -// Pop implements heap.Interface, removing and returning the last element of the -// heap. -// -// Note, use `heap.Pop`, not `evictHeap.Pop`. This method is used by Go's heap, -// to provide the functionality, it does not embed it. -func (h *evictHeap) Pop() any { - // Remove the last element from the heap - size := len(h.addrs) - addr := h.addrs[size-1] - h.addrs = h.addrs[:size-1] - - // Unindex the removed element and return - delete(h.index, addr) - return addr -} diff --git a/core/txpool/blobpool/evictheap_test.go b/core/txpool/blobpool/evictheap_test.go deleted file mode 100644 index 622a3869ea..0000000000 --- a/core/txpool/blobpool/evictheap_test.go +++ /dev/null @@ -1,330 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. 
-// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import ( - "container/heap" - mrand "math/rand" - "testing" - - "github.com/ava-labs/subnet-evm/params" - "github.com/ethereum/go-ethereum/common" - "github.com/holiman/uint256" -) - -var rand = mrand.New(mrand.NewSource(1)) - -// verifyHeapInternals verifies that all accounts present in the index are also -// present in the heap and internals are consistent across various indices. -func verifyHeapInternals(t *testing.T, evict *evictHeap) { - t.Helper() - - // Ensure that all accounts are present in the heap and no extras - seen := make(map[common.Address]struct{}) - for i, addr := range evict.addrs { - seen[addr] = struct{}{} - if _, ok := (*evict.metas)[addr]; !ok { - t.Errorf("heap contains unexpected address at slot %d: %v", i, addr) - } - } - for addr := range *evict.metas { - if _, ok := seen[addr]; !ok { - t.Errorf("heap is missing required address %v", addr) - } - } - if len(evict.addrs) != len(*evict.metas) { - t.Errorf("heap size %d mismatches metadata size %d", len(evict.addrs), len(*evict.metas)) - } - // Ensure that all accounts are present in the heap order index and no extras - have := make([]common.Address, len(evict.index)) - for addr, i := range evict.index { - have[i] = addr - } - if len(have) != len(evict.addrs) { - t.Errorf("heap index size %d mismatches heap size %d", len(have), len(evict.addrs)) - } - for i := 0; i < len(have) && i < len(evict.addrs); i++ { - if have[i] != evict.addrs[i] { - t.Errorf("heap index for slot %d mismatches: have %v, want %v", i, have[i], evict.addrs[i]) - } - } -} - -// Tests that the price heap can correctly sort its set of transactions based on -// an input base- and blob fee. -func TestPriceHeapSorting(t *testing.T) { - tests := []struct { - execTips []uint64 - execFees []uint64 - blobFees []uint64 - - basefee uint64 - blobfee uint64 - - order []int - }{ - // If everything is above the basefee and blobfee, order by miner tip - { - execTips: []uint64{1, 0, 2}, - execFees: []uint64{1, 2, 3}, - blobFees: []uint64{3, 2, 1}, - basefee: 0, - blobfee: 0, - order: []int{1, 0, 2}, - }, - // If only basefees are used (blob fee matches with network), return the - // ones the furthest below the current basefee, splitting same ones with - // the tip. Anything above the basefee should be split by tip. 
- {
-			execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
-			execFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
-			blobFees: []uint64{0, 0, 0, 0, 0, 0, 0},
-			basefee:  1999,
-			blobfee:  0,
-			order:    []int{3, 2, 1, 0, 4, 5, 6},
-		},
-		// If only blobfees are used (base fee matches with network), return the
-		// ones the furthest below the current blobfee, splitting same ones with
-		// the tip. Anything above the blobfee should be split by tip.
-		{
-			execTips: []uint64{100, 50, 100, 50, 1, 2, 3},
-			execFees: []uint64{0, 0, 0, 0, 0, 0, 0},
-			blobFees: []uint64{1000, 1000, 500, 500, 2000, 2000, 2000},
-			basefee:  0,
-			blobfee:  1999,
-			order:    []int{3, 2, 1, 0, 4, 5, 6},
-		},
-		// If both basefee and blobfee are specified, sort by the larger distance
-		// of the two from the current network conditions, splitting same (loglog)
-		// ones via the tip.
-		//
-		// Basefee: 1000
-		// Blobfee: 100
-		//
-		// Tx #0: (800, 80) - 2 jumps below both => priority -1
-		// Tx #1: (630, 63) - 4 jumps below both => priority -2
-		// Tx #2: (800, 63) - 2 jumps below basefee, 4 jumps below blobfee => priority -2 (blob penalty dominates)
-		// Tx #3: (630, 80) - 4 jumps below basefee, 2 jumps below blobfee => priority -2 (base penalty dominates)
-		//
-		// Txs 1, 2 and 3 share the same priority and are split by tip; tx 0 has
-		// the smallest penalty, so it is evicted last
-		{
-			execTips: []uint64{1, 2, 3, 4},
-			execFees: []uint64{800, 630, 800, 630},
-			blobFees: []uint64{80, 63, 63, 80},
-			basefee:  1000,
-			blobfee:  100,
-			order:    []int{1, 2, 3, 0},
-		},
-	}
-	for i, tt := range tests {
-		// Create an index of the transactions
-		index := make(map[common.Address][]*blobTxMeta)
-		for j := byte(0); j < byte(len(tt.execTips)); j++ {
-			addr := common.Address{j}
-
-			var (
-				execTip = uint256.NewInt(tt.execTips[j])
-				execFee = uint256.NewInt(tt.execFees[j])
-				blobFee = uint256.NewInt(tt.blobFees[j])
-
-				basefeeJumps = dynamicFeeJumps(execFee)
-				blobfeeJumps = dynamicFeeJumps(blobFee)
-			)
-			index[addr] = []*blobTxMeta{{
-				id:                   uint64(j),
-				size:                 128 * 1024,
-				nonce:                0,
-				execTipCap:           execTip,
-				execFeeCap:           execFee,
-				blobFeeCap:           blobFee,
-				basefeeJumps:         basefeeJumps,
-				blobfeeJumps:         blobfeeJumps,
-				evictionExecTip:      execTip,
-				evictionExecFeeJumps: basefeeJumps,
-				evictionBlobFeeJumps: blobfeeJumps,
-			}}
-		}
-		// Create a price heap and check the pop order
-		priceheap := newPriceHeap(uint256.NewInt(tt.basefee), uint256.NewInt(tt.blobfee), &index)
-		verifyHeapInternals(t, priceheap)
-
-		for j := 0; j < len(tt.order); j++ {
-			if next := heap.Pop(priceheap); int(next.(common.Address)[0]) != tt.order[j] {
-				t.Errorf("test %d, item %d: order mismatch: have %d, want %d", i, j, next.(common.Address)[0], tt.order[j])
-			} else {
-				delete(index, next.(common.Address)) // remove to simulate a correct pool for the test
-			}
-			verifyHeapInternals(t, priceheap)
-		}
-	}
-}
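All of the expected orderings above derive from dynamicFeeJumps, which, per the vectors hard-coded in TestOpenIndex, is the fee's logarithm in base 1.125, i.e. the number of 12.5% fee "jumps" between 1 and the given cap. A quick sanity check of those constants (illustrative arithmetic only, not the production implementation):

// jumps counts the number of 12.5% steps needed to climb from 1 to fee.
func jumps(fee float64) float64 {
	return math.Log(fee) / math.Log(1.125)
}

// jumps(100) ≈ 39.098 and jumps(55) ≈ 34.023, matching the basefeeJumps and
// blobfeeJumps comments in TestOpenIndex.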
-// Benchmarks reheaping the entire set of accounts in the blob pool.
-func BenchmarkPriceHeapReinit1MB(b *testing.B)   { benchmarkPriceHeapReinit(b, 1024*1024) }
-func BenchmarkPriceHeapReinit10MB(b *testing.B)  { benchmarkPriceHeapReinit(b, 10*1024*1024) }
-func BenchmarkPriceHeapReinit100MB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024) }
-func BenchmarkPriceHeapReinit1GB(b *testing.B)   { benchmarkPriceHeapReinit(b, 1024*1024*1024) }
-func BenchmarkPriceHeapReinit10GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 10*1024*1024*1024) }
-func BenchmarkPriceHeapReinit25GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 25*1024*1024*1024) }
-func BenchmarkPriceHeapReinit50GB(b *testing.B)  { benchmarkPriceHeapReinit(b, 50*1024*1024*1024) }
-func BenchmarkPriceHeapReinit100GB(b *testing.B) { benchmarkPriceHeapReinit(b, 100*1024*1024*1024) }
-
-func benchmarkPriceHeapReinit(b *testing.B, datacap uint64) {
-	// Calculate how many unique transactions we can fit into the provided disk
-	// data cap
-	blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
-
-	// Create a random set of transactions with random fees. Use a separate account
-	// for each transaction to make it the worst case.
-	index := make(map[common.Address][]*blobTxMeta)
-	for i := 0; i < int(blobs); i++ {
-		var addr common.Address
-		rand.Read(addr[:])
-
-		var (
-			execTip = uint256.NewInt(rand.Uint64())
-			execFee = uint256.NewInt(rand.Uint64())
-			blobFee = uint256.NewInt(rand.Uint64())
-
-			basefeeJumps = dynamicFeeJumps(execFee)
-			blobfeeJumps = dynamicFeeJumps(blobFee)
-		)
-		index[addr] = []*blobTxMeta{{
-			id:                   uint64(i),
-			size:                 128 * 1024,
-			nonce:                0,
-			execTipCap:           execTip,
-			execFeeCap:           execFee,
-			blobFeeCap:           blobFee,
-			basefeeJumps:         basefeeJumps,
-			blobfeeJumps:         blobfeeJumps,
-			evictionExecTip:      execTip,
-			evictionExecFeeJumps: basefeeJumps,
-			evictionBlobFeeJumps: blobfeeJumps,
-		}}
-	}
-	// Create a price heap and reinit it over and over
-	heap := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
-
-	basefees := make([]*uint256.Int, b.N)
-	blobfees := make([]*uint256.Int, b.N)
-	for i := 0; i < b.N; i++ {
-		basefees[i] = uint256.NewInt(rand.Uint64())
-		blobfees[i] = uint256.NewInt(rand.Uint64())
-	}
-	b.ResetTimer()
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		heap.reinit(basefees[i], blobfees[i], true)
-	}
-}
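The overflow benchmark below exercises a push-then-pop cycle. As the Pop documentation in evictheap.go notes, callers must go through the container/heap package functions rather than invoking the evictHeap methods directly, so that heapification and the address-to-slot index stay consistent. A short sketch of the cycle, using the benchmark's own identifiers:

heap.Push(evict, addr)                   // sift the new bottleneck account into place
drop := heap.Pop(evict).(common.Address) // extract the cheapest account to evict
delete(index, drop)                      // keep the shared metadata index in sync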
-// Benchmarks overflowing the heap over and over (add and then drop).
-func BenchmarkPriceHeapOverflow1MB(b *testing.B)   { benchmarkPriceHeapOverflow(b, 1024*1024) }
-func BenchmarkPriceHeapOverflow10MB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 10*1024*1024) }
-func BenchmarkPriceHeapOverflow100MB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024) }
-func BenchmarkPriceHeapOverflow1GB(b *testing.B)   { benchmarkPriceHeapOverflow(b, 1024*1024*1024) }
-func BenchmarkPriceHeapOverflow10GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 10*1024*1024*1024) }
-func BenchmarkPriceHeapOverflow25GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 25*1024*1024*1024) }
-func BenchmarkPriceHeapOverflow50GB(b *testing.B)  { benchmarkPriceHeapOverflow(b, 50*1024*1024*1024) }
-func BenchmarkPriceHeapOverflow100GB(b *testing.B) { benchmarkPriceHeapOverflow(b, 100*1024*1024*1024) }
-
-func benchmarkPriceHeapOverflow(b *testing.B, datacap uint64) {
-	// Calculate how many unique transactions we can fit into the provided disk
-	// data cap
-	blobs := datacap / (params.BlobTxBytesPerFieldElement * params.BlobTxFieldElementsPerBlob)
-
-	// Create a random set of transactions with random fees. Use a separate account
-	// for each transaction to make it the worst case.
-	index := make(map[common.Address][]*blobTxMeta)
-	for i := 0; i < int(blobs); i++ {
-		var addr common.Address
-		rand.Read(addr[:])
-
-		var (
-			execTip = uint256.NewInt(rand.Uint64())
-			execFee = uint256.NewInt(rand.Uint64())
-			blobFee = uint256.NewInt(rand.Uint64())
-
-			basefeeJumps = dynamicFeeJumps(execFee)
-			blobfeeJumps = dynamicFeeJumps(blobFee)
-		)
-		index[addr] = []*blobTxMeta{{
-			id:                   uint64(i),
-			size:                 128 * 1024,
-			nonce:                0,
-			execTipCap:           execTip,
-			execFeeCap:           execFee,
-			blobFeeCap:           blobFee,
-			basefeeJumps:         basefeeJumps,
-			blobfeeJumps:         blobfeeJumps,
-			evictionExecTip:      execTip,
-			evictionExecFeeJumps: basefeeJumps,
-			evictionBlobFeeJumps: blobfeeJumps,
-		}}
-	}
-	// Create a price heap and overflow it over and over
-	evict := newPriceHeap(uint256.NewInt(rand.Uint64()), uint256.NewInt(rand.Uint64()), &index)
-	var (
-		addrs = make([]common.Address, b.N)
-		metas = make([]*blobTxMeta, b.N)
-	)
-	for i := 0; i < b.N; i++ {
-		rand.Read(addrs[i][:])
-
-		var (
-			execTip = uint256.NewInt(rand.Uint64())
-			execFee = uint256.NewInt(rand.Uint64())
-			blobFee = uint256.NewInt(rand.Uint64())
-
-			basefeeJumps = dynamicFeeJumps(execFee)
-			blobfeeJumps = dynamicFeeJumps(blobFee)
-		)
-		metas[i] = &blobTxMeta{
-			id:                   uint64(int(blobs) + i),
-			size:                 128 * 1024,
-			nonce:                0,
-			execTipCap:           execTip,
-			execFeeCap:           execFee,
-			blobFeeCap:           blobFee,
-			basefeeJumps:         basefeeJumps,
-			blobfeeJumps:         blobfeeJumps,
-			evictionExecTip:      execTip,
-			evictionExecFeeJumps: basefeeJumps,
-			evictionBlobFeeJumps: blobfeeJumps,
-		}
-	}
-	b.ResetTimer()
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		index[addrs[i]] = []*blobTxMeta{metas[i]}
-		heap.Push(evict, addrs[i])
-
-		drop := heap.Pop(evict)
-		delete(index, drop.(common.Address))
-	}
-}
diff --git a/core/txpool/blobpool/interface.go b/core/txpool/blobpool/interface.go
deleted file mode 100644
index d5603cf566..0000000000
--- a/core/txpool/blobpool/interface.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package blobpool
-
-import (
-	"math/big"
-
-	"github.com/ava-labs/subnet-evm/commontype"
-	"github.com/ava-labs/subnet-evm/core/state"
-	"github.com/ava-labs/subnet-evm/core/types"
-	"github.com/ava-labs/subnet-evm/params"
-	"github.com/ethereum/go-ethereum/common"
-)
-
-// BlockChain defines the minimal set of methods needed to back a blob pool with
-// a chain.
Exists to allow mocking the live chain out of tests. -type BlockChain interface { - // Config retrieves the chain's fork configuration. - Config() *params.ChainConfig - - // CurrentBlock returns the current head of the chain. - CurrentBlock() *types.Header - - // CurrentFinalBlock returns the current block below which blobs should not - // be maintained anymore for reorg purposes. - CurrentFinalBlock() *types.Header - - // GetBlock retrieves a specific block, used during pool resets. - GetBlock(hash common.Hash, number uint64) *types.Block - - // StateAt returns a state database for a given root hash (generally the head). - StateAt(root common.Hash) (*state.StateDB, error) - - GetFeeConfigAt(header *types.Header) (commontype.FeeConfig, *big.Int, error) -} diff --git a/core/txpool/blobpool/limbo.go b/core/txpool/blobpool/limbo.go deleted file mode 100644 index c8e7eed476..0000000000 --- a/core/txpool/blobpool/limbo.go +++ /dev/null @@ -1,268 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import ( - "errors" - - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/holiman/billy" -) - -// limboBlob is a wrapper around an opaque blobset that also contains the tx hash -// to which it belongs as well as the block number in which it was included for -// finality eviction. -type limboBlob struct { - Owner common.Hash // Owner transaction's hash to support resurrecting reorged txs - Block uint64 // Block in which the blob transaction was included - - Blobs []kzg4844.Blob // The opaque blobs originally part of the transaction - Commits []kzg4844.Commitment // The commitments for the original blobs - Proofs []kzg4844.Proof // The proofs verifying the commitments -} - -// limbo is a light, indexed database to temporarily store recently included -// blobs until they are finalized. The purpose is to support small reorgs, which -// would require pulling back up old blobs (which aren't part of the chain). -// -// TODO(karalabe): Currently updating the inclusion block of a blob needs a full db rewrite. Can we do without? 
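For orientation before the type definition: the limbo keeps two in-memory indices so that finalization can evict whole blocks of blobs at once. A minimal runnable sketch of that bookkeeping, in-memory only (no billy store, no RLP); all names here are illustrative rather than taken from this file:

package main

import "fmt"

// miniLimbo is an in-memory analogue of the bookkeeping the limbo type
// below maintains: one index from tx hash to entry id, and one from
// inclusion block to the ids it holds, so finalize can drop whole blocks.
type miniLimbo struct {
	nextID uint64
	index  map[string]uint64            // tx hash -> entry id
	groups map[uint64]map[uint64]string // block -> entry id -> tx hash
}

// push tracks a blob tx included in the given block.
func (l *miniLimbo) push(tx string, block uint64) {
	id := l.nextID
	l.nextID++
	l.index[tx] = id
	if l.groups[block] == nil {
		l.groups[block] = make(map[uint64]string)
	}
	l.groups[block][id] = tx
}

// finalize drops every entry at or below the finalized height, mirroring
// the real finalize's walk over the block groups.
func (l *miniLimbo) finalize(final uint64) {
	for block, ids := range l.groups {
		if block > final {
			continue // still reorg-able, keep the blobs around
		}
		for _, tx := range ids {
			delete(l.index, tx)
		}
		delete(l.groups, block)
	}
}

func main() {
	l := &miniLimbo{index: map[string]uint64{}, groups: map[uint64]map[uint64]string{}}
	l.push("0xaa", 10)
	l.push("0xbb", 12)
	l.finalize(11)
	fmt.Println(len(l.index)) // 1: only the block-12 entry survives
}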
-type limbo struct {
-	store billy.Database // Persistent data store for limboed blobs
-
-	index  map[common.Hash]uint64            // Mappings from tx hashes to datastore ids
-	groups map[uint64]map[uint64]common.Hash // Set of txs included in past blocks
-}
-
-// newLimbo opens and indexes a set of limboed blob transactions.
-func newLimbo(datadir string) (*limbo, error) {
-	l := &limbo{
-		index:  make(map[common.Hash]uint64),
-		groups: make(map[uint64]map[uint64]common.Hash),
-	}
-	// Index all limboed blobs on disk and delete anything unprocessable
-	var fails []uint64
-	index := func(id uint64, size uint32, data []byte) {
-		if l.parseBlob(id, data) != nil {
-			fails = append(fails, id)
-		}
-	}
-	store, err := billy.Open(billy.Options{Path: datadir}, newSlotter(), index)
-	if err != nil {
-		return nil, err
-	}
-	l.store = store
-
-	if len(fails) > 0 {
-		log.Warn("Dropping invalidated limboed blobs", "ids", fails)
-		for _, id := range fails {
-			if err := l.store.Delete(id); err != nil {
-				l.Close()
-				return nil, err
-			}
-		}
-	}
-	return l, nil
-}
-
-// Close closes down the underlying persistent store.
-func (l *limbo) Close() error {
-	return l.store.Close()
-}
-
-// parseBlob is a callback method on limbo creation that gets called for each
-// limboed blob on disk to create the in-memory metadata index.
-func (l *limbo) parseBlob(id uint64, data []byte) error {
-	item := new(limboBlob)
-	if err := rlp.DecodeBytes(data, item); err != nil {
-		// This path is impossible unless the disk data representation changes
-		// across restarts. For that ever-improbable case, recover gracefully
-		// by ignoring this data entry.
-		log.Error("Failed to decode blob limbo entry", "id", id, "err", err)
-		return err
-	}
-	if _, ok := l.index[item.Owner]; ok {
-		// This path is impossible, unless due to a programming error a blob gets
-		// inserted into the limbo which was already part of it. Recover gracefully
-		// by ignoring this data entry.
-		log.Error("Dropping duplicate blob limbo entry", "owner", item.Owner, "id", id)
-		return errors.New("duplicate blob")
-	}
-	l.index[item.Owner] = id
-
-	if _, ok := l.groups[item.Block]; !ok {
-		l.groups[item.Block] = make(map[uint64]common.Hash)
-	}
-	l.groups[item.Block][id] = item.Owner
-
-	return nil
-}
-
-// finalize evicts all blobs belonging to a recently finalized block or older.
-func (l *limbo) finalize(final *types.Header) {
-	// Just in case there's no final block yet (network not yet merged, weird
-	// restart, sethead, etc), fail gracefully.
-	if final == nil {
-		log.Error("Nil finalized block cannot evict old blobs")
-		return
-	}
-	for block, ids := range l.groups {
-		if block > final.Number.Uint64() {
-			continue
-		}
-		for id, owner := range ids {
-			if err := l.store.Delete(id); err != nil {
-				log.Error("Failed to drop finalized blob", "block", block, "id", id, "err", err)
-			}
-			delete(l.index, owner)
-		}
-		delete(l.groups, block)
-	}
-}
-
-// push stores a new blob transaction into the limbo, waiting until finality for
-// it to be automatically evicted.
-func (l *limbo) push(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error {
-	// If the blobs are already tracked by the limbo, consider it a programming
-	// error. There's not much to do against it, but be loud.
-	if _, ok := l.index[tx]; ok {
-		log.Error("Limbo cannot push already tracked blobs", "tx", tx)
-		return errors.New("already tracked blob transaction")
-	}
-	if err := l.setAndIndex(tx, block, blobs, commits, proofs); err != nil {
-		log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
-		return err
-	}
-	return nil
-}
-
-// pull retrieves a previously pushed set of blobs back from the limbo, removing
-// it at the same time. This method should be used when a previously included blob
-// transaction gets reorged out.
-func (l *limbo) pull(tx common.Hash) ([]kzg4844.Blob, []kzg4844.Commitment, []kzg4844.Proof, error) {
-	// If the blobs are not tracked by the limbo, there's not much to do. This
-	// can happen for example if a blob transaction is mined without pushing it
-	// into the network first.
-	id, ok := l.index[tx]
-	if !ok {
-		log.Trace("Limbo cannot pull non-tracked blobs", "tx", tx)
-		return nil, nil, nil, errors.New("unseen blob transaction")
-	}
-	item, err := l.getAndDrop(id)
-	if err != nil {
-		log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
-		return nil, nil, nil, err
-	}
-	return item.Blobs, item.Commits, item.Proofs, nil
-}
-
-// update changes the block number under which a blob transaction is tracked. This
-// method should be used when a reorg changes a transaction's inclusion block.
-//
-// The method may log errors for various unexpected scenarios but will not return
-// any of them since there's no clear error case. Some errors may be due to coding
-// issues, others caused by signers mining MEV stuff or swapping transactions. In
-// all cases, the pool needs to continue operating.
-func (l *limbo) update(tx common.Hash, block uint64) {
-	// If the blobs are not tracked by the limbo, there's not much to do. This
-	// can happen for example if a blob transaction is mined without pushing it
-	// into the network first.
-	id, ok := l.index[tx]
-	if !ok {
-		log.Trace("Limbo cannot update non-tracked blobs", "tx", tx)
-		return
-	}
-	// If there was no change in the blob's inclusion block, don't mess around
-	// with heavy database operations.
-	if _, ok := l.groups[block][id]; ok {
-		log.Trace("Blob transaction unchanged in limbo", "tx", tx, "block", block)
-		return
-	}
-	// Retrieve the old blobs from the data store and write them back with a new
-	// block number. If anything fails, there's not much to do, go on.
-	item, err := l.getAndDrop(id)
-	if err != nil {
-		log.Error("Failed to get and drop limboed blobs", "tx", tx, "id", id, "err", err)
-		return
-	}
-	if err := l.setAndIndex(tx, block, item.Blobs, item.Commits, item.Proofs); err != nil {
-		log.Error("Failed to set and index limboed blobs", "tx", tx, "err", err)
-		return
-	}
-	log.Trace("Blob transaction updated in limbo", "tx", tx, "old-block", item.Block, "new-block", block)
-}
-
-// getAndDrop retrieves a blob item from the limbo store and deletes it both from
-// the store and indices.
-func (l *limbo) getAndDrop(id uint64) (*limboBlob, error) {
-	data, err := l.store.Get(id)
-	if err != nil {
-		return nil, err
-	}
-	item := new(limboBlob)
-	if err = rlp.DecodeBytes(data, item); err != nil {
-		return nil, err
-	}
-	delete(l.index, item.Owner)
-	delete(l.groups[item.Block], id)
-	if len(l.groups[item.Block]) == 0 {
-		delete(l.groups, item.Block)
-	}
-	if err := l.store.Delete(id); err != nil {
-		return nil, err
-	}
-	return item, nil
-}
-
-// setAndIndex assembles a limbo blob database entry and stores it, also updating
-// the in-memory indices.
-func (l *limbo) setAndIndex(tx common.Hash, block uint64, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof) error { - item := &limboBlob{ - Owner: tx, - Block: block, - Blobs: blobs, - Commits: commits, - Proofs: proofs, - } - data, err := rlp.EncodeToBytes(item) - if err != nil { - panic(err) // cannot happen runtime, dev error - } - id, err := l.store.Put(data) - if err != nil { - return err - } - l.index[tx] = id - if _, ok := l.groups[block]; !ok { - l.groups[block] = make(map[uint64]common.Hash) - } - l.groups[block][id] = tx - return nil -} diff --git a/core/txpool/blobpool/metrics.go b/core/txpool/blobpool/metrics.go deleted file mode 100644 index 0b9c687cce..0000000000 --- a/core/txpool/blobpool/metrics.go +++ /dev/null @@ -1,88 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import "github.com/ava-labs/subnet-evm/metrics" - -var ( - // datacapGauge tracks the user's configured capacity for the blob pool. It - // is mostly a way to expose/debug issues. - datacapGauge = metrics.NewRegisteredGauge("blobpool/datacap", nil) - - // The below metrics track the per-datastore metrics for the primary blob - // store and the temporary limbo store. - datausedGauge = metrics.NewRegisteredGauge("blobpool/dataused", nil) - datarealGauge = metrics.NewRegisteredGauge("blobpool/datareal", nil) - slotusedGauge = metrics.NewRegisteredGauge("blobpool/slotused", nil) - - limboDatausedGauge = metrics.NewRegisteredGauge("blobpool/limbo/dataused", nil) - limboDatarealGauge = metrics.NewRegisteredGauge("blobpool/limbo/datareal", nil) - limboSlotusedGauge = metrics.NewRegisteredGauge("blobpool/limbo/slotused", nil) - - // The below metrics track the per-shelf metrics for the primary blob store - // and the temporary limbo store. - shelfDatausedGaugeName = "blobpool/shelf_%d/dataused" - shelfDatagapsGaugeName = "blobpool/shelf_%d/datagaps" - shelfSlotusedGaugeName = "blobpool/shelf_%d/slotused" - shelfSlotgapsGaugeName = "blobpool/shelf_%d/slotgaps" - - limboShelfDatausedGaugeName = "blobpool/limbo/shelf_%d/dataused" - limboShelfDatagapsGaugeName = "blobpool/limbo/shelf_%d/datagaps" - limboShelfSlotusedGaugeName = "blobpool/limbo/shelf_%d/slotused" - limboShelfSlotgapsGaugeName = "blobpool/limbo/shelf_%d/slotgaps" - - // The oversized metrics aggregate the shelf stats above the max blob count - // limits to track transactions that are just huge, but don't contain blobs. 
- // - // There are no oversized data in the limbo, it only contains blobs and some - // constant metadata. - oversizedDatausedGauge = metrics.NewRegisteredGauge("blobpool/oversized/dataused", nil) - oversizedDatagapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/datagaps", nil) - oversizedSlotusedGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotused", nil) - oversizedSlotgapsGauge = metrics.NewRegisteredGauge("blobpool/oversized/slotgaps", nil) - - // basefeeGauge and blobfeeGauge track the current network 1559 base fee and - // 4844 blob fee respectively. - basefeeGauge = metrics.NewRegisteredGauge("blobpool/basefee", nil) - blobfeeGauge = metrics.NewRegisteredGauge("blobpool/blobfee", nil) - - // pooltipGauge is the configurable miner tip to permit a transaction into - // the pool. - pooltipGauge = metrics.NewRegisteredGauge("blobpool/pooltip", nil) - - // addwait/time, resetwait/time and getwait/time track the rough health of - // the pool and wether or not it's capable of keeping up with the load from - // the network. - addwaitHist = metrics.NewRegisteredHistogram("blobpool/addwait", nil, metrics.NewExpDecaySample(1028, 0.015)) - addtimeHist = metrics.NewRegisteredHistogram("blobpool/addtime", nil, metrics.NewExpDecaySample(1028, 0.015)) - getwaitHist = metrics.NewRegisteredHistogram("blobpool/getwait", nil, metrics.NewExpDecaySample(1028, 0.015)) - gettimeHist = metrics.NewRegisteredHistogram("blobpool/gettime", nil, metrics.NewExpDecaySample(1028, 0.015)) - pendwaitHist = metrics.NewRegisteredHistogram("blobpool/pendwait", nil, metrics.NewExpDecaySample(1028, 0.015)) - pendtimeHist = metrics.NewRegisteredHistogram("blobpool/pendtime", nil, metrics.NewExpDecaySample(1028, 0.015)) - resetwaitHist = metrics.NewRegisteredHistogram("blobpool/resetwait", nil, metrics.NewExpDecaySample(1028, 0.015)) - resettimeHist = metrics.NewRegisteredHistogram("blobpool/resettime", nil, metrics.NewExpDecaySample(1028, 0.015)) -) diff --git a/core/txpool/blobpool/priority.go b/core/txpool/blobpool/priority.go deleted file mode 100644 index dd39927361..0000000000 --- a/core/txpool/blobpool/priority.go +++ /dev/null @@ -1,100 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import ( - "math" - "math/bits" - - "github.com/holiman/uint256" -) - -// log2_1_125 is used in the eviction priority calculation. 
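A worked number for the helpers that follow, as a standalone sketch with simplified float-only math (the real code uses intLog2 and cached jump values): going from a 7 wei basefee to a 10 wei fee cap is about three 12.5% fee jumps, which ceils to 4 and yields eviction priority log2(4) = 2, matching the first row of the test table further below.

package main

import (
	"fmt"
	"math"
)

// jumps mirrors dynamicFeeJumps below: log1.125(fee), i.e. how many 12.5%
// price steps separate 1 wei from the given fee (float-only simplification).
func jumps(fee float64) float64 { return math.Log2(fee) / math.Log2(1.125) }

func main() {
	base, feeCap := 7.0, 10.0
	d := jumps(feeCap) - jumps(base) // ~3.03 jumps between the two fees
	// Positive branch of evictionPriority1D, simplified: log2 of the ceiled
	// jump count. ceil(3.03) = 4, log2(4) = 2.
	fmt.Printf("%.2f jumps -> priority %d\n", d, int(math.Log2(math.Ceil(d))))
}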
-var log2_1_125 = math.Log2(1.125)
-
-// evictionPriority calculates the eviction priority based on the algorithm
-// described in the BlobPool docs for both fee components.
-//
-// This method takes about 8ns on a very recent laptop CPU, recalculating about
-// 125 million transaction priority values per second.
-func evictionPriority(basefeeJumps float64, txBasefeeJumps, blobfeeJumps, txBlobfeeJumps float64) int {
-	var (
-		basefeePriority = evictionPriority1D(basefeeJumps, txBasefeeJumps)
-		blobfeePriority = evictionPriority1D(blobfeeJumps, txBlobfeeJumps)
-	)
-	if basefeePriority < blobfeePriority {
-		return basefeePriority
-	}
-	return blobfeePriority
-}
-
-// evictionPriority1D calculates the eviction priority based on the algorithm
-// described in the BlobPool docs for a single fee component.
-func evictionPriority1D(basefeeJumps float64, txfeeJumps float64) int {
-	jumps := txfeeJumps - basefeeJumps
-	if int(jumps) == 0 {
-		return 0 // can't log2 0
-	}
-	if jumps < 0 {
-		return -intLog2(uint(-math.Floor(jumps)))
-	}
-	return intLog2(uint(math.Ceil(jumps)))
-}
-
-// dynamicFeeJumps calculates the log1.125(fee), namely the number of fee jumps
-// needed to reach the requested one. We only use it when calculating the jumps
-// between 2 fees, so it doesn't matter from what exact number it returns.
-//
-// This method is very expensive, taking about 75ns on a very recent laptop CPU,
-// but the result does not change with the lifetime of a transaction, so it can
-// be cached.
-func dynamicFeeJumps(fee *uint256.Int) float64 {
-	if fee.IsZero() {
-		return 0 // can't log2 zero, should never happen outside tests, but don't choke
-	}
-	return math.Log2(fee.Float64()) / log2_1_125
-}
-
-// intLog2 is a helper to calculate the integral part of a log2 of an unsigned
-// integer. It is a very specific calculation that's not particularly useful in
-// general, but it's what we need here (it's fast).
-func intLog2(n uint) int {
-	switch {
-	case n == 0:
-		panic("log2(0) is undefined")
-
-	case n < 2048:
-		return bits.UintSize - bits.LeadingZeros(n) - 1
-
-	default:
-		// The input is log1.125(uint256) = log2(uint256) / log2(1.125). At the
-		// most extreme, log2(uint256) will be a bit below 257, and the constant
-		// log2(1.125) ~= 0.17. The largest input thus is ~257 / ~0.17 ~= ~1511.
-		panic("dynamic fee jump diffs cannot reach this")
-	}
-}
diff --git a/core/txpool/blobpool/priority_test.go b/core/txpool/blobpool/priority_test.go
deleted file mode 100644
index 3c9523d512..0000000000
--- a/core/txpool/blobpool/priority_test.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package blobpool
-
-import (
-	"math/rand"
-	"testing"
-
-	"github.com/holiman/uint256"
-)
-
-// Tests that the priority fees are calculated correctly as the log2 of the fee
-// jumps needed to go from the base fee to the tx's fee cap.
-func TestPriorityCalculation(t *testing.T) {
-	tests := []struct {
-		basefee uint64
-		txfee   uint64
-		result  int
-	}{
-		{basefee: 7, txfee: 10, result: 2},                          // 3.02 jumps, 4 ceil, 2 log2
-		{basefee: 17_200_000_000, txfee: 17_200_000_000, result: 0}, // 0 jumps, special case 0 log2
-		{basefee: 9_853_941_692, txfee: 11_085_092_510, result: 0},  // 0.99 jumps, 1 ceil, 0 log2
-		{basefee: 11_544_106_391, txfee: 10_356_781_100, result: 0}, // -0.92 jumps, -1 floor, 0 log2
-		{basefee: 17_200_000_000, txfee: 7, result: -7},             // -183.57 jumps, -184 floor, -7 log2
-		{basefee: 7, txfee: 17_200_000_000, result: 7},              // 183.57 jumps, 184 ceil, 7 log2
-	}
-	for i, tt := range tests {
-		var (
-			baseJumps = dynamicFeeJumps(uint256.NewInt(tt.basefee))
-			feeJumps  = dynamicFeeJumps(uint256.NewInt(tt.txfee))
-		)
-		if prio := evictionPriority1D(baseJumps, feeJumps); prio != tt.result {
-			t.Errorf("test %d priority mismatch: have %d, want %d", i, prio, tt.result)
-		}
-	}
-}
-
-// Benchmarks how many dynamic fee jump values can be done.
-func BenchmarkDynamicFeeJumpCalculation(b *testing.B) {
-	fees := make([]*uint256.Int, b.N)
-	for i := 0; i < b.N; i++ {
-		fees[i] = uint256.NewInt(rand.Uint64())
-	}
-	b.ResetTimer()
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		dynamicFeeJumps(fees[i])
-	}
-}
-
-// Benchmarks how many priority recalculations can be done.
-func BenchmarkPriorityCalculation(b *testing.B) {
-	// The basefee and blob fee are constant for all transactions across a block,
-	// so we can assume their absolute jump counts can be pre-computed.
-	basefee := uint256.NewInt(17_200_000_000)  // 17.2 Gwei is the 22.03.2023 zero-emission basefee, random number
-	blobfee := uint256.NewInt(123_456_789_000) // Completely random, no idea what this will be
-
-	basefeeJumps := dynamicFeeJumps(basefee)
-	blobfeeJumps := dynamicFeeJumps(blobfee)
-
-	// The transaction's fee cap and blob fee cap are constant across the life
-	// of the transaction, so we can pre-calculate and cache them.
-	txBasefeeJumps := make([]float64, b.N)
-	txBlobfeeJumps := make([]float64, b.N)
-	for i := 0; i < b.N; i++ {
-		txBasefeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
-		txBlobfeeJumps[i] = dynamicFeeJumps(uint256.NewInt(rand.Uint64()))
-	}
-	b.ResetTimer()
-	b.ReportAllocs()
-	for i := 0; i < b.N; i++ {
-		evictionPriority(basefeeJumps, txBasefeeJumps[i], blobfeeJumps, txBlobfeeJumps[i])
-	}
-}
diff --git a/core/txpool/blobpool/slotter_test.go b/core/txpool/blobpool/slotter_test.go
deleted file mode 100644
index 9a89f42d7d..0000000000
--- a/core/txpool/blobpool/slotter_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package blobpool - -import "testing" - -// Tests that the slotter creates the expected database shelves. -func TestNewSlotter(t *testing.T) { - // Generate the database shelve sizes - slotter := newSlotter() - - var shelves []uint32 - for { - shelf, done := slotter() - shelves = append(shelves, shelf) - if done { - break - } - } - // Compare the database shelves to the expected ones - want := []uint32{ - 0*blobSize + txAvgSize, // 0 blob + some expected tx infos - 1*blobSize + txAvgSize, // 1 blob + some expected tx infos - 2*blobSize + txAvgSize, // 2 blob + some expected tx infos (could be fewer blobs and more tx data) - 3*blobSize + txAvgSize, // 3 blob + some expected tx infos (could be fewer blobs and more tx data) - 4*blobSize + txAvgSize, // 4 blob + some expected tx infos (could be fewer blobs and more tx data) - 5*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 6*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 7*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 8*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 9*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 10*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 11*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos < 4 blobs + max tx metadata size - 12*blobSize + txAvgSize, // 1-4 blobs + unexpectedly large tx infos >= 4 blobs + max tx metadata size - } - if len(shelves) != len(want) { - t.Errorf("shelves count mismatch: have %d, want %d", len(shelves), len(want)) - } - for i := 0; i < len(shelves) && i < len(want); i++ { - if shelves[i] != want[i] { - t.Errorf("shelf %d mismatch: have %d, want %d", i, shelves[i], want[i]) - } - } -} diff --git a/core/txpool/errors.go b/core/txpool/errors.go deleted file mode 100644 index 7ecbfef35f..0000000000 --- a/core/txpool/errors.go +++ /dev/null @@ -1,67 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package txpool
-
-import "errors"
-
-var (
-	// ErrAlreadyKnown is returned if the transaction is already contained
-	// within the pool.
-	ErrAlreadyKnown = errors.New("already known")
-
-	// ErrInvalidSender is returned if the transaction contains an invalid signature.
-	ErrInvalidSender = errors.New("invalid sender")
-
-	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
-	// configured for the transaction pool.
-	ErrUnderpriced = errors.New("transaction underpriced")
-
-	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
-	// with a different one without the required price bump.
-	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
-
-	// ErrAccountLimitExceeded is returned if a transaction would exceed the number
-	// allowed by a pool for a single account.
-	ErrAccountLimitExceeded = errors.New("account limit exceeded")
-
-	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
-	// maximum allowance of the current block.
-	ErrGasLimit = errors.New("exceeds block gas limit")
-
-	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
-	// transaction with a negative value.
-	ErrNegativeValue = errors.New("negative value")
-
-	// ErrOversizedData is returned if the input data of a transaction is greater
-	// than some meaningful limit a user might use. This is not a consensus error
-	// making the transaction invalid, rather a DOS protection.
-	ErrOversizedData = errors.New("oversized data")
-
-	// ErrFutureReplacePending is returned if a future transaction replaces a pending
-	// transaction. Future transactions should only be able to replace other future transactions.
-	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
-)
diff --git a/core/txpool/legacypool/journal.go b/core/txpool/journal.go
similarity index 99%
rename from core/txpool/legacypool/journal.go
rename to core/txpool/journal.go
index 2065fb36e0..11ec2ccd36 100644
--- a/core/txpool/legacypool/journal.go
+++ b/core/txpool/journal.go
@@ -24,7 +24,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package legacypool
+package txpool
 
 import (
 	"errors"
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
deleted file mode 100644
index 8c74f1efbb..0000000000
--- a/core/txpool/legacypool/legacypool.go
+++ /dev/null
@@ -1,2141 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// ********** -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package legacypool implements the normal EVM execution transaction pool. -package legacypool - -import ( - "errors" - "math" - "math/big" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/ava-labs/subnet-evm/commontype" - "github.com/ava-labs/subnet-evm/consensus/dummy" - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/metrics" - "github.com/ava-labs/subnet-evm/params" - "github.com/ava-labs/subnet-evm/precompile/contracts/feemanager" - "github.com/ava-labs/subnet-evm/utils" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/prque" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/log" -) - -const ( - // txSlotSize is used to calculate how many data slots a single transaction - // takes up based on its size. The slots are used as DoS protection, ensuring - // that validating a new transaction remains a constant operation (in reality - // O(maxslots), where max slots are 4 currently). - txSlotSize = 32 * 1024 - - // txMaxSize is the maximum size a single transaction can have. This field has - // non-trivial consequences: larger transactions are significantly harder and - // more expensive to propagate; larger transactions also take more resources - // to validate whether they fit into the pool or not. - // - // Note: the max contract size is 24KB - txMaxSize = 4 * txSlotSize // 128KB -) - -var ( - // ErrAlreadyKnown is returned if the transactions is already contained - // within the pool. - ErrAlreadyKnown = errors.New("already known") - - // ErrTxPoolOverflow is returned if the transaction pool is full and can't accept - // another remote transaction. 
- ErrTxPoolOverflow = errors.New("txpool is full") -) - -var ( - evictionInterval = time.Minute // Time interval to check for evictable transactions - statsReportInterval = 8 * time.Second // Time interval to report transaction pool stats - baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after SubnetEVM is enabled -) - -var ( - // Metrics for the pending pool - pendingDiscardMeter = metrics.NewRegisteredMeter("txpool/pending/discard", nil) - pendingReplaceMeter = metrics.NewRegisteredMeter("txpool/pending/replace", nil) - pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting - pendingNofundsMeter = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds - - // Metrics for the queued pool - queuedDiscardMeter = metrics.NewRegisteredMeter("txpool/queued/discard", nil) - queuedReplaceMeter = metrics.NewRegisteredMeter("txpool/queued/replace", nil) - queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting - queuedNofundsMeter = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds - queuedEvictionMeter = metrics.NewRegisteredMeter("txpool/queued/eviction", nil) // Dropped due to lifetime - - // General tx metrics - knownTxMeter = metrics.NewRegisteredMeter("txpool/known", nil) - validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) - invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) - underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) - overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) - - // throttleTxMeter counts how many transactions are rejected due to too-many-changes between - // txpool reorgs. - throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil) - // reorgDurationTimer measures how long time a txpool reorg takes. - reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil) - // dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected - // that this number is pretty low, since txpool reorgs happen very frequently. - dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015)) - - pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) - queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) - localGauge = metrics.NewRegisteredGauge("txpool/local", nil) - slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil) - - reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil) -) - -// BlockChain defines the minimal set of methods needed to back a tx pool with -// a chain. Exists to allow mocking the live chain out of tests. -type BlockChain interface { - // Config retrieves the chain's fork configuration. - Config() *params.ChainConfig - - // CurrentBlock returns the current head of the chain. - CurrentBlock() *types.Header - - // GetBlock retrieves a specific block, used during pool resets. - GetBlock(hash common.Hash, number uint64) *types.Block - - // StateAt returns a state database for a given root hash (generally the head). - StateAt(root common.Hash) (*state.StateDB, error) - - SenderCacher() *core.TxSenderCacher - GetFeeConfigAt(parent *types.Header) (commontype.FeeConfig, *big.Int, error) -} - -// Config are the configuration parameters of the transaction pool. 
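A small runnable illustration of the slot accounting described above for txSlotSize and txMaxSize, before the Config definition; the helper name is ours, not the pool's:

package main

import "fmt"

// slotsFor reproduces the slot arithmetic from the constants above: a tx
// occupies ceil(size/32KiB) slots, so the 128KiB txMaxSize caps any single
// transaction at 4 slots.
func slotsFor(size uint64) uint64 {
	const txSlotSize = 32 * 1024
	return (size + txSlotSize - 1) / txSlotSize
}

func main() {
	fmt.Println(slotsFor(500))    // 1: a typical transfer
	fmt.Println(slotsFor(131072)) // 4: a maximum-size (128KiB) transaction
}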
-type Config struct { - Locals []common.Address // Addresses that should be treated by default as local - NoLocals bool // Whether local transaction handling should be disabled - Journal string // Journal of local transactions to survive node restarts - Rejournal time.Duration // Time interval to regenerate the local transaction journal - - PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool - PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) - - AccountSlots uint64 // Number of executable transaction slots guaranteed per account - GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts - AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account - GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts - - Lifetime time.Duration // Maximum amount of time non-executable transaction are queued -} - -// DefaultConfig contains the default configurations for the transaction pool. -var DefaultConfig = Config{ - // If we re-enable txpool journaling, we should also add the saved local - // transactions to the p2p gossip on startup. - Journal: "", - Rejournal: time.Hour, - - PriceLimit: 1, - PriceBump: 10, - - AccountSlots: 16, - GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio - AccountQueue: 64, - GlobalQueue: 1024, - - Lifetime: 10 * time.Minute, -} - -// sanitize checks the provided user configurations and changes anything that's -// unreasonable or unworkable. -func (config *Config) sanitize() Config { - conf := *config - if conf.Rejournal < time.Second { - log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) - conf.Rejournal = time.Second - } - if conf.PriceLimit < 1 { - log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) - conf.PriceLimit = DefaultConfig.PriceLimit - } - if conf.PriceBump < 1 { - log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump) - conf.PriceBump = DefaultConfig.PriceBump - } - if conf.AccountSlots < 1 { - log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots) - conf.AccountSlots = DefaultConfig.AccountSlots - } - if conf.GlobalSlots < 1 { - log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots) - conf.GlobalSlots = DefaultConfig.GlobalSlots - } - if conf.AccountQueue < 1 { - log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue) - conf.AccountQueue = DefaultConfig.AccountQueue - } - if conf.GlobalQueue < 1 { - log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue) - conf.GlobalQueue = DefaultConfig.GlobalQueue - } - if conf.Lifetime < 1 { - log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime) - conf.Lifetime = DefaultConfig.Lifetime - } - return conf -} - -// LegacyPool contains all currently known transactions. Transactions -// enter the pool when they are received from the network or submitted -// locally. They exit the pool when they are included in the blockchain. 
-// -// The pool separates processable transactions (which can be applied to the -// current state) and future transactions. Transactions move between those -// two states over time as they are received and processed. -type LegacyPool struct { - config Config - chainconfig *params.ChainConfig - chain BlockChain - gasTip atomic.Pointer[big.Int] - minimumFee *big.Int - txFeed event.Feed - scope event.SubscriptionScope - signer types.Signer - mu sync.RWMutex - - // [currentStateLock] is required to allow concurrent access to address nonces - // and balances during reorgs and gossip handling. - currentStateLock sync.Mutex - // closed when the transaction pool is stopped. Any goroutine can listen - // to this to be notified if it should shut down. - generalShutdownChan chan struct{} - - currentHead atomic.Pointer[types.Header] // Current head of the blockchain - currentState *state.StateDB // Current state in the blockchain head - pendingNonces *noncer // Pending state tracking virtual nonces - - locals *accountSet // Set of local transaction to exempt from eviction rules - journal *journal // Journal of local transaction to back up to disk - - reserve txpool.AddressReserver // Address reserver to ensure exclusivity across subpools - pending map[common.Address]*list // All currently processable transactions - queue map[common.Address]*list // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all *lookup // All transactions to allow lookups - priced *pricedList // All transactions sorted by price - - reqResetCh chan *txpoolResetRequest - reqPromoteCh chan *accountSet - queueTxEventCh chan *types.Transaction - reorgDoneCh chan chan struct{} - reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop - wg sync.WaitGroup // tracks loop, scheduleReorgLoop - initDoneCh chan struct{} // is closed once the pool is initialized (for tests) - - changesSinceReorg int // A counter for how many drops we've performed in-between reorg. -} - -type txpoolResetRequest struct { - oldHead, newHead *types.Header -} - -// New creates a new transaction pool to gather, sort and filter inbound -// transactions from the network. -func New(config Config, chain BlockChain) *LegacyPool { - // Sanitize the input to ensure no vulnerable gas prices are set - config = (&config).sanitize() - - // Create the transaction pool with its initial settings - pool := &LegacyPool{ - config: config, - chain: chain, - chainconfig: chain.Config(), - signer: types.LatestSigner(chain.Config()), - pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), - beats: make(map[common.Address]time.Time), - all: newLookup(), - reqResetCh: make(chan *txpoolResetRequest), - reqPromoteCh: make(chan *accountSet), - queueTxEventCh: make(chan *types.Transaction), - reorgDoneCh: make(chan chan struct{}), - reorgShutdownCh: make(chan struct{}), - initDoneCh: make(chan struct{}), - generalShutdownChan: make(chan struct{}), - } - pool.locals = newAccountSet(pool.signer) - for _, addr := range config.Locals { - log.Info("Setting new local account", "address", addr) - pool.locals.add(addr) - } - pool.priced = newPricedList(pool.all) - - if !config.NoLocals && config.Journal != "" { - pool.journal = newTxJournal(config.Journal) - } - return pool -} - -// Filter returns whether the given transaction can be consumed by the legacy -// pool, specifically, whether it is a Legacy, AccessList or Dynamic transaction. 
-func (pool *LegacyPool) Filter(tx *types.Transaction) bool { - switch tx.Type() { - case types.LegacyTxType, types.AccessListTxType, types.DynamicFeeTxType: - return true - default: - return false - } -} - -// Init sets the gas price needed to keep a transaction in the pool and the chain -// head to allow balance / nonce checks. The transaction journal will be loaded -// from disk and filtered based on the provided starting settings. The internal -// goroutines will be spun up and the pool deemed operational afterwards. -func (pool *LegacyPool) Init(gasTip *big.Int, head *types.Header, reserve txpool.AddressReserver) error { - // Set the address reserver to request exclusive access to pooled accounts - pool.reserve = reserve - - // Set the basic pool parameters - pool.gasTip.Store(gasTip) - pool.reset(nil, head) - - // Start the reorg loop early, so it can handle requests generated during - // journal loading. - pool.wg.Add(1) - go pool.scheduleReorgLoop() - - // If local transactions and journaling is enabled, load from disk - if pool.journal != nil { - if err := pool.journal.load(pool.addLocals); err != nil { - log.Warn("Failed to load transaction journal", "err", err) - } - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate transaction journal", "err", err) - } - } - pool.wg.Add(1) - go pool.loop() - - pool.startPeriodicFeeUpdate() - - return nil -} - -// loop is the transaction pool's main event loop, waiting for and reacting to -// outside blockchain events as well as for various reporting and transaction -// eviction events. -func (pool *LegacyPool) loop() { - defer pool.wg.Done() - - var ( - prevPending, prevQueued, prevStales int - - // Start the stats reporting and transaction eviction tickers - report = time.NewTicker(statsReportInterval) - evict = time.NewTicker(evictionInterval) - journal = time.NewTicker(pool.config.Rejournal) - ) - defer report.Stop() - defer evict.Stop() - defer journal.Stop() - - // Notify tests that the init phase is done - close(pool.initDoneCh) - for { - select { - // Handle pool shutdown - case <-pool.reorgShutdownCh: - return - - // Handle stats reporting ticks - case <-report.C: - pool.mu.RLock() - pending, queued := pool.stats() - pool.mu.RUnlock() - stales := int(pool.priced.stales.Load()) - - if pending != prevPending || queued != prevQueued || stales != prevStales { - log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) - prevPending, prevQueued, prevStales = pending, queued, stales - } - - // Handle inactive account transaction eviction - case <-evict.C: - pool.mu.Lock() - for addr := range pool.queue { - // Skip local transactions from the eviction mechanism - if pool.locals.contains(addr) { - continue - } - // Any non-locals old enough should be removed - if time.Since(pool.beats[addr]) > pool.config.Lifetime { - list := pool.queue[addr].Flatten() - for _, tx := range list { - pool.removeTx(tx.Hash(), true, true) - } - queuedEvictionMeter.Mark(int64(len(list))) - } - } - pool.mu.Unlock() - - // Handle local transaction journal rotation - case <-journal.C: - if pool.journal != nil { - pool.mu.Lock() - if err := pool.journal.rotate(pool.local()); err != nil { - log.Warn("Failed to rotate local tx journal", "err", err) - } - pool.mu.Unlock() - } - } - } -} - -// Close terminates the transaction pool. 
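The loop above multiplexes its tickers and the shutdown channel through a single select; a compressed, self-contained sketch of that pattern (intervals shortened, work stubbed out, names illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	var (
		report   = time.NewTicker(50 * time.Millisecond) // stats reporting
		evict    = time.NewTicker(80 * time.Millisecond) // stale-tx eviction
		shutdown = make(chan struct{})
	)
	defer report.Stop()
	defer evict.Stop()

	// Simulate a pool shutdown after a short while.
	time.AfterFunc(200*time.Millisecond, func() { close(shutdown) })

	for {
		select {
		case <-shutdown:
			fmt.Println("pool stopped")
			return
		case <-report.C:
			fmt.Println("report tick")
		case <-evict.C:
			fmt.Println("evict tick")
		}
	}
}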
-func (pool *LegacyPool) Close() error {
-	// Unsubscribe all subscriptions registered from txpool
-	pool.scope.Close()
-
-	close(pool.generalShutdownChan)
-
-	// Terminate the pool reorger and return
-	close(pool.reorgShutdownCh)
-	pool.wg.Wait()
-
-	if pool.journal != nil {
-		pool.journal.close()
-	}
-	log.Info("Transaction pool stopped")
-	return nil
-}
-
-// Reset implements txpool.SubPool, allowing the legacy pool's internal state to be
-// kept in sync with the main transaction pool's internal state.
-func (pool *LegacyPool) Reset(oldHead, newHead *types.Header) {
-	wait := pool.requestReset(oldHead, newHead)
-	<-wait
-}
-
-// SubscribeTransactions registers a subscription of NewTxsEvent and
-// starts sending events to the given channel.
-func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription {
-	return pool.scope.Track(pool.txFeed.Subscribe(ch))
-}
-
-// SetGasTip updates the minimum gas tip required by the transaction pool for a
-// new transaction, and drops all transactions below this threshold.
-func (pool *LegacyPool) SetGasTip(tip *big.Int) {
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
-
-	old := pool.gasTip.Load()
-	pool.gasTip.Store(new(big.Int).Set(tip))
-
-	// If the min miner fee increased, remove transactions below the new threshold
-	if tip.Cmp(old) > 0 {
-		// pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead
-		drop := pool.all.RemotesBelowTip(tip)
-		for _, tx := range drop {
-			pool.removeTx(tx.Hash(), false, true)
-		}
-		pool.priced.Removed(len(drop))
-	}
-	log.Info("Legacy pool tip threshold updated", "tip", tip)
-}
-
-func (pool *LegacyPool) SetMinFee(minFee *big.Int) {
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
-
-	pool.minimumFee = minFee
-}
-
-// Nonce returns the next nonce of an account, with all transactions executable
-// by the pool already applied on top.
-func (pool *LegacyPool) Nonce(addr common.Address) uint64 {
-	pool.mu.RLock()
-	defer pool.mu.RUnlock()
-
-	return pool.pendingNonces.get(addr)
-}
-
-// Stats retrieves the current pool stats, namely the number of pending and the
-// number of queued (non-executable) transactions.
-func (pool *LegacyPool) Stats() (int, int) {
-	pool.mu.RLock()
-	defer pool.mu.RUnlock()
-
-	return pool.stats()
-}
-
-// stats retrieves the current pool stats, namely the number of pending and the
-// number of queued (non-executable) transactions.
-func (pool *LegacyPool) stats() (int, int) {
-	pending := 0
-	for _, list := range pool.pending {
-		pending += list.Len()
-	}
-	queued := 0
-	for _, list := range pool.queue {
-		queued += list.Len()
-	}
-	return pending, queued
-}
-
-// Content retrieves the data content of the transaction pool, returning all the
-// pending as well as queued transactions, grouped by account and sorted by nonce.
-func (pool *LegacyPool) Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) {
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
-
-	pending := make(map[common.Address][]*types.Transaction, len(pool.pending))
-	for addr, list := range pool.pending {
-		pending[addr] = list.Flatten()
-	}
-	queued := make(map[common.Address][]*types.Transaction, len(pool.queue))
-	for addr, list := range pool.queue {
-		queued[addr] = list.Flatten()
-	}
-	return pending, queued
-}
-
-// ContentFrom retrieves the data content of the transaction pool, returning the
-// pending as well as queued transactions of this address, grouped by nonce.
-func (pool *LegacyPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { - pool.mu.RLock() - defer pool.mu.RUnlock() - - var pending []*types.Transaction - if list, ok := pool.pending[addr]; ok { - pending = list.Flatten() - } - var queued []*types.Transaction - if list, ok := pool.queue[addr]; ok { - queued = list.Flatten() - } - return pending, queued -} - -// Pending retrieves all currently processable transactions, grouped by origin -// account and sorted by nonce. The returned transaction set is a copy and can be -// freely modified by calling code. -// -// The enforceTips parameter can be used to do an extra filtering on the pending -// transactions and only return those whose **effective** tip is large enough in -// the next pending execution environment. -func (pool *LegacyPool) Pending(enforceTips bool) map[common.Address][]*txpool.LazyTransaction { - return pool.PendingWithBaseFee(enforceTips, nil) -} - -// If baseFee is nil, then pool.priced.urgent.baseFee is used. -func (pool *LegacyPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*txpool.LazyTransaction { - pool.mu.Lock() - defer pool.mu.Unlock() - - if baseFee == nil { - baseFee = pool.priced.urgent.baseFee - } - - pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending)) - for addr, list := range pool.pending { - txs := list.Flatten() - - // If the miner requests tip enforcement, cap the lists now - if enforceTips && !pool.locals.contains(addr) { - for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), baseFee) < 0 { - txs = txs[:i] - break - } - } - } - if len(txs) > 0 { - lazies := make([]*txpool.LazyTransaction, len(txs)) - for i := 0; i < len(txs); i++ { - lazies[i] = &txpool.LazyTransaction{ - Pool: pool, - Hash: txs[i].Hash(), - Tx: &txpool.Transaction{Tx: txs[i]}, - Time: txs[i].Time(), - GasFeeCap: txs[i].GasFeeCap(), - GasTipCap: txs[i].GasTipCap(), - } - } - pending[addr] = lazies - } - } - return pending -} - -// PendingFrom returns the same set of transactions that would be returned from Pending restricted to only -// transactions from [addrs]. -func (pool *LegacyPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*txpool.LazyTransaction { - pool.mu.Lock() - defer pool.mu.Unlock() - - pending := make(map[common.Address][]*txpool.LazyTransaction, len(pool.pending)) - for _, addr := range addrs { - list, ok := pool.pending[addr] - if !ok { - continue - } - txs := list.Flatten() - - // If the miner requests tip enforcement, cap the lists now - if enforceTips && !pool.locals.contains(addr) { - for i, tx := range txs { - if tx.EffectiveGasTipIntCmp(pool.gasTip.Load(), pool.priced.urgent.baseFee) < 0 { - txs = txs[:i] - break - } - } - } - if len(txs) > 0 { - lazies := make([]*txpool.LazyTransaction, len(txs)) - for i := 0; i < len(txs); i++ { - lazies[i] = &txpool.LazyTransaction{ - Pool: pool, - Hash: txs[i].Hash(), - Tx: &txpool.Transaction{Tx: txs[i]}, - Time: txs[i].Time(), - GasFeeCap: txs[i].GasFeeCap(), - GasTipCap: txs[i].GasTipCap(), - } - } - pending[addr] = lazies - } - } - return pending -} - -// IteratePending iterates over [pool.pending] until [f] returns false. -// The caller must not modify [tx]. Returns false if iteration was interrupted. 
-func (pool *LegacyPool) IteratePending(f func(tx *txpool.Transaction) bool) bool {
-	pool.mu.RLock()
-	defer pool.mu.RUnlock()
-
-	for _, list := range pool.pending {
-		for _, tx := range list.txs.items {
-			if !f(&txpool.Transaction{Tx: tx}) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// Locals retrieves the accounts currently considered local by the pool.
-func (pool *LegacyPool) Locals() []common.Address {
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
-
-	return pool.locals.flatten()
-}
-
-// local retrieves all currently known local transactions, grouped by origin
-// account and sorted by nonce. The returned transaction set is a copy and can be
-// freely modified by calling code.
-func (pool *LegacyPool) local() map[common.Address]types.Transactions {
-	txs := make(map[common.Address]types.Transactions)
-	for addr := range pool.locals.accounts {
-		if pending := pool.pending[addr]; pending != nil {
-			txs[addr] = append(txs[addr], pending.Flatten()...)
-		}
-		if queued := pool.queue[addr]; queued != nil {
-			txs[addr] = append(txs[addr], queued.Flatten()...)
-		}
-	}
-	return txs
-}
-
-// validateTxBasics checks whether a transaction is valid according to the consensus
-// rules, but does not check state-dependent validation such as sufficient balance.
-// This check is meant as an early check which only needs to be performed once,
-// and does not require the pool mutex to be held.
-func (pool *LegacyPool) validateTxBasics(tx *types.Transaction, local bool) error {
-	opts := &txpool.ValidationOptions{
-		Config: pool.chainconfig,
-		Accept: 0 |
-			1<<types.LegacyTxType |
-			1<<types.AccessListTxType |
-			1<<types.DynamicFeeTxType,
[...]
-	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
-		// If the new transaction is underpriced, don't accept it
-		if !isLocal && pool.priced.Underpriced(tx) {
-			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
-			underpricedTxMeter.Mark(1)
-			return false, txpool.ErrUnderpriced
-		}
-
-		// We're about to replace a transaction. The reorg does a more thorough
-		// analysis of what to remove and how, but it runs async. We don't want to
-		// do too many replacements between reorg-runs, so we cap the number of
-		// replacements to 25% of the slots
-		if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) {
-			throttleTxMeter.Mark(1)
-			return false, ErrTxPoolOverflow
-		}
-
-		// New transaction is better than our worse ones, make room for it.
-		// If it's a local transaction, forcibly discard all available transactions.
-		// Otherwise if we can't make enough room for the new one, abort the operation.
-		drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal)
-
-		// Special case, we still can't make room for the new remote one.
-		if !isLocal && !success {
-			log.Trace("Discarding overflown transaction", "hash", hash)
-			overflowedTxMeter.Mark(1)
-			return false, ErrTxPoolOverflow
-		}
-
-		// If the new transaction is a future transaction it should never churn pending transactions
-		if !isLocal && pool.isGapped(from, tx) {
-			var replacesPending bool
-			for _, dropTx := range drop {
-				dropSender, _ := types.Sender(pool.signer, dropTx)
-				if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) {
-					replacesPending = true
-					break
-				}
-			}
-			// Add all transactions back to the priced queue
-			if replacesPending {
-				for _, dropTx := range drop {
-					pool.priced.Put(dropTx, false)
-				}
-				log.Trace("Discarding future transaction replacing pending tx", "hash", hash)
-				return false, txpool.ErrFutureReplacePending
-			}
-		}
-
-		// Kick out the underpriced remote transactions.
-		for _, tx := range drop {
-			log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
-			underpricedTxMeter.Mark(1)
-
-			sender, _ := types.Sender(pool.signer, tx)
-			dropped := pool.removeTx(tx.Hash(), false, sender != from) // Don't unreserve the sender of the tx being added if it's the last tx from that account
-
-			pool.changesSinceReorg += dropped
-		}
-	}
-
-	// Try to replace an existing transaction in the pending pool
-	if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
-		// Nonce already pending, check if required price bump is met
-		inserted, old := list.Add(tx, pool.config.PriceBump)
-		if !inserted {
-			pendingDiscardMeter.Mark(1)
-			return false, txpool.ErrReplaceUnderpriced
-		}
-		// New transaction is better, replace old one
-		if old != nil {
-			pool.all.Remove(old.Hash())
-			pool.priced.Removed(1)
-			pendingReplaceMeter.Mark(1)
-		}
-		pool.all.Add(tx, isLocal)
-		pool.priced.Put(tx, isLocal)
-		pool.journalTx(from, tx)
-		pool.queueTxEvent(tx)
-		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
-
-		// Successful promotion, bump the heartbeat
-		pool.beats[from] = time.Now()
-		return old != nil, nil
-	}
-	// New transaction isn't replacing a pending one, push into queue
-	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
-	if err != nil {
-		return false, err
-	}
-	// Mark local addresses and journal local transactions
-	if local && !pool.locals.contains(from) {
-		log.Info("Setting new local account", "address", from)
-		pool.locals.add(from)
-		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time.
-	}
-	if isLocal {
-		localGauge.Inc(1)
-	}
-	pool.journalTx(from, tx)
-
-	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
-	return replaced, nil
-}
-
-// isGapped reports whether the given transaction has a nonce gap with the
-// pending list and is therefore not immediately executable.
-func (pool *LegacyPool) isGapped(from common.Address, tx *types.Transaction) bool {
-	// Short circuit if the transaction falls within the scope of the pending list
-	// or matches the next pending nonce, which can be promoted as an executable
-	// transaction afterwards. Note, the tx staleness is already checked in
-	// the 'validateTx' function previously.
-	next := pool.pendingNonces.get(from)
-	if tx.Nonce() <= next {
-		return false
-	}
-	// The transaction has a nonce gap with the pending list; it's only considered
-	// executable if the transactions in the queue can fill up the nonce gap.
-	queue, ok := pool.queue[from]
-	if !ok {
-		return true
-	}
-	for nonce := next; nonce < tx.Nonce(); nonce++ {
-		if !queue.Contains(nonce) {
-			return true // txs in queue can't fill up the nonce gap
-		}
-	}
-	return false
-}
-
-// enqueueTx inserts a new transaction into the non-executable transaction queue.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *LegacyPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
-	// Try to insert the transaction into the future queue
-	from, _ := types.Sender(pool.signer, tx) // already validated
-	if pool.queue[from] == nil {
-		pool.queue[from] = newList(false)
-	}
-	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
-	if !inserted {
-		// An older transaction was better, discard this
-		queuedDiscardMeter.Mark(1)
-		return false, txpool.ErrReplaceUnderpriced
-	}
-	// Discard any previous transaction and mark this
-	if old != nil {
-		pool.all.Remove(old.Hash())
-		pool.priced.Removed(1)
-		queuedReplaceMeter.Mark(1)
-	} else {
-		// Nothing was replaced, bump the queued counter
-		queuedGauge.Inc(1)
-	}
-	// If the transaction isn't in the lookup set but is expected to be there,
-	// log an error.
-	if pool.all.Get(hash) == nil && !addAll {
-		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
-	}
-	if addAll {
-		pool.all.Add(tx, local)
-		pool.priced.Put(tx, local)
-	}
-	// If we never record the heartbeat, do it right now.
-	if _, exist := pool.beats[from]; !exist {
-		pool.beats[from] = time.Now()
-	}
-	return old != nil, nil
-}
-
-// journalTx adds the specified transaction to the local disk journal if it is
-// deemed to have been sent from a local account.
-func (pool *LegacyPool) journalTx(from common.Address, tx *types.Transaction) {
-	// Only journal if it's enabled and the transaction is local
-	if pool.journal == nil || !pool.locals.contains(from) {
-		return
-	}
-	if err := pool.journal.insert(tx); err != nil {
-		log.Warn("Failed to journal local transaction", "err", err)
-	}
-}
-
-// promoteTx adds a transaction to the pending (processable) list of transactions
-// and returns whether it was inserted or whether an older one was better.
-//
-// Note, this method assumes the pool lock is held!
-func (pool *LegacyPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
-	// Try to insert the transaction into the pending queue
-	if pool.pending[addr] == nil {
-		pool.pending[addr] = newList(true)
-	}
-	list := pool.pending[addr]
-
-	inserted, old := list.Add(tx, pool.config.PriceBump)
-	if !inserted {
-		// An older transaction was better, discard this
-		pool.all.Remove(hash)
-		pool.priced.Removed(1)
-		pendingDiscardMeter.Mark(1)
-		return false
-	}
-	// Otherwise discard any previous transaction and mark this
-	if old != nil {
-		pool.all.Remove(old.Hash())
-		pool.priced.Removed(1)
-		pendingReplaceMeter.Mark(1)
-	} else {
-		// Nothing was replaced, bump the pending counter
-		pendingGauge.Inc(1)
-	}
-	// Set the potentially new pending nonce and notify any subsystems of the new tx
-	pool.pendingNonces.set(addr, tx.Nonce()+1)
-
-	// Successful promotion, bump the heartbeat
-	pool.beats[addr] = time.Now()
-	return true
-}
-
-// Add enqueues a batch of transactions into the pool if they are valid. Depending
-// on the local flag, full pricing constraints will or will not be applied.
-//
-// If sync is set, the method will block until all internal maintenance related
-// to the add is finished.
Only use this during tests for determinism!
-func (pool *LegacyPool) Add(txs []*txpool.Transaction, local bool, sync bool) []error {
-	unwrapped := make([]*types.Transaction, len(txs))
-	for i, tx := range txs {
-		unwrapped[i] = tx.Tx
-	}
-	return pool.addTxs(unwrapped, local, sync)
-}
-
-// addLocals enqueues a batch of transactions into the pool if they are valid, marking the
-// senders as local ones, ensuring they bypass the local pricing constraints.
-//
-// This method is used to add transactions from the RPC API and performs synchronous pool
-// reorganization and event propagation.
-func (pool *LegacyPool) addLocals(txs []*types.Transaction) []error {
-	return pool.addTxs(txs, !pool.config.NoLocals, true)
-}
-
-// addLocal enqueues a single local transaction into the pool if it is valid. This is
-// a convenience wrapper around addLocals.
-func (pool *LegacyPool) addLocal(tx *types.Transaction) error {
-	errs := pool.addLocals([]*types.Transaction{tx})
-	return errs[0]
-}
-
-// addRemotes enqueues a batch of transactions into the pool if they are valid. If the
-// senders are not among the locally tracked ones, full pricing constraints will apply.
-//
-// This method is used to add transactions from the p2p network and does not wait for pool
-// reorganization and internal event propagation.
-func (pool *LegacyPool) addRemotes(txs []*types.Transaction) []error {
-	return pool.addTxs(txs, false, false)
-}
-
-// addRemote enqueues a single transaction into the pool if it is valid. This is a convenience
-// wrapper around addRemotes.
-func (pool *LegacyPool) addRemote(tx *types.Transaction) error {
-	errs := pool.addRemotes([]*types.Transaction{tx})
-	return errs[0]
-}
-
-// addRemotesSync is like addRemotes, but waits for pool reorganization. Tests use this method.
-func (pool *LegacyPool) addRemotesSync(txs []*types.Transaction) []error {
-	return pool.addTxs(txs, false, true)
-}
-
-// addRemoteSync is like addRemotes with a single transaction, but waits for pool
-// reorganization. Tests use this method.
-func (pool *LegacyPool) addRemoteSync(tx *types.Transaction) error {
-	return pool.addTxs([]*types.Transaction{tx}, false, true)[0]
-}
-
-// addTxs attempts to queue a batch of transactions if they are valid.
-func (pool *LegacyPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
-	// Filter out known ones without obtaining the pool lock or recovering signatures
-	var (
-		errs = make([]error, len(txs))
-		news = make([]*types.Transaction, 0, len(txs))
-	)
-	for i, tx := range txs {
-		// If the transaction is known, pre-set the error slot
-		if pool.all.Get(tx.Hash()) != nil {
-			errs[i] = ErrAlreadyKnown
-			knownTxMeter.Mark(1)
-			continue
-		}
-		// Exclude transactions with basic errors, e.g. invalid signatures and
-		// insufficient intrinsic gas, as soon as possible and cache senders
-		// in transactions before obtaining the lock
-		if err := pool.validateTxBasics(tx, local); err != nil {
-			errs[i] = err
-			invalidTxMeter.Mark(1)
-			continue
-		}
-		// Accumulate all unknown transactions for deeper processing
-		news = append(news, tx)
-	}
-	if len(news) == 0 {
-		return errs
-	}
-
-	// Process all the new transactions and merge any errors into the original slice
-	pool.mu.Lock()
-	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
-	pool.mu.Unlock()
-
-	var nilSlot = 0
-	for _, err := range newErrs {
-		for errs[nilSlot] != nil {
-			nilSlot++
-		}
-		errs[nilSlot] = err
-		nilSlot++
-	}
-	// Reorg the pool internals if needed and return
-	done := pool.requestPromoteExecutables(dirtyAddrs)
-	if sync {
-		<-done
-	}
-	return errs
-}
-
-// addTxsLocked attempts to queue a batch of transactions if they are valid.
-// The transaction pool lock must be held.
-func (pool *LegacyPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) {
-	dirty := newAccountSet(pool.signer)
-	errs := make([]error, len(txs))
-	for i, tx := range txs {
-		replaced, err := pool.add(tx, local)
-		errs[i] = err
-		if err == nil && !replaced {
-			dirty.addTx(tx)
-		}
-	}
-	validTxMeter.Mark(int64(len(dirty.accounts)))
-	return errs, dirty
-}
-
-// Status returns the status (unknown/pending/queued) of a transaction
-// identified by its hash.
-func (pool *LegacyPool) Status(hash common.Hash) txpool.TxStatus {
-	tx := pool.get(hash)
-	if tx == nil {
-		return txpool.TxStatusUnknown
-	}
-	from, _ := types.Sender(pool.signer, tx) // already validated
-
-	pool.mu.RLock()
-	defer pool.mu.RUnlock()
-
-	if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
-		return txpool.TxStatusPending
-	} else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil {
-		return txpool.TxStatusQueued
-	}
-	return txpool.TxStatusUnknown
-}
-
-// Get returns a transaction if it is contained in the pool and nil otherwise.
-func (pool *LegacyPool) Get(hash common.Hash) *txpool.Transaction {
-	tx := pool.get(hash)
-	if tx == nil {
-		return nil
-	}
-	return &txpool.Transaction{Tx: tx}
-}
-
-// get returns a transaction if it is contained in the pool and nil otherwise.
-func (pool *LegacyPool) get(hash common.Hash) *types.Transaction {
-	return pool.all.Get(hash)
-}
-
-// Has returns an indicator whether the txpool has a transaction cached with the
-// given hash.
-func (pool *LegacyPool) Has(hash common.Hash) bool {
-	return pool.all.Get(hash) != nil
-}
-
-// HasLocal returns an indicator whether the txpool has a local transaction cached
-// with the given hash.
-func (pool *LegacyPool) HasLocal(hash common.Hash) bool {
-	return pool.all.GetLocal(hash) != nil
-}
-
-// removeTx removes a single transaction from the queue, moving all subsequent
-// transactions back to the future queue.
-//
-// If unreserve is false, the account will not be relinquished to the main txpool
-// even if there are no more references to it.
This is used to handle a race where a tx
-// that is being added evicts a previously scheduled tx from the same account;
-// unreserving eagerly there could lead to a premature release of the reservation.
-//
-// Returns the number of transactions removed from the pending queue.
-func (pool *LegacyPool) removeTx(hash common.Hash, outofbound bool, unreserve bool) int {
-	// Fetch the transaction we wish to delete
-	tx := pool.all.Get(hash)
-	if tx == nil {
-		return 0
-	}
-	addr, _ := types.Sender(pool.signer, tx) // already validated during insertion
-
-	// If after deletion there are no more transactions belonging to this account,
-	// relinquish the address reservation. It's a bit convoluted to do this via a
-	// defer, but it's safer vs. the many return pathways.
-	if unreserve {
-		defer func() {
-			var (
-				_, hasPending = pool.pending[addr]
-				_, hasQueued  = pool.queue[addr]
-			)
-			if !hasPending && !hasQueued {
-				pool.reserve(addr, false)
-			}
-		}()
-	}
-	// Remove it from the list of known transactions
-	pool.all.Remove(hash)
-	if outofbound {
-		pool.priced.Removed(1)
-	}
-	if pool.locals.contains(addr) {
-		localGauge.Dec(1)
-	}
-	// Remove the transaction from the pending lists and reset the account nonce
-	if pending := pool.pending[addr]; pending != nil {
-		if removed, invalids := pending.Remove(tx); removed {
-			// If no more pending transactions are left, remove the list
-			if pending.Empty() {
-				delete(pool.pending, addr)
-			}
-			// Postpone any invalidated transactions
-			for _, tx := range invalids {
-				// Internal shuffle shouldn't touch the lookup set.
-				pool.enqueueTx(tx.Hash(), tx, false, false)
-			}
-			// Update the account nonce if needed
-			pool.pendingNonces.setIfLower(addr, tx.Nonce())
-			// Reduce the pending counter
-			pendingGauge.Dec(int64(1 + len(invalids)))
-			return 1 + len(invalids)
-		}
-	}
-	// Transaction is in the future queue
-	if future := pool.queue[addr]; future != nil {
-		if removed, _ := future.Remove(tx); removed {
-			// Reduce the queued counter
-			queuedGauge.Dec(1)
-		}
-		if future.Empty() {
-			delete(pool.queue, addr)
-			delete(pool.beats, addr)
-		}
-	}
-	return 0
-}
-
-// requestReset requests a pool reset to the new head block.
-// The returned channel is closed when the reset has occurred.
-func (pool *LegacyPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} {
-	select {
-	case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}:
-		return <-pool.reorgDoneCh
-	case <-pool.reorgShutdownCh:
-		return pool.reorgShutdownCh
-	}
-}
-
-// requestPromoteExecutables requests transaction promotion checks for the given addresses.
-// The returned channel is closed when the promotion checks have occurred.
-func (pool *LegacyPool) requestPromoteExecutables(set *accountSet) chan struct{} {
-	select {
-	case pool.reqPromoteCh <- set:
-		return <-pool.reorgDoneCh
-	case <-pool.reorgShutdownCh:
-		return pool.reorgShutdownCh
-	}
-}
-
-// queueTxEvent enqueues a transaction event to be sent in the next reorg run.
-func (pool *LegacyPool) queueTxEvent(tx *types.Transaction) {
-	select {
-	case pool.queueTxEventCh <- tx:
-	case <-pool.reorgShutdownCh:
-	}
-}
-
-// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should
-// not call those methods directly; instead, it should request them to be run via
-// requestReset and requestPromoteExecutables.
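The request helpers above share one handshake with scheduleReorgLoop: the caller pushes a request and immediately receives the done channel of the run that will service it; the loop closes that channel when the batched run finishes. The pattern, distilled into a standalone sketch (all names hypothetical; this shows the shape of the wiring, not the pool's actual code):

// runLoop batches incoming requests into background runs and hands each
// caller the done channel of the run that will service its request.
func runLoop(reqCh <-chan struct{}, doneRespCh chan<- chan struct{}, quit <-chan struct{}) {
	var (
		curDone  chan struct{}         // non-nil while a run is active
		nextDone = make(chan struct{}) // handed out to callers of the next run
		launch   bool
	)
	for {
		if curDone == nil && launch {
			go func(done chan struct{}) {
				// ... perform the batched work ...
				close(done) // wakes every caller holding this channel
			}(nextDone)
			curDone, nextDone = nextDone, make(chan struct{})
			launch = false
		}
		select {
		case <-reqCh:
			launch = true
			doneRespCh <- nextDone
		case <-curDone:
			curDone = nil
		case <-quit:
			if curDone != nil {
				<-curDone // wait for the current run to finish
			}
			close(nextDone)
			return
		}
	}
}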
-func (pool *LegacyPool) scheduleReorgLoop() { - defer pool.wg.Done() - - var ( - curDone chan struct{} // non-nil while runReorg is active - nextDone = make(chan struct{}) - launchNextRun bool - reset *txpoolResetRequest - dirtyAccounts *accountSet - queuedEvents = make(map[common.Address]*sortedMap) - ) - for { - // Launch next background reorg if needed - if curDone == nil && launchNextRun { - // Run the background reorg and announcements - go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) - - // Prepare everything for the next round of reorg - curDone, nextDone = nextDone, make(chan struct{}) - launchNextRun = false - - reset, dirtyAccounts = nil, nil - queuedEvents = make(map[common.Address]*sortedMap) - } - - select { - case req := <-pool.reqResetCh: - // Reset request: update head if request is already pending. - if reset == nil { - reset = req - } else { - reset.newHead = req.newHead - } - launchNextRun = true - pool.reorgDoneCh <- nextDone - - case req := <-pool.reqPromoteCh: - // Promote request: update address set if request is already pending. - if dirtyAccounts == nil { - dirtyAccounts = req - } else { - dirtyAccounts.merge(req) - } - launchNextRun = true - pool.reorgDoneCh <- nextDone - - case tx := <-pool.queueTxEventCh: - // Queue up the event, but don't schedule a reorg. It's up to the caller to - // request one later if they want the events sent. - addr, _ := types.Sender(pool.signer, tx) - if _, ok := queuedEvents[addr]; !ok { - queuedEvents[addr] = newSortedMap() - } - queuedEvents[addr].Put(tx) - - case <-curDone: - curDone = nil - - case <-pool.reorgShutdownCh: - // Wait for current run to finish. - if curDone != nil { - <-curDone - } - close(nextDone) - return - } - } -} - -// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. -func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { - defer func(t0 time.Time) { - reorgDurationTimer.Update(time.Since(t0)) - }(time.Now()) - defer close(done) - - var promoteAddrs []common.Address - if dirtyAccounts != nil && reset == nil { - // Only dirty accounts need to be promoted, unless we're resetting. - // For resets, all addresses in the tx queue will be promoted and - // the flatten operation can be avoided. - promoteAddrs = dirtyAccounts.flatten() - } - pool.mu.Lock() - if reset != nil { - // Reset from the old head to the new, rescheduling any reorged transactions - pool.reset(reset.oldHead, reset.newHead) - - // Nonces were reset, discard any events that became stale - for addr := range events { - events[addr].Forward(pool.pendingNonces.get(addr)) - if events[addr].Len() == 0 { - delete(events, addr) - } - } - // Reset needs promote for all addresses - promoteAddrs = make([]common.Address, 0, len(pool.queue)) - for addr := range pool.queue { - promoteAddrs = append(promoteAddrs, addr) - } - } - // Check for pending transactions for every account that sent new ones - promoted := pool.promoteExecutables(promoteAddrs) - - // If a new block appeared, validate the pool of pending transactions. This will - // remove any transaction that has been included in the block or was invalidated - // because of another transaction (e.g. higher gas price). 
-	if reset != nil {
-		pool.demoteUnexecutables()
-		if reset.newHead != nil {
-			if pool.chainconfig.IsSubnetEVM(reset.newHead.Time) {
-				if err := pool.updateBaseFeeAt(reset.newHead); err != nil {
-					log.Error("error at updating base fee in tx pool", "error", err)
-				}
-			} else {
-				pool.priced.Reheap()
-			}
-		}
-		// Update all accounts to the latest known pending nonce
-		nonces := make(map[common.Address]uint64, len(pool.pending))
-		for addr, list := range pool.pending {
-			highestPending := list.LastElement()
-			nonces[addr] = highestPending.Nonce() + 1
-		}
-		pool.pendingNonces.setAll(nonces)
-	}
-	// Ensure pool.queue and pool.pending sizes stay within the configured limits.
-	pool.truncatePending()
-	pool.truncateQueue()
-
-	dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg))
-	pool.changesSinceReorg = 0 // Reset change counter
-	pool.mu.Unlock()
-
-	// Notify subsystems for newly added transactions
-	for _, tx := range promoted {
-		addr, _ := types.Sender(pool.signer, tx)
-		if _, ok := events[addr]; !ok {
-			events[addr] = newSortedMap()
-		}
-		events[addr].Put(tx)
-	}
-	if len(events) > 0 {
-		var txs []*types.Transaction
-		for _, set := range events {
-			txs = append(txs, set.Flatten()...)
-		}
-		pool.txFeed.Send(core.NewTxsEvent{Txs: txs})
-	}
-}
-
-// reset retrieves the current state of the blockchain and ensures the content
-// of the transaction pool is valid with regard to the chain state.
-func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
-	// If we're reorging an old state, reinject all dropped transactions
-	var reinject types.Transactions
-
-	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
-		// If the reorg is too deep, avoid doing it (will happen during fast sync)
-		oldNum := oldHead.Number.Uint64()
-		newNum := newHead.Number.Uint64()
-
-		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
-			log.Debug("Skipping deep transaction reorg", "depth", depth)
-		} else {
-			// Reorg seems shallow enough to pull all transactions into memory
-			var (
-				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
-				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
-			)
-			if rem == nil {
-				// This can happen if a setHead is performed, where we simply discard
-				// the old head from the chain.
-				// If that is the case, we don't have the lost transactions anymore, and
-				// there's nothing to add.
-				if newNum >= oldNum {
-					// If we reorged to a same or higher number, then it's not a case of setHead
-					log.Warn("Transaction pool reset with missing old head",
-						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
-					return
-				}
-				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
-				log.Debug("Skipping transaction reset caused by setHead",
-					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
-				// We still need to update the current state so that the lost
-				// transactions can be re-added by the user
-			} else {
-				if add == nil {
-					// If the new head is nil, it means that something happened between
-					// the firing of the newhead event and now: most likely a reorg
-					// caused by sync reversion or an explicit setHead back to an
-					// earlier block.
-					log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash())
-					return
-				}
-				var discarded, included types.Transactions
-				for rem.NumberU64() > add.NumberU64() {
-					discarded = append(discarded, rem.Transactions()...)
-					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
-						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
-						return
-					}
-				}
-				for add.NumberU64() > rem.NumberU64() {
-					included = append(included, add.Transactions()...)
-					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
-						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
-						return
-					}
-				}
-				for rem.Hash() != add.Hash() {
-					discarded = append(discarded, rem.Transactions()...)
-					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
-						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
-						return
-					}
-					included = append(included, add.Transactions()...)
-					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
-						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
-						return
-					}
-				}
-				lost := make([]*types.Transaction, 0, len(discarded))
-				for _, tx := range types.TxDifference(discarded, included) {
-					if pool.Filter(tx) {
-						lost = append(lost, tx)
-					}
-				}
-				reinject = lost
-			}
-		}
-	}
-	// Initialize the internal state to the current head
-	if newHead == nil {
-		newHead = pool.chain.CurrentBlock() // Special case during testing
-	}
-	statedb, err := pool.chain.StateAt(newHead.Root)
-	if err != nil {
-		log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
-		return
-	}
-	pool.currentHead.Store(newHead)
-	pool.currentStateLock.Lock()
-	pool.currentState = statedb
-	pool.currentStateLock.Unlock()
-	pool.pendingNonces = newNoncer(statedb)
-
-	// When we reset the txpool, we should explicitly check whether the fee config's
-	// minimum base fee has changed, so that we can correctly drop txs below
-	// minBaseFee from the pool.
-	if pool.chainconfig.IsPrecompileEnabled(feemanager.ContractAddress, newHead.Time) {
-		feeConfig, _, err := pool.chain.GetFeeConfigAt(newHead)
-		if err != nil {
-			log.Error("Failed to get fee config state", "err", err, "root", newHead.Root)
-			return
-		}
-		pool.minimumFee = feeConfig.MinBaseFee
-	}
-
-	// Inject any transactions discarded due to reorgs
-	log.Debug("Reinjecting stale transactions", "count", len(reinject))
-	pool.chain.SenderCacher().Recover(pool.signer, reinject)
-	pool.addTxsLocked(reinject, false)
-}
-
-// promoteExecutables moves transactions that have become processable from the
-// future queue to the set of pending transactions. During this process, all
-// invalidated transactions (low nonce, low balance) are deleted.
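reset's reorg walk above is a common-ancestor traversal: rewind the longer side to equal height, then step both sides back in lockstep until they meet, collecting discarded transactions from the old chain and included ones from the new. Isolated into a sketch (forkDiff and getParent are hypothetical; the nil-parent error paths that reset handles are omitted):

// forkDiff walks both chain segments back to their common ancestor and
// returns the transactions only present on each side of the fork.
func forkDiff(oldHead, newHead *types.Block, getParent func(*types.Block) *types.Block) (discarded, included types.Transactions) {
	rem, add := oldHead, newHead
	for rem.NumberU64() > add.NumberU64() { // old side is longer
		discarded = append(discarded, rem.Transactions()...)
		rem = getParent(rem)
	}
	for add.NumberU64() > rem.NumberU64() { // new side is longer
		included = append(included, add.Transactions()...)
		add = getParent(add)
	}
	for rem.Hash() != add.Hash() { // step back in lockstep to the fork point
		discarded = append(discarded, rem.Transactions()...)
		rem = getParent(rem)
		included = append(included, add.Transactions()...)
		add = getParent(add)
	}
	return discarded, included
}

Transactions present in discarded but not in included (types.TxDifference) are the ones eligible for reinjection.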
-func (pool *LegacyPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
-	pool.currentStateLock.Lock()
-	defer pool.currentStateLock.Unlock()
-
-	// Track the promoted transactions to broadcast them at once
-	var promoted []*types.Transaction
-
-	// Iterate over all accounts and promote any executable transactions
-	gasLimit := pool.currentHead.Load().GasLimit
-	for _, addr := range accounts {
-		list := pool.queue[addr]
-		if list == nil {
-			continue // Just in case someone calls with a non-existing account
-		}
-		// Drop all transactions that are deemed too old (low nonce)
-		forwards := list.Forward(pool.currentState.GetNonce(addr))
-		for _, tx := range forwards {
-			hash := tx.Hash()
-			pool.all.Remove(hash)
-		}
-		log.Trace("Removed old queued transactions", "count", len(forwards))
-		// Drop all transactions that are too costly (low balance or out of gas)
-		drops, _ := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
-		for _, tx := range drops {
-			hash := tx.Hash()
-			pool.all.Remove(hash)
-		}
-		log.Trace("Removed unpayable queued transactions", "count", len(drops))
-		queuedNofundsMeter.Mark(int64(len(drops)))
-
-		// Gather all executable transactions and promote them
-		readies := list.Ready(pool.pendingNonces.get(addr))
-		for _, tx := range readies {
-			hash := tx.Hash()
-			if pool.promoteTx(addr, hash, tx) {
-				promoted = append(promoted, tx)
-			}
-		}
-		log.Trace("Promoted queued transactions", "count", len(promoted))
-		queuedGauge.Dec(int64(len(readies)))
-
-		// Drop all transactions over the allowed limit
-		var caps types.Transactions
-		if !pool.locals.contains(addr) {
-			caps = list.Cap(int(pool.config.AccountQueue))
-			for _, tx := range caps {
-				hash := tx.Hash()
-				pool.all.Remove(hash)
-				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
-			}
-			queuedRateLimitMeter.Mark(int64(len(caps)))
-		}
-		// Mark all the items dropped as removed
-		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
-		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
-		if pool.locals.contains(addr) {
-			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
-		}
-		// Delete the entire queue entry if it became empty.
-		if list.Empty() {
-			delete(pool.queue, addr)
-			delete(pool.beats, addr)
-			if _, ok := pool.pending[addr]; !ok {
-				pool.reserve(addr, false)
-			}
-		}
-	}
-	return promoted
-}
-
-// truncatePending removes transactions from the pending queue if the pool is above the
-// pending limit. The algorithm tries to reduce transaction counts by an approximately
-// equal number for all accounts with many pending transactions.
-func (pool *LegacyPool) truncatePending() { - pending := uint64(0) - for _, list := range pool.pending { - pending += uint64(list.Len()) - } - if pending <= pool.config.GlobalSlots { - return - } - - pendingBeforeCap := pending - // Assemble a spam order to penalize large transactors first - spammers := prque.New[int64, common.Address](nil) - for addr, list := range pool.pending { - // Only evict transactions from high rollers - if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots { - spammers.Push(addr, int64(list.Len())) - } - } - // Gradually drop transactions from offenders - offenders := []common.Address{} - for pending > pool.config.GlobalSlots && !spammers.Empty() { - // Retrieve the next offender if not local address - offender, _ := spammers.Pop() - offenders = append(offenders, offender) - - // Equalize balances until all the same or below threshold - if len(offenders) > 1 { - // Calculate the equalization threshold for all current offenders - threshold := pool.pending[offender].Len() - - // Iteratively reduce all offenders until below limit or threshold reached - for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold { - for i := 0; i < len(offenders)-1; i++ { - list := pool.pending[offenders[i]] - - caps := list.Cap(list.Len() - 1) - for _, tx := range caps { - // Drop the transaction from the global pools too - hash := tx.Hash() - pool.all.Remove(hash) - - // Update the account nonce to the dropped transaction - pool.pendingNonces.setIfLower(offenders[i], tx.Nonce()) - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) - } - pool.priced.Removed(len(caps)) - pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(offenders[i]) { - localGauge.Dec(int64(len(caps))) - } - pending-- - } - } - } - } - - // If still above threshold, reduce to limit or min allowance - if pending > pool.config.GlobalSlots && len(offenders) > 0 { - for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots { - for _, addr := range offenders { - list := pool.pending[addr] - - caps := list.Cap(list.Len() - 1) - for _, tx := range caps { - // Drop the transaction from the global pools too - hash := tx.Hash() - pool.all.Remove(hash) - - // Update the account nonce to the dropped transaction - pool.pendingNonces.setIfLower(addr, tx.Nonce()) - log.Trace("Removed fairness-exceeding pending transaction", "hash", hash) - } - pool.priced.Removed(len(caps)) - pendingGauge.Dec(int64(len(caps))) - if pool.locals.contains(addr) { - localGauge.Dec(int64(len(caps))) - } - pending-- - } - } - } - pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending)) -} - -// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit. 
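The equalization phase of truncatePending above is easier to follow with bare counters. A self-contained sketch of the same idea (equalize is a hypothetical helper operating on per-sender pending counts, not the pool's real lists; the follow-up phase that caps every offender down to AccountSlots is omitted):

package main

import "sort"

// equalize trims the largest senders toward the size of the next-largest,
// one tx per offender per pass, until the global cap is met.
func equalize(counts []int, globalCap int) []int {
	total := 0
	for _, c := range counts {
		total += c
	}
	sort.Sort(sort.Reverse(sort.IntSlice(counts))) // biggest senders first
	for i := 1; total > globalCap && i < len(counts); i++ {
		threshold := counts[i] // size of the next offender
		for total > globalCap && counts[0] > threshold {
			for j := 0; j < i && total > globalCap; j++ {
				counts[j]--
				total--
			}
		}
	}
	return counts
}

For example, equalize([]int{100, 60, 30}, 120) yields [45 45 30]: the largest sender is first trimmed to 60, then the top two are trimmed in lockstep until the cap is met.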
-func (pool *LegacyPool) truncateQueue() {
-	queued := uint64(0)
-	for _, list := range pool.queue {
-		queued += uint64(list.Len())
-	}
-	if queued <= pool.config.GlobalQueue {
-		return
-	}
-
-	// Sort all accounts with queued transactions by heartbeat
-	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
-	for addr := range pool.queue {
-		if !pool.locals.contains(addr) { // don't drop locals
-			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
-		}
-	}
-	sort.Sort(sort.Reverse(addresses))
-
-	// Drop transactions until the total is below the limit or only locals remain
-	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
-		addr := addresses[len(addresses)-1]
-		list := pool.queue[addr.address]
-
-		addresses = addresses[:len(addresses)-1]
-
-		// Drop all transactions if they are less than the overflow
-		if size := uint64(list.Len()); size <= drop {
-			for _, tx := range list.Flatten() {
-				pool.removeTx(tx.Hash(), true, true)
-			}
-			drop -= size
-			queuedRateLimitMeter.Mark(int64(size))
-			continue
-		}
-		// Otherwise drop only the last few transactions
-		txs := list.Flatten()
-		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
-			pool.removeTx(txs[i].Hash(), true, true)
-			drop--
-			queuedRateLimitMeter.Mark(1)
-		}
-	}
-}
-
-// demoteUnexecutables removes invalid and processed transactions from the pool's
-// executable/pending queue; any subsequent transactions that become unexecutable
-// are moved back into the future queue.
-//
-// Note: transactions are not marked as removed in the priced list because re-heaping
-// is always explicitly triggered by SetBaseFee, and it would be unnecessary and wasteful
-// to trigger a re-heap in this function.
-func (pool *LegacyPool) demoteUnexecutables() {
-	pool.currentStateLock.Lock()
-	defer pool.currentStateLock.Unlock()
-
-	// Iterate over all accounts and demote any non-executable transactions
-	gasLimit := pool.currentHead.Load().GasLimit
-	for addr, list := range pool.pending {
-		nonce := pool.currentState.GetNonce(addr)
-
-		// Drop all transactions that are deemed too old (low nonce)
-		olds := list.Forward(nonce)
-		for _, tx := range olds {
-			hash := tx.Hash()
-			pool.all.Remove(hash)
-			log.Trace("Removed old pending transaction", "hash", hash)
-		}
-		// Drop all transactions that are too costly (low balance or out of gas),
-		// and queue any invalids back for later
-		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit)
-		for _, tx := range drops {
-			hash := tx.Hash()
-			log.Trace("Removed unpayable pending transaction", "hash", hash)
-			pool.all.Remove(hash)
-		}
-		pendingNofundsMeter.Mark(int64(len(drops)))
-
-		for _, tx := range invalids {
-			hash := tx.Hash()
-			log.Trace("Demoting pending transaction", "hash", hash)
-
-			// Internal shuffle shouldn't touch the lookup set.
-			pool.enqueueTx(hash, tx, false, false)
-		}
-		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
-		if pool.locals.contains(addr) {
-			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
-		}
-		// If there's a gap in front, alert (should never happen) and postpone all transactions
-		if list.Len() > 0 && list.txs.Get(nonce) == nil {
-			gapped := list.Cap(0)
-			for _, tx := range gapped {
-				hash := tx.Hash()
-				log.Error("Demoting invalidated transaction", "hash", hash)
-
-				// Internal shuffle shouldn't touch the lookup set.
-				pool.enqueueTx(hash, tx, false, false)
-			}
-			pendingGauge.Dec(int64(len(gapped)))
-		}
-		// Delete the entire pending entry if it became empty.
-		if list.Empty() {
-			delete(pool.pending, addr)
-			if _, ok := pool.queue[addr]; !ok {
-				pool.reserve(addr, false)
-			}
-		}
-	}
-}
-
-// startPeriodicFeeUpdate launches the periodic base fee update goroutine once
-// SubnetEVM is (or will be) enabled.
-func (pool *LegacyPool) startPeriodicFeeUpdate() {
-	if pool.chainconfig.SubnetEVMTimestamp == nil {
-		return
-	}
-
-	// Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
-	// when starting up in Subnet EVM before the base fee is updated.
-	if time.Now().After(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp)) {
-		pool.updateBaseFee()
-	}
-
-	pool.wg.Add(1)
-	go pool.periodicBaseFeeUpdate()
-}
-
-func (pool *LegacyPool) periodicBaseFeeUpdate() {
-	defer pool.wg.Done()
-
-	// Sleep until it's time to start the periodic base fee update or the tx pool is shutting down
-	select {
-	case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp))):
-	case <-pool.generalShutdownChan:
-		return // Return early if shutting down
-	}
-
-	// Update the base fee every [baseFeeUpdateInterval]
-	// and shut down when [generalShutdownChan] is closed by Stop()
-	for {
-		select {
-		case <-time.After(baseFeeUpdateInterval):
-			pool.updateBaseFee()
-		case <-pool.generalShutdownChan:
-			return
-		}
-	}
-}
-
-func (pool *LegacyPool) updateBaseFee() {
-	pool.mu.Lock()
-	defer pool.mu.Unlock()
-
-	err := pool.updateBaseFeeAt(pool.currentHead.Load())
-	if err != nil {
-		log.Error("failed to update base fee", "currentHead", pool.currentHead.Load().Hash(), "err", err)
-	}
-}
-
-// updateBaseFeeAt estimates the next base fee at the given header and updates
-// the priced list; it assumes the pool lock is already held.
-func (pool *LegacyPool) updateBaseFeeAt(head *types.Header) error {
-	feeConfig, _, err := pool.chain.GetFeeConfigAt(head)
-	if err != nil {
-		return err
-	}
-	_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, feeConfig, head, uint64(time.Now().Unix()))
-	if err != nil {
-		return err
-	}
-	pool.priced.SetBaseFee(baseFeeEstimate)
-	return nil
-}
-
-// addressByHeartbeat is an account address tagged with its last activity timestamp.
-type addressByHeartbeat struct {
-	address   common.Address
-	heartbeat time.Time
-}
-
-type addressesByHeartbeat []addressByHeartbeat
-
-func (a addressesByHeartbeat) Len() int           { return len(a) }
-func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
-func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
-
-// accountSet is simply a set of addresses to check for existence, and a signer
-// capable of deriving addresses from transactions.
-type accountSet struct {
-	accounts map[common.Address]struct{}
-	signer   types.Signer
-	cache    *[]common.Address
-}
-
-// newAccountSet creates a new address set with an associated signer for sender
-// derivations.
-func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
-	as := &accountSet{
-		accounts: make(map[common.Address]struct{}, len(addrs)),
-		signer:   signer,
-	}
-	for _, addr := range addrs {
-		as.add(addr)
-	}
-	return as
-}
-
-// contains checks if a given address is contained within the set.
-func (as *accountSet) contains(addr common.Address) bool {
-	_, exist := as.accounts[addr]
-	return exist
-}
-
-// containsTx checks if the sender of a given tx is within the set. If the sender
-// cannot be derived, this method returns false.
-func (as *accountSet) containsTx(tx *types.Transaction) bool {
-	if addr, err := types.Sender(as.signer, tx); err == nil {
-		return as.contains(addr)
-	}
-	return false
-}
-
-// add inserts a new address into the set to track.
-func (as *accountSet) add(addr common.Address) {
-	as.accounts[addr] = struct{}{}
-	as.cache = nil
-}
-
-// addTx adds the sender of tx into the set.
-func (as *accountSet) addTx(tx *types.Transaction) {
-	if addr, err := types.Sender(as.signer, tx); err == nil {
-		as.add(addr)
-	}
-}
-
-// flatten returns the list of addresses within this set, also caching it for later
-// reuse. The returned slice should not be changed!
-func (as *accountSet) flatten() []common.Address {
-	if as.cache == nil {
-		accounts := make([]common.Address, 0, len(as.accounts))
-		for account := range as.accounts {
-			accounts = append(accounts, account)
-		}
-		as.cache = &accounts
-	}
-	return *as.cache
-}
-
-// merge adds all addresses from the 'other' set into 'as'.
-func (as *accountSet) merge(other *accountSet) {
-	for addr := range other.accounts {
-		as.accounts[addr] = struct{}{}
-	}
-	as.cache = nil
-}
-
-// lookup is used internally by LegacyPool to track transactions while allowing
-// lookup without mutex contention.
-//
-// Note, although this type is properly protected against concurrent access, it
-// is **not** a type that should ever be mutated or even exposed outside of the
-// transaction pool, since its internal state is tightly coupled with the pool's
-// internal mechanisms. The sole purpose of the type is to permit out-of-bound
-// peeking into the pool in LegacyPool.Get without having to acquire the widely
-// scoped LegacyPool.mu mutex.
-//
-// This lookup set also tracks which transactions are "local", which is useful
-// for building upper-level structures.
-type lookup struct {
-	slots   int
-	lock    sync.RWMutex
-	locals  map[common.Hash]*types.Transaction
-	remotes map[common.Hash]*types.Transaction
-}
-
-// newLookup returns a new lookup structure.
-func newLookup() *lookup {
-	return &lookup{
-		locals:  make(map[common.Hash]*types.Transaction),
-		remotes: make(map[common.Hash]*types.Transaction),
-	}
-}
-
-// Range calls f on each key and value present in the map. The callback passed
-// should return whether the iteration should continue.
-// Callers need to specify which set (or both) to iterate.
-func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	if local {
-		for key, value := range t.locals {
-			if !f(key, value, true) {
-				return
-			}
-		}
-	}
-	if remote {
-		for key, value := range t.remotes {
-			if !f(key, value, false) {
-				return
-			}
-		}
-	}
-}
-
-// Get returns a transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) Get(hash common.Hash) *types.Transaction {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	if tx := t.locals[hash]; tx != nil {
-		return tx
-	}
-	return t.remotes[hash]
-}
-
-// GetLocal returns a local transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	return t.locals[hash]
-}
-
-// GetRemote returns a remote transaction if it exists in the lookup, or nil if not found.
-func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	return t.remotes[hash]
-}
-
-// Count returns the current number of transactions in the lookup.
-func (t *lookup) Count() int {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	return len(t.locals) + len(t.remotes)
-}
-
-// LocalCount returns the current number of local transactions in the lookup.
-func (t *lookup) LocalCount() int {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	return len(t.locals)
-}
-
-// RemoteCount returns the current number of remote transactions in the lookup.
-func (t *lookup) RemoteCount() int {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	return len(t.remotes)
-}
-
-// Slots returns the current number of slots used in the lookup.
-func (t *lookup) Slots() int {
-	t.lock.RLock()
-	defer t.lock.RUnlock()
-
-	return t.slots
-}
-
-// Add adds a transaction to the lookup.
-func (t *lookup) Add(tx *types.Transaction, local bool) {
-	t.lock.Lock()
-	defer t.lock.Unlock()
-
-	t.slots += numSlots(tx)
-	slotsGauge.Update(int64(t.slots))
-
-	if local {
-		t.locals[tx.Hash()] = tx
-	} else {
-		t.remotes[tx.Hash()] = tx
-	}
-}
-
-// Remove removes a transaction from the lookup.
-func (t *lookup) Remove(hash common.Hash) {
-	t.lock.Lock()
-	defer t.lock.Unlock()
-
-	tx, ok := t.locals[hash]
-	if !ok {
-		tx, ok = t.remotes[hash]
-	}
-	if !ok {
-		log.Error("No transaction found to be deleted", "hash", hash)
-		return
-	}
-	t.slots -= numSlots(tx)
-	slotsGauge.Update(int64(t.slots))
-
-	delete(t.locals, hash)
-	delete(t.remotes, hash)
-}
-
-// RemoteToLocals migrates the transactions belonging to the given locals into the
-// locals set. It assumes the locals set is safe for concurrent use.
-func (t *lookup) RemoteToLocals(locals *accountSet) int {
-	t.lock.Lock()
-	defer t.lock.Unlock()
-
-	var migrated int
-	for hash, tx := range t.remotes {
-		if locals.containsTx(tx) {
-			t.locals[hash] = tx
-			delete(t.remotes, hash)
-			migrated += 1
-		}
-	}
-	return migrated
-}
-
-// RemotesBelowTip finds all remote transactions below the given tip threshold.
-func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
-	found := make(types.Transactions, 0, 128)
-	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
-		if tx.GasTipCapIntCmp(threshold) < 0 {
-			found = append(found, tx)
-		}
-		return true
-	}, false, true) // Only iterate remotes
-	return found
-}
-
-// numSlots calculates the number of slots needed for a single transaction.
-func numSlots(tx *types.Transaction) int {
-	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
-}
diff --git a/core/txpool/legacypool/list.go b/core/txpool/list.go
similarity index 99%
rename from core/txpool/legacypool/list.go
rename to core/txpool/list.go
index 92b4e673eb..44fd3e9eb1 100644
--- a/core/txpool/legacypool/list.go
+++ b/core/txpool/list.go
@@ -24,7 +24,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package legacypool
+package txpool
 
 import (
 	"container/heap"
@@ -600,7 +600,7 @@ func (l *pricedList) underpricedFor(h *priceHeap, tx *types.Transaction) bool {
 func (l *pricedList) Discard(slots int, force bool) (types.Transactions, bool) {
 	drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop
 	for slots > 0 {
-		if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio {
+		if len(l.urgent.list)*floatingRatio > len(l.floating.list)*urgentRatio || floatingRatio == 0 {
 			// Discard stale transactions if found during cleanup
 			tx := heap.Pop(&l.urgent).(*types.Transaction)
 			if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated
diff --git a/core/txpool/legacypool/list_test.go b/core/txpool/list_test.go
similarity index 99%
rename from core/txpool/legacypool/list_test.go
rename to core/txpool/list_test.go
index d7ca91844e..fe8e8d5710 100644
--- a/core/txpool/legacypool/list_test.go
+++ b/core/txpool/list_test.go
@@ -24,7 +24,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package legacypool
+package txpool
 
 import (
 	"math/big"
diff --git a/core/txpool/legacypool/noncer.go b/core/txpool/noncer.go
similarity index 99%
rename from core/txpool/legacypool/noncer.go
rename to core/txpool/noncer.go
index b0280882ff..df416c0958 100644
--- a/core/txpool/legacypool/noncer.go
+++ b/core/txpool/noncer.go
@@ -24,7 +24,7 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package legacypool
+package txpool
 
 import (
 	"sync"
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
deleted file mode 100644
index fb00fb1abc..0000000000
--- a/core/txpool/subpool.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package txpool
-
-import (
-	"math/big"
-	"time"
-
-	"github.com/ava-labs/subnet-evm/core"
-	"github.com/ava-labs/subnet-evm/core/types"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto/kzg4844"
-	"github.com/ethereum/go-ethereum/event"
-)
-
-// Transaction is a helper struct to group together a canonical transaction with
-// satellite data items that are needed by the pool but are not part of the chain.
-type Transaction struct {
-	Tx *types.Transaction // Canonical transaction
-
-	BlobTxBlobs   []kzg4844.Blob       // Blobs needed by the blob pool
-	BlobTxCommits []kzg4844.Commitment // Commitments needed by the blob pool
-	BlobTxProofs  []kzg4844.Proof      // Proofs needed by the blob pool
-}
-
-// LazyTransaction contains a small subset of the transaction properties that are
-// enough for the miner and other APIs to handle large batches of transactions,
-// and it supports pulling up the entire transaction when really needed.
-type LazyTransaction struct {
-	Pool SubPool      // Transaction subpool to pull the real transaction up
-	Hash common.Hash  // Transaction hash to pull up if needed
-	Tx   *Transaction // Transaction if already resolved
-
-	Time      time.Time // Time when the transaction was first seen
-	GasFeeCap *big.Int  // Maximum fee per gas the transaction may consume
-	GasTipCap *big.Int  // Maximum miner tip per gas the transaction can pay
-}
-
-// Resolve retrieves the full transaction belonging to a lazy handle if it is still
-// maintained by the transaction pool.
-func (ltx *LazyTransaction) Resolve() *Transaction {
-	if ltx.Tx == nil {
-		ltx.Tx = ltx.Pool.Get(ltx.Hash)
-	}
-	return ltx.Tx
-}
-
-// AddressReserver is passed by the main transaction pool to subpools, so they
-// may request (and relinquish) exclusive access to certain addresses.
-type AddressReserver func(addr common.Address, reserve bool) error
-
-// SubPool represents a specialized transaction pool that lives on its own (e.g.
-// blob pool). Since, independent of how many specialized pools we have, they
-// need to be updated in lockstep and assembled into one coherent view for block
-// production, this interface defines the common methods that allow the primary
-// transaction pool to manage the subpools.
-type SubPool interface {
-	// Filter is a selector used to decide whether a transaction should be added
-	// to this particular subpool.
-	Filter(tx *types.Transaction) bool
-
-	// Init sets the base parameters of the subpool, allowing it to load any saved
-	// transactions from disk and also permitting internal maintenance routines to
-	// start up.
-	//
-	// These should not be passed as a constructor argument - nor should the pools
-	// start by themselves - in order to keep multiple subpools in lockstep with
-	// one another.
-	Init(gasTip *big.Int, head *types.Header, reserve AddressReserver) error
-
-	// Close terminates any background processing threads and releases any held
-	// resources.
-	Close() error
-
-	// Reset retrieves the current state of the blockchain and ensures the content
-	// of the transaction pool is valid with regard to the chain state.
-	Reset(oldHead, newHead *types.Header)
-
-	// SetGasTip updates the minimum price required by the subpool for a new
-	// transaction, and drops all transactions below this threshold.
-	SetGasTip(tip *big.Int)
-	SetMinFee(fee *big.Int)
-
-	// Has returns an indicator whether subpool has a transaction cached with the
-	// given hash.
-	Has(hash common.Hash) bool
-	HasLocal(hash common.Hash) bool
-
-	// Get returns a transaction if it is contained in the pool, or nil otherwise.
-	Get(hash common.Hash) *Transaction
-
-	// Add enqueues a batch of transactions into the pool if they are valid. Due
-	// to the large transaction churn, add may postpone fully integrating the tx
-	// to a later point to batch multiple ones together.
-	Add(txs []*Transaction, local bool, sync bool) []error
-
-	// Pending retrieves all currently processable transactions, grouped by origin
-	// account and sorted by nonce.
-	Pending(enforceTips bool) map[common.Address][]*LazyTransaction
-	PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*LazyTransaction
-	PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*LazyTransaction
-	IteratePending(f func(tx *Transaction) bool) bool // Returns false if iteration was interrupted.
-
-	// SubscribeTransactions subscribes to new transaction events.
-	SubscribeTransactions(ch chan<- core.NewTxsEvent) event.Subscription
-
-	// Nonce returns the next nonce of an account, with all transactions executable
-	// by the pool already applied on top.
-	Nonce(addr common.Address) uint64
-
-	// Stats retrieves the current pool stats, namely the number of pending and the
-	// number of queued (non-executable) transactions.
-	Stats() (int, int)
-
-	// Content retrieves the data content of the transaction pool, returning all the
-	// pending as well as queued transactions, grouped by account and sorted by nonce.
-	Content() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction)
-
-	// ContentFrom retrieves the data content of the transaction pool, returning the
-	// pending as well as queued transactions of this address, grouped by nonce.
-	ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction)
-
-	// Locals retrieves the accounts currently considered local by the pool.
-	Locals() []common.Address
-
-	// Status returns the known status (unknown/pending/queued) of a transaction
-	// identified by its hash.
-	Status(hash common.Hash) TxStatus
-}
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 252aca0cf8..20f11ddc39 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -8,7 +8,7 @@
 //
 // Much love to the original authors for their work.
 // **********
-// Copyright 2023 The go-ethereum Authors
+// Copyright 2014 The go-ethereum Authors
 // This file is part of the go-ethereum library.
 //
 // The go-ethereum library is free software: you can redistribute it and/or modify
@@ -29,24 +29,136 @@ package txpool
 
 import (
 	"errors"
 	"fmt"
+	"math"
 	"math/big"
+	"sort"
 	"sync"
 	"sync/atomic"
+	"time"
 
+	"github.com/ava-labs/subnet-evm/commontype"
+	"github.com/ava-labs/subnet-evm/consensus/dummy"
 	"github.com/ava-labs/subnet-evm/core"
+	"github.com/ava-labs/subnet-evm/core/state"
 	"github.com/ava-labs/subnet-evm/core/types"
 	"github.com/ava-labs/subnet-evm/metrics"
+	"github.com/ava-labs/subnet-evm/params"
+	"github.com/ava-labs/subnet-evm/precompile/contracts/feemanager"
+	"github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist"
+	"github.com/ava-labs/subnet-evm/utils"
+	"github.com/ava-labs/subnet-evm/vmerrs"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/prque"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
 )
 
+const (
+	// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
+	chainHeadChanSize = 10
+
+	// txSlotSize is used to calculate how many data slots a single transaction
+	// takes up based on its size. The slots are used as DoS protection, ensuring
+	// that validating a new transaction remains a constant operation (in reality
+	// O(maxslots), where max slots are 4 currently).
+	txSlotSize = 32 * 1024
+
+	// txMaxSize is the maximum size a single transaction can have.
This field has
+	// non-trivial consequences: larger transactions are significantly harder and
+	// more expensive to propagate; larger transactions also take more resources
+	// to validate whether they fit into the pool or not.
+	//
+	// Note: the max contract size is 24KB
+	txMaxSize = 4 * txSlotSize // 128KB
+)
+
 var (
+	// ErrAlreadyKnown is returned if the transaction is already contained
+	// within the pool.
+	ErrAlreadyKnown = errors.New("already known")
+
+	// ErrInvalidSender is returned if the transaction contains an invalid signature.
+	ErrInvalidSender = errors.New("invalid sender")
+
+	// ErrUnderpriced is returned if a transaction's gas price is below the minimum
+	// configured for the transaction pool.
+	ErrUnderpriced = errors.New("transaction underpriced")
+
+	// ErrTxPoolOverflow is returned if the transaction pool is full and can't accept
+	// another remote transaction.
+	ErrTxPoolOverflow = errors.New("txpool is full")
+
+	// ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced
+	// with a different one without the required price bump.
+	ErrReplaceUnderpriced = errors.New("replacement transaction underpriced")
+
+	// ErrGasLimit is returned if a transaction's requested gas limit exceeds the
+	// maximum allowance of the current block.
+	ErrGasLimit = errors.New("exceeds block gas limit")
+
+	// ErrNegativeValue is a sanity error to ensure no one is able to specify a
+	// transaction with a negative value.
+	ErrNegativeValue = errors.New("negative value")
+
+	// ErrOversizedData is returned if the input data of a transaction is greater
+	// than some meaningful limit a user might use. This is not a consensus error
+	// making the transaction invalid, rather a DoS protection.
+	ErrOversizedData = errors.New("oversized data")
+
+	// ErrFutureReplacePending is returned if a future transaction replaces a pending
+	// transaction. Future transactions should only be able to replace other future transactions.
+	ErrFutureReplacePending = errors.New("future transaction tries to replace pending")
+
 	// ErrOverdraft is returned if a transaction would cause the sender's balance to go negative,
 	// thus invalidating a potentially large number of transactions.
ErrOverdraft = errors.New("transaction would cause overdraft")
 )
 
+var (
+	evictionInterval      = time.Minute      // Time interval to check for evictable transactions
+	statsReportInterval   = 8 * time.Second  // Time interval to report transaction pool stats
+	baseFeeUpdateInterval = 10 * time.Second // Time interval at which to schedule a base fee update for the tx pool after SubnetEVM is enabled
+)
+
+var (
+	// Metrics for the pending pool
+	pendingDiscardMeter   = metrics.NewRegisteredMeter("txpool/pending/discard", nil)
+	pendingReplaceMeter   = metrics.NewRegisteredMeter("txpool/pending/replace", nil)
+	pendingRateLimitMeter = metrics.NewRegisteredMeter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
+	pendingNofundsMeter   = metrics.NewRegisteredMeter("txpool/pending/nofunds", nil)   // Dropped due to out-of-funds
+
+	// Metrics for the queued pool
+	queuedDiscardMeter   = metrics.NewRegisteredMeter("txpool/queued/discard", nil)
+	queuedReplaceMeter   = metrics.NewRegisteredMeter("txpool/queued/replace", nil)
+	queuedRateLimitMeter = metrics.NewRegisteredMeter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
+	queuedNofundsMeter   = metrics.NewRegisteredMeter("txpool/queued/nofunds", nil)   // Dropped due to out-of-funds
+	queuedEvictionMeter  = metrics.NewRegisteredMeter("txpool/queued/eviction", nil)  // Dropped due to lifetime
+
+	// General tx metrics
+	knownTxMeter       = metrics.NewRegisteredMeter("txpool/known", nil)
+	validTxMeter       = metrics.NewRegisteredMeter("txpool/valid", nil)
+	invalidTxMeter     = metrics.NewRegisteredMeter("txpool/invalid", nil)
+	underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil)
+	overflowedTxMeter  = metrics.NewRegisteredMeter("txpool/overflowed", nil)
+
+	// throttleTxMeter counts how many transactions are rejected due to too many changes between
+	// txpool reorgs.
+	throttleTxMeter = metrics.NewRegisteredMeter("txpool/throttle", nil)
+	// reorgDurationTimer measures how long a txpool reorg takes.
+	reorgDurationTimer = metrics.NewRegisteredTimer("txpool/reorgtime", nil)
+	// dropBetweenReorgHistogram counts how many drops we experience between two reorg runs. It is expected
+	// that this number is pretty low, since txpool reorgs happen very frequently.
+	dropBetweenReorgHistogram = metrics.NewRegisteredHistogram("txpool/dropbetweenreorg", nil, metrics.NewExpDecaySample(1028, 0.015))
+
+	pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
+	queuedGauge  = metrics.NewRegisteredGauge("txpool/queued", nil)
+	localGauge   = metrics.NewRegisteredGauge("txpool/local", nil)
+	slotsGauge   = metrics.NewRegisteredGauge("txpool/slots", nil)
+
+	reheapTimer = metrics.NewRegisteredTimer("txpool/reheap", nil)
+)
+
 // TxStatus is the current status of a transaction as seen by the pool.
 type TxStatus uint
 
@@ -56,308 +168,432 @@ const (
 	TxStatusPending
 )
 
-var (
-	// reservationsGaugeName is the prefix of a per-subpool address reservation
-	// metric.
-	//
-	// This is mostly a sanity metric to ensure there's no bug that would make
-	// some subpool hog all the reservations due to mis-accounting.
-	reservationsGaugeName = "txpool/reservations"
-)
-
-// BlockChain defines the minimal set of methods needed to back a tx pool with
-// a chain. Exists to allow mocking the live chain out of tests.
-type BlockChain interface {
-	// CurrentBlock returns the current head of the chain.
+// blockChain provides the state of the blockchain and the current gas limit to do
+// some pre-checks in the tx pool and for event subscribers.
+type blockChain interface { CurrentBlock() *types.Header + GetBlock(hash common.Hash, number uint64) *types.Block + StateAt(root common.Hash) (*state.StateDB, error) + SenderCacher() *core.TxSenderCacher + GetFeeConfigAt(parent *types.Header) (commontype.FeeConfig, *big.Int, error) - // SubscribeChainHeadEvent subscribes to new blocks being added to the chain. SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription } -// TxPool is an aggregator for various transaction specific pools, collectively -// tracking all the transactions deemed interesting by the node. Transactions -// enter the pool when they are received from the network or submitted locally. -// They exit the pool when they are included in the blockchain or evicted due to -// resource constraints. -type TxPool struct { - subpools []SubPool // List of subpools for specialized transaction handling +// Config are the configuration parameters of the transaction pool. +type Config struct { + Locals []common.Address // Addresses that should be treated by default as local + NoLocals bool // Whether local transaction handling should be disabled + Journal string // Journal of local transactions to survive node restarts + Rejournal time.Duration // Time interval to regenerate the local transaction journal - reservations map[common.Address]SubPool // Map with the account to pool reservations - reserveLock sync.Mutex // Lock protecting the account reservations + PriceLimit uint64 // Minimum gas price to enforce for acceptance into the pool + PriceBump uint64 // Minimum price bump percentage to replace an already existing transaction (nonce) - subs event.SubscriptionScope // Subscription scope to unscubscribe all on shutdown - quit chan chan error // Quit channel to tear down the head updater + AccountSlots uint64 // Number of executable transaction slots guaranteed per account + GlobalSlots uint64 // Maximum number of executable transaction slots for all accounts + AccountQueue uint64 // Maximum number of non-executable transaction slots permitted per account + GlobalQueue uint64 // Maximum number of non-executable transaction slots for all accounts - gasTip atomic.Pointer[big.Int] // Remember last value set so it can be retrieved - reorgFeed event.Feed + Lifetime time.Duration // Maximum amount of time non-executable transaction are queued } -// New creates a new transaction pool to gather, sort and filter inbound -// transactions from the network. -func New(gasTip *big.Int, chain BlockChain, subpools []SubPool) (*TxPool, error) { - // Retrieve the current head so that all subpools and this main coordinator - // pool will have the same starting state, even if the chain moves forward - // during initialization. - head := chain.CurrentBlock() +// DefaultConfig contains the default configurations for the transaction +// pool. +var DefaultConfig = Config{ + // If we re-enable txpool journaling, we should also add the saved local + // transactions to the p2p gossip on startup. + Journal: "", + Rejournal: time.Hour, - pool := &TxPool{ - subpools: subpools, - reservations: make(map[common.Address]SubPool), - quit: make(chan chan error), - } - for i, subpool := range subpools { - if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil { - for j := i - 1; j >= 0; j-- { - subpools[j].Close() - } - return nil, err - } - } - go pool.loop(head, chain) - return pool, nil -} - -// reserver is a method to create an address reservation callback to exclusively -// assign/deassign addresses to/from subpools. 
This can ensure that at any point -// in time, only a single subpool is able to manage an account, avoiding cross -// subpool eviction issues and nonce conflicts. -func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver { - return func(addr common.Address, reserve bool) error { - p.reserveLock.Lock() - defer p.reserveLock.Unlock() - - owner, exists := p.reservations[addr] - if reserve { - // Double reservations are forbidden even from the same pool to - // avoid subtle bugs in the long term. - if exists { - if owner == subpool { - log.Error("pool attempted to reserve already-owned address", "address", addr) - return nil // Ignore fault to give the pool a chance to recover while the bug gets fixed - } - return errors.New("address already reserved") - } - p.reservations[addr] = subpool - if metrics.Enabled { - m := fmt.Sprintf("%s/%d", reservationsGaugeName, id) - metrics.GetOrRegisterGauge(m, nil).Inc(1) - } - return nil - } - // Ensure subpools only attempt to unreserve their own owned addresses, - // otherwise flag as a programming error. - if !exists { - log.Error("pool attempted to unreserve non-reserved address", "address", addr) - return errors.New("address not reserved") - } - if subpool != owner { - log.Error("pool attempted to unreserve non-owned address", "address", addr) - return errors.New("address not owned") - } - delete(p.reservations, addr) - if metrics.Enabled { - m := fmt.Sprintf("%s/%d", reservationsGaugeName, id) - metrics.GetOrRegisterGauge(m, nil).Dec(1) - } - return nil + PriceLimit: 1, + PriceBump: 10, + + AccountSlots: 16, + GlobalSlots: 4096 + 1024, // urgent + floating queue capacity with 4:1 ratio + AccountQueue: 64, + GlobalQueue: 1024, + + Lifetime: 10 * time.Minute, +} + +// sanitize checks the provided user configurations and changes anything that's +// unreasonable or unworkable. +func (config *Config) sanitize() Config { + conf := *config + if conf.Rejournal < time.Second { + log.Warn("Sanitizing invalid txpool journal time", "provided", conf.Rejournal, "updated", time.Second) + conf.Rejournal = time.Second + } + if conf.PriceLimit < 1 { + log.Warn("Sanitizing invalid txpool price limit", "provided", conf.PriceLimit, "updated", DefaultConfig.PriceLimit) + conf.PriceLimit = DefaultConfig.PriceLimit + } + if conf.PriceBump < 1 { + log.Warn("Sanitizing invalid txpool price bump", "provided", conf.PriceBump, "updated", DefaultConfig.PriceBump) + conf.PriceBump = DefaultConfig.PriceBump } + if conf.AccountSlots < 1 { + log.Warn("Sanitizing invalid txpool account slots", "provided", conf.AccountSlots, "updated", DefaultConfig.AccountSlots) + conf.AccountSlots = DefaultConfig.AccountSlots + } + if conf.GlobalSlots < 1 { + log.Warn("Sanitizing invalid txpool global slots", "provided", conf.GlobalSlots, "updated", DefaultConfig.GlobalSlots) + conf.GlobalSlots = DefaultConfig.GlobalSlots + } + if conf.AccountQueue < 1 { + log.Warn("Sanitizing invalid txpool account queue", "provided", conf.AccountQueue, "updated", DefaultConfig.AccountQueue) + conf.AccountQueue = DefaultConfig.AccountQueue + } + if conf.GlobalQueue < 1 { + log.Warn("Sanitizing invalid txpool global queue", "provided", conf.GlobalQueue, "updated", DefaultConfig.GlobalQueue) + conf.GlobalQueue = DefaultConfig.GlobalQueue + } + if conf.Lifetime < 1 { + log.Warn("Sanitizing invalid txpool lifetime", "provided", conf.Lifetime, "updated", DefaultConfig.Lifetime) + conf.Lifetime = DefaultConfig.Lifetime + } + return conf +} + +// TxPool contains all currently known transactions. 
Transactions +// enter the pool when they are received from the network or submitted +// locally. They exit the pool when they are included in the blockchain. +// +// The pool separates processable transactions (which can be applied to the +// current state) and future transactions. Transactions move between those +// two states over time as they are received and processed. +type TxPool struct { + config Config + chainconfig *params.ChainConfig + chain blockChain + gasPrice *big.Int + minimumFee *big.Int + txFeed event.Feed + headFeed event.Feed + reorgFeed event.Feed + scope event.SubscriptionScope + signer types.Signer + mu sync.RWMutex + + rules atomic.Pointer[params.Rules] // Rules for the currentHead + eip2718 atomic.Bool // Fork indicator whether we are using EIP-2718 type transactions. + eip1559 atomic.Bool // Fork indicator whether we are using EIP-1559 type transactions. + eip3860 atomic.Bool // Fork indicator whether EIP-3860 is activated. (activated in Shanghai Upgrade in Ethereum) + + currentHead *types.Header + // [currentState] is the state of the blockchain head. It is reset whenever + // head changes. + currentState *state.StateDB + // [currentStateLock] is required to allow concurrent access to address nonces + // and balances during reorgs and gossip handling. + currentStateLock sync.Mutex + + pendingNonces *noncer // Pending state tracking virtual nonces + currentMaxGas atomic.Uint64 // Current gas limit for transaction caps + + locals *accountSet // Set of local transaction to exempt from eviction rules + journal *journal // Journal of local transaction to back up to disk + + pending map[common.Address]*list // All currently processable transactions + queue map[common.Address]*list // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + all *lookup // All transactions to allow lookups + priced *pricedList // All transactions sorted by price + + chainHeadCh chan core.ChainHeadEvent + chainHeadSub event.Subscription + reqResetCh chan *txpoolResetRequest + reqPromoteCh chan *accountSet + queueTxEventCh chan *types.Transaction + reorgDoneCh chan chan struct{} + reorgShutdownCh chan struct{} // requests shutdown of scheduleReorgLoop + generalShutdownChan chan struct{} // closed when the transaction pool is stopped. Any goroutine can listen + // to this to be notified if it should shut down. + wg sync.WaitGroup // tracks loop, scheduleReorgLoop + initDoneCh chan struct{} // is closed once the pool is initialized (for tests) + + changesSinceReorg int // A counter for how many drops we've performed in-between reorg. } -// Close terminates the transaction pool and all its subpools. -func (p *TxPool) Close() error { - p.subs.Close() +type txpoolResetRequest struct { + oldHead, newHead *types.Header +} - var errs []error +// NewTxPool creates a new transaction pool to gather, sort and filter inbound +// transactions from the network. 
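Looking back at the Config plumbing above: sanitize deliberately clamps unusable values to DefaultConfig instead of failing construction. A self-contained sketch of the same clamp-to-default pattern, with a pared-down stand-in for the real Config type (illustrative names only):

    package main

    import (
    	"fmt"
    	"time"
    )

    // cfg is a simplified stand-in for the pool's Config.
    type cfg struct {
    	PriceLimit uint64
    	PriceBump  uint64
    	Lifetime   time.Duration
    }

    var defaults = cfg{PriceLimit: 1, PriceBump: 10, Lifetime: 10 * time.Minute}

    // sanitize clamps invalid fields back to the defaults, as the pool does,
    // rather than rejecting the configuration outright.
    func (c cfg) sanitize() cfg {
    	if c.PriceLimit < 1 {
    		c.PriceLimit = defaults.PriceLimit
    	}
    	if c.PriceBump < 1 {
    		c.PriceBump = defaults.PriceBump
    	}
    	if c.Lifetime < 1 {
    		c.Lifetime = defaults.Lifetime
    	}
    	return c
    }

    func main() {
    	fmt.Printf("%+v\n", cfg{}.sanitize()) // zero values all fall back to defaults
    }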
+func NewTxPool(config Config, chainconfig *params.ChainConfig, chain blockChain) *TxPool { + // Sanitize the input to ensure no vulnerable gas prices are set + config = (&config).sanitize() - // Terminate the reset loop and wait for it to finish - errc := make(chan error) - p.quit <- errc - if err := <-errc; err != nil { - errs = append(errs, err) + // Create the transaction pool with its initial settings + pool := &TxPool{ + config: config, + chainconfig: chainconfig, + chain: chain, + signer: types.LatestSigner(chainconfig), + pending: make(map[common.Address]*list), + queue: make(map[common.Address]*list), + beats: make(map[common.Address]time.Time), + all: newLookup(), + chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), + reqResetCh: make(chan *txpoolResetRequest), + reqPromoteCh: make(chan *accountSet), + queueTxEventCh: make(chan *types.Transaction), + reorgDoneCh: make(chan chan struct{}), + reorgShutdownCh: make(chan struct{}), + initDoneCh: make(chan struct{}), + generalShutdownChan: make(chan struct{}), + gasPrice: new(big.Int).SetUint64(config.PriceLimit), + } + pool.locals = newAccountSet(pool.signer) + for _, addr := range config.Locals { + log.Info("Setting new local account", "address", addr) + pool.locals.add(addr) } + pool.priced = newPricedList(pool.all) + pool.reset(nil, chain.CurrentBlock()) - // Terminate each subpool - for _, subpool := range p.subpools { - if err := subpool.Close(); err != nil { - errs = append(errs, err) + // Start the reorg loop early so it can handle requests generated during journal loading. + pool.wg.Add(1) + go pool.scheduleReorgLoop() + + // If local transactions and journaling is enabled, load from disk + if !config.NoLocals && config.Journal != "" { + pool.journal = newTxJournal(config.Journal) + + if err := pool.journal.load(pool.AddLocals); err != nil { + log.Warn("Failed to load transaction journal", "err", err) + } + if err := pool.journal.rotate(pool.local()); err != nil { + log.Warn("Failed to rotate transaction journal", "err", err) } } - if len(errs) > 0 { - return fmt.Errorf("subpool close errors: %v", errs) - } - return nil + + // Subscribe events from blockchain and start the main event loop. + pool.chainHeadSub = pool.chain.SubscribeChainHeadEvent(pool.chainHeadCh) + pool.wg.Add(1) + go pool.loop() + + pool.startPeriodicFeeUpdate() + + return pool } // loop is the transaction pool's main event loop, waiting for and reacting to // outside blockchain events as well as for various reporting and transaction // eviction events. 
-func (p *TxPool) loop(head *types.Header, chain BlockChain) { - // Subscribe to chain head events to trigger subpool resets - var ( - newHeadCh = make(chan core.ChainHeadEvent) - newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) - ) - defer newHeadSub.Unsubscribe() +func (pool *TxPool) loop() { + defer pool.wg.Done() - // Track the previous and current head to feed to an idle reset - var ( - oldHead = head - newHead = oldHead - ) - // Consume chain head events and start resets when none is running var ( - resetBusy = make(chan struct{}, 1) // Allow 1 reset to run concurrently - resetDone = make(chan *types.Header) + prevPending, prevQueued, prevStales int + // Start the stats reporting and transaction eviction tickers + report = time.NewTicker(statsReportInterval) + evict = time.NewTicker(evictionInterval) + journal = time.NewTicker(pool.config.Rejournal) + // Track the previous head headers for transaction reorgs + head = pool.chain.CurrentBlock() ) - var errc chan error - for errc == nil { - // Something interesting might have happened, run a reset if there is - // one needed but none is running. The resetter will run on its own - // goroutine to allow chain head events to be consumed contiguously. - if newHead != oldHead { - // Try to inject a busy marker and start a reset if successful - select { - case resetBusy <- struct{}{}: - // Busy marker injected, start a new subpool reset - go func(oldHead, newHead *types.Header) { - for _, subpool := range p.subpools { - subpool.Reset(oldHead, newHead) + defer report.Stop() + defer evict.Stop() + defer journal.Stop() + + // Notify tests that the init phase is done + close(pool.initDoneCh) + for { + select { + // Handle ChainHeadEvent + case ev := <-pool.chainHeadCh: + if ev.Block != nil { + pool.requestReset(head, ev.Block.Header()) + head = ev.Block.Header() + pool.headFeed.Send(core.NewTxPoolHeadEvent{Head: head}) + } + + // System shutdown. 
+ case <-pool.chainHeadSub.Err(): + close(pool.reorgShutdownCh) + return + + // Handle stats reporting ticks + case <-report.C: + pool.mu.RLock() + pending, queued := pool.stats() + pool.mu.RUnlock() + stales := int(pool.priced.stales.Load()) + + if pending != prevPending || queued != prevQueued || stales != prevStales { + log.Debug("Transaction pool status report", "executable", pending, "queued", queued, "stales", stales) + prevPending, prevQueued, prevStales = pending, queued, stales + } + + // Handle inactive account transaction eviction + case <-evict.C: + pool.mu.Lock() + for addr := range pool.queue { + // Skip local transactions from the eviction mechanism + if pool.locals.contains(addr) { + continue + } + // Any non-locals old enough should be removed + if time.Since(pool.beats[addr]) > pool.config.Lifetime { + list := pool.queue[addr].Flatten() + for _, tx := range list { + pool.removeTx(tx.Hash(), true) } - resetDone <- newHead - p.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: newHead}) - }(oldHead, newHead) + queuedEvictionMeter.Mark(int64(len(list))) + } + } + pool.mu.Unlock() - default: - // Reset already running, wait until it finishes + // Handle local transaction journal rotation + case <-journal.C: + if pool.journal != nil { + pool.mu.Lock() + if err := pool.journal.rotate(pool.local()); err != nil { + log.Warn("Failed to rotate local tx journal", "err", err) + } + pool.mu.Unlock() } } - // Wait for the next chain head event or a previous reset finish - select { - case event := <-newHeadCh: - // Chain moved forward, store the head for later consumption - newHead = event.Block.Header() + } +} - case head := <-resetDone: - // Previous reset finished, update the old head and allow a new reset - oldHead = head - <-resetBusy +// Stop terminates the transaction pool. +func (pool *TxPool) Stop() { + // Unsubscribe all subscriptions registered from txpool + pool.scope.Close() - case errc = <-p.quit: - // Termination requested, break out on the next loop round - } + close(pool.generalShutdownChan) + // Unsubscribe subscriptions registered from blockchain + pool.chainHeadSub.Unsubscribe() + pool.wg.Wait() + + if pool.journal != nil { + pool.journal.close() } - // Notify the closer of termination (no error possible for now) - errc <- nil + log.Info("Transaction pool stopped") } -// GasTip returns the current gas tip enforced by the transaction pool. -func (p *TxPool) GasTip() *big.Int { - return new(big.Int).Set(p.gasTip.Load()) +// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and +// starts sending event to the given channel. +func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { + return pool.scope.Track(pool.txFeed.Subscribe(ch)) } -// SetGasTip updates the minimum gas tip required by the transaction pool for a -// new transaction, and drops all transactions below this threshold. -func (p *TxPool) SetGasTip(tip *big.Int) { - p.gasTip.Store(new(big.Int).Set(tip)) +// SubscribeNewHeadEvent registers a subscription of NewHeadEvent and +// starts sending event to the given channel. +func (pool *TxPool) SubscribeNewHeadEvent(ch chan<- core.NewTxPoolHeadEvent) event.Subscription { + return pool.scope.Track(pool.headFeed.Subscribe(ch)) +} - for _, subpool := range p.subpools { - subpool.SetGasTip(tip) - } +// SubscribeNewReorgEvent registers a subscription of NewReorgEvent and +// starts sending event to the given channel. 
+func (pool *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription { + return pool.scope.Track(pool.reorgFeed.Subscribe(ch)) } -// SetMinFee updates the minimum fee required by the transaction pool for a -// new transaction, and drops all transactions below this threshold. -func (p *TxPool) SetMinFee(fee *big.Int) { - for _, subpool := range p.subpools { - subpool.SetMinFee(fee) - } +// GasPrice returns the current gas price enforced by the transaction pool. +func (pool *TxPool) GasPrice() *big.Int { + pool.mu.RLock() + defer pool.mu.RUnlock() + + return new(big.Int).Set(pool.gasPrice) } -// Has returns an indicator whether the pool has a transaction cached with the -// given hash. -func (p *TxPool) Has(hash common.Hash) bool { - for _, subpool := range p.subpools { - if subpool.Has(hash) { - return true +// SetGasPrice updates the minimum price required by the transaction pool for a +// new transaction, and drops all transactions below this threshold. +func (pool *TxPool) SetGasPrice(price *big.Int) { + pool.mu.Lock() + defer pool.mu.Unlock() + + old := pool.gasPrice + pool.gasPrice = price + // if the min miner fee increased, remove transactions below the new threshold + if price.Cmp(old) > 0 { + // pool.priced is sorted by GasFeeCap, so we have to iterate through pool.all instead + drop := pool.all.RemotesBelowTip(price) + for _, tx := range drop { + pool.removeTx(tx.Hash(), false) } + pool.priced.Removed(len(drop)) } - return false + + log.Info("Transaction pool price threshold updated", "price", price) } -// HasLocal returns an indicator whether the pool has a local transaction cached -// with the given hash. -func (p *TxPool) HasLocal(hash common.Hash) bool { - for _, subpool := range p.subpools { - if subpool.HasLocal(hash) { - return true - } - } - return false +func (pool *TxPool) SetMinFee(minFee *big.Int) { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.minimumFee = minFee } -// Get returns a transaction if it is contained in the pool, or nil otherwise. -func (p *TxPool) Get(hash common.Hash) *Transaction { - for _, subpool := range p.subpools { - if tx := subpool.Get(hash); tx != nil { - return tx - } - } - return nil +// Nonce returns the next nonce of an account, with all transactions executable +// by the pool already applied on top. +func (pool *TxPool) Nonce(addr common.Address) uint64 { + pool.mu.RLock() + defer pool.mu.RUnlock() + + return pool.pendingNonces.get(addr) } -// Add enqueues a batch of transactions into the pool if they are valid. Due -// to the large transaction churn, add may postpone fully integrating the tx -// to a later point to batch multiple ones together. -func (p *TxPool) Add(txs []*Transaction, local bool, sync bool) []error { - // Split the input transactions between the subpools. It shouldn't really - // happen that we receive merged batches, but better graceful than strange - // errors. - // - // We also need to track how the transactions were split across the subpools, - // so we can piece back the returned errors into the original order. - txsets := make([][]*Transaction, len(p.subpools)) - splits := make([]int, len(txs)) +// Stats retrieves the current pool stats, namely the number of pending and the +// number of queued (non-executable) transactions. 
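Worth noting about SetGasPrice above: eviction only happens when the floor rises, and it iterates pool.all because pool.priced is ordered by GasFeeCap rather than tip. A minimal sketch of the tip filter itself, with a plain slice standing in for the pool's lookup structures:

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // dropBelow partitions tips around a raised price floor, echoing how
    // SetGasPrice removes transactions under the new threshold.
    func dropBelow(tips []*big.Int, floor *big.Int) (kept, dropped []*big.Int) {
    	for _, tip := range tips {
    		if tip.Cmp(floor) < 0 {
    			dropped = append(dropped, tip)
    		} else {
    			kept = append(kept, tip)
    		}
    	}
    	return kept, dropped
    }

    func main() {
    	tips := []*big.Int{big.NewInt(1), big.NewInt(5), big.NewInt(20)}
    	kept, dropped := dropBelow(tips, big.NewInt(5))
    	fmt.Println("kept:", kept, "dropped:", dropped) // kept: [5 20] dropped: [1]
    }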
+func (pool *TxPool) Stats() (int, int) { + pool.mu.RLock() + defer pool.mu.RUnlock() - for i, tx := range txs { - // Mark this transaction belonging to no-subpool - splits[i] = -1 - - // Try to find a subpool that accepts the transaction - for j, subpool := range p.subpools { - if subpool.Filter(tx.Tx) { - txsets[j] = append(txsets[j], tx) - splits[i] = j - break - } - } + return pool.stats() +} + +// stats retrieves the current pool stats, namely the number of pending and the +// number of queued (non-executable) transactions. +func (pool *TxPool) stats() (int, int) { + pending := 0 + for _, list := range pool.pending { + pending += list.Len() } - // Add the transactions split apart to the individual subpools and piece - // back the errors into the original sort order. - errsets := make([][]error, len(p.subpools)) - for i := 0; i < len(p.subpools); i++ { - errsets[i] = p.subpools[i].Add(txsets[i], local, sync) + queued := 0 + for _, list := range pool.queue { + queued += list.Len() } - errs := make([]error, len(txs)) - for i, split := range splits { - // If the transaction was rejected by all subpools, mark it unsupported - if split == -1 { - errs[i] = core.ErrTxTypeNotSupported - continue - } - // Find which subpool handled it and pull in the corresponding error - errs[i] = errsets[split][0] - errsets[split] = errsets[split][1:] + return pending, queued +} + +// Content retrieves the data content of the transaction pool, returning all the +// pending as well as queued transactions, grouped by account and sorted by nonce. +func (pool *TxPool) Content() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { + pool.mu.Lock() + defer pool.mu.Unlock() + + pending := make(map[common.Address]types.Transactions, len(pool.pending)) + for addr, list := range pool.pending { + pending[addr] = list.Flatten() } - return errs + queued := make(map[common.Address]types.Transactions, len(pool.queue)) + for addr, list := range pool.queue { + queued[addr] = list.Flatten() + } + return pending, queued } -func (p *TxPool) AddRemotesSync(txs []*types.Transaction) []error { - wrapped := make([]*Transaction, len(txs)) - for i, tx := range txs { - wrapped[i] = &Transaction{Tx: tx} +// ContentFrom retrieves the data content of the transaction pool, returning the +// pending as well as queued transactions of this address, grouped by nonce. +func (pool *TxPool) ContentFrom(addr common.Address) (types.Transactions, types.Transactions) { + pool.mu.RLock() + defer pool.mu.RUnlock() + + var pending types.Transactions + if list, ok := pool.pending[addr]; ok { + pending = list.Flatten() } - return p.Add(wrapped, false, true) + var queued types.Transactions + if list, ok := pool.queue[addr]; ok { + queued = list.Flatten() + } + return pending, queued } // Pending retrieves all currently processable transactions, grouped by origin @@ -367,20 +603,37 @@ func (p *TxPool) AddRemotesSync(txs []*types.Transaction) []error { // The enforceTips parameter can be used to do an extra filtering on the pending // transactions and only return those whose **effective** tip is large enough in // the next pending execution environment. -// account and sorted by nonce. -func (p *TxPool) Pending(enforceTips bool) map[common.Address][]*LazyTransaction { - return p.PendingWithBaseFee(enforceTips, nil) +func (pool *TxPool) Pending(enforceTips bool) map[common.Address]types.Transactions { + return pool.PendingWithBaseFee(enforceTips, nil) } // If baseFee is nil, then pool.priced.urgent.baseFee is used. 
-func (p *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address][]*LazyTransaction { - txs := make(map[common.Address][]*LazyTransaction) - for _, subpool := range p.subpools { - for addr, set := range subpool.PendingWithBaseFee(enforceTips, baseFee) { - txs[addr] = set +func (pool *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[common.Address]types.Transactions { + pool.mu.Lock() + defer pool.mu.Unlock() + + if baseFee == nil { + baseFee = pool.priced.urgent.baseFee + } + + pending := make(map[common.Address]types.Transactions, len(pool.pending)) + for addr, list := range pool.pending { + txs := list.Flatten() + + // If the miner requests tip enforcement, cap the lists now + if enforceTips && !pool.locals.contains(addr) { + for i, tx := range txs { + if tx.EffectiveGasTipIntCmp(pool.gasPrice, baseFee) < 0 { + txs = txs[:i] + break + } + } + } + if len(txs) > 0 { + pending[addr] = txs } } - return txs + return pending } // PendingSize returns the number of pending txs in the tx pool. @@ -388,142 +641,1542 @@ func (p *TxPool) PendingWithBaseFee(enforceTips bool, baseFee *big.Int) map[comm // The enforceTips parameter can be used to do an extra filtering on the pending // transactions and only return those whose **effective** tip is large enough in // the next pending execution environment. -func (p *TxPool) PendingSize(enforceTips bool) int { +func (pool *TxPool) PendingSize(enforceTips bool) int { + pending := pool.Pending(enforceTips) count := 0 - for _, subpool := range p.subpools { - for _, txs := range subpool.Pending(enforceTips) { - count += len(txs) - } + for _, txs := range pending { + count += len(txs) } return count } // PendingFrom returns the same set of transactions that would be returned from Pending restricted to only // transactions from [addrs]. -func (p *TxPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address][]*LazyTransaction { - txs := make(map[common.Address][]*LazyTransaction) - for _, subpool := range p.subpools { - for addr, set := range subpool.PendingFrom(addrs, enforceTips) { - txs[addr] = set +func (pool *TxPool) PendingFrom(addrs []common.Address, enforceTips bool) map[common.Address]types.Transactions { + pool.mu.Lock() + defer pool.mu.Unlock() + + pending := make(map[common.Address]types.Transactions) + for _, addr := range addrs { + list, ok := pool.pending[addr] + if !ok { + continue + } + txs := list.Flatten() + + // If the miner requests tip enforcement, cap the lists now + if enforceTips && !pool.locals.contains(addr) { + for i, tx := range txs { + if tx.EffectiveGasTipIntCmp(pool.gasPrice, pool.priced.urgent.baseFee) < 0 { + txs = txs[:i] + break + } + } + } + if len(txs) > 0 { + pending[addr] = txs } } - return txs + return pending } // IteratePending iterates over [pool.pending] until [f] returns false. // The caller must not modify [tx]. -func (p *TxPool) IteratePending(f func(tx *Transaction) bool) { - for _, subpool := range p.subpools { - if !subpool.IteratePending(f) { - return +func (pool *TxPool) IteratePending(f func(tx *types.Transaction) bool) { + pool.mu.RLock() + defer pool.mu.RUnlock() + + for _, list := range pool.pending { + for _, tx := range list.txs.items { + if !f(tx) { + return + } } } } -// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and starts sending -// events to the given channel. 
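The enforceTips filtering above compares against the transaction's effective tip, which for EIP-1559 transactions is min(gasTipCap, gasFeeCap - baseFee). A worked sketch of that quantity (an illustrative helper, not the pool's EffectiveGasTipIntCmp):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    // effectiveTip returns what a producer actually collects per gas: the tip
    // cap, unless the fee cap minus the base fee leaves less headroom. Callers
    // are assumed to have already checked gasFeeCap >= baseFee.
    func effectiveTip(gasFeeCap, gasTipCap, baseFee *big.Int) *big.Int {
    	remainder := new(big.Int).Sub(gasFeeCap, baseFee)
    	if remainder.Cmp(gasTipCap) < 0 {
    		return remainder // fee cap limits the tip
    	}
    	return gasTipCap
    }

    func main() {
    	fmt.Println(effectiveTip(big.NewInt(50), big.NewInt(2), big.NewInt(49))) // 1
    	fmt.Println(effectiveTip(big.NewInt(50), big.NewInt(2), big.NewInt(10))) // 2
    }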
-func (p *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - subs := make([]event.Subscription, 0, len(p.subpools)) - for _, subpool := range p.subpools { - sub := subpool.SubscribeTransactions(ch) - if sub == nil { - continue +// Locals retrieves the accounts currently considered local by the pool. +func (pool *TxPool) Locals() []common.Address { + pool.mu.Lock() + defer pool.mu.Unlock() + + return pool.locals.flatten() +} + +// local retrieves all currently known local transactions, grouped by origin +// account and sorted by nonce. The returned transaction set is a copy and can be +// freely modified by calling code. +func (pool *TxPool) local() map[common.Address]types.Transactions { + txs := make(map[common.Address]types.Transactions) + for addr := range pool.locals.accounts { + if pending := pool.pending[addr]; pending != nil { + txs[addr] = append(txs[addr], pending.Flatten()...) + } + if queued := pool.queue[addr]; queued != nil { + txs[addr] = append(txs[addr], queued.Flatten()...) } - subs = append(subs, sub) } - return p.subs.Track(event.JoinSubscriptions(subs...)) + return txs } -// SubscribeNewReorgEvent registers a subscription of NewReorgEvent and -// starts sending event to the given channel. -func (p *TxPool) SubscribeNewReorgEvent(ch chan<- core.NewTxPoolReorgEvent) event.Subscription { - return p.subs.Track(p.reorgFeed.Subscribe(ch)) -} +// checks transaction validity against the current state. +func (pool *TxPool) checkTxState(from common.Address, tx *types.Transaction) error { + pool.currentStateLock.Lock() + defer pool.currentStateLock.Unlock() -// Nonce returns the next nonce of an account, with all transactions executable -// by the pool already applied on top. -func (p *TxPool) Nonce(addr common.Address) uint64 { - // Since (for now) accounts are unique to subpools, only one pool will have - // (at max) a non-state nonce. To avoid stateful lookups, just return the - // highest nonce for now. - var nonce uint64 - for _, subpool := range p.subpools { - if next := subpool.Nonce(addr); nonce < next { - nonce = next + txNonce := tx.Nonce() + // Ensure the transaction adheres to nonce ordering + if currentNonce := pool.currentState.GetNonce(from); currentNonce > txNonce { + return fmt.Errorf("%w: address %s current nonce (%d) > tx nonce (%d)", + core.ErrNonceTooLow, from.Hex(), currentNonce, txNonce) + } + + // cost == V + GP * GL + balance := pool.currentState.GetBalance(from) + if balance.Cmp(tx.Cost()) < 0 { + return fmt.Errorf("%w: address %s have (%d) want (%d)", core.ErrInsufficientFunds, from.Hex(), balance, tx.Cost()) + } + + // Verify that replacing transactions will not result in overdraft + list := pool.pending[from] + if list != nil { // Sender already has pending txs + sum := new(big.Int).Add(tx.Cost(), list.totalcost) + if repl := list.txs.Get(tx.Nonce()); repl != nil { + // Deduct the cost of a transaction replaced by this + sum.Sub(sum, repl.Cost()) + } + if balance.Cmp(sum) < 0 { + log.Trace("Replacing transactions would overdraft", "sender", from, "balance", pool.currentState.GetBalance(from), "required", sum) + return ErrOverdraft + } + } + + // If the tx allow list is enabled, return an error if the from address is not allow listed. 
+ if pool.rules.Load().IsPrecompileEnabled(txallowlist.ContractAddress) { + txAllowListRole := txallowlist.GetTxAllowListStatus(pool.currentState, from) + if !txAllowListRole.IsEnabled() { + return fmt.Errorf("%w: %s", vmerrs.ErrSenderAddressNotAllowListed, from) } } - return nonce + + return nil } -// Stats retrieves the current pool stats, namely the number of pending and the -// number of queued (non-executable) transactions. -func (p *TxPool) Stats() (int, int) { - var runnable, blocked int - for _, subpool := range p.subpools { - run, block := subpool.Stats() +// validateTxBasics checks whether a transaction is valid according to the consensus +// rules, but does not check state-dependent validation such as sufficient balance. +// This check is meant as an early check which only needs to be performed once, +// and does not require the pool mutex to be held. +func (pool *TxPool) validateTxBasics(tx *types.Transaction, local bool) error { + // Accept only legacy transactions until EIP-2718/2930 activates. + if !pool.eip2718.Load() && tx.Type() != types.LegacyTxType { + return core.ErrTxTypeNotSupported + } + // Reject dynamic fee transactions until EIP-1559 activates. + if !pool.eip1559.Load() && tx.Type() == types.DynamicFeeTxType { + return core.ErrTxTypeNotSupported + } + // Reject blob transactions forever, those will have their own pool. + if tx.Type() == types.BlobTxType { + return core.ErrTxTypeNotSupported + } + // Reject transactions over defined size to prevent DOS attacks + if tx.Size() > txMaxSize { + return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, tx.Size(), txMaxSize) + } + // Check whether the init code size has been exceeded. + if pool.eip3860.Load() && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + return fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) + } + // Transactions can't be negative. This may never happen using RLP decoded + // transactions but may occur if you create a transaction using the RPC. + if tx.Value().Sign() < 0 { + return ErrNegativeValue + } + // Ensure the transaction doesn't exceed the current block limit gas. + if txGas := tx.Gas(); pool.currentMaxGas.Load() < txGas { + return fmt.Errorf( + "%w: tx gas (%d) > current max gas (%d)", + ErrGasLimit, + txGas, + pool.currentMaxGas.Load(), + ) + } + // Sanity check for extremely large numbers + if tx.GasFeeCap().BitLen() > 256 { + return core.ErrFeeCapVeryHigh + } + if tx.GasTipCap().BitLen() > 256 { + return core.ErrTipVeryHigh + } + // Ensure gasFeeCap is greater than or equal to gasTipCap. + if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { + return core.ErrTipAboveFeeCap + } + // Make sure the transaction is signed properly. + from, err := types.Sender(pool.signer, tx) + if err != nil { + return ErrInvalidSender + } + // Drop non-local transactions under our own minimal accepted gas price or tip + if !local && tx.GasTipCapIntCmp(pool.gasPrice) < 0 { + return fmt.Errorf("%w: address %s have gas tip cap (%d) < pool gas tip cap (%d)", ErrUnderpriced, from.Hex(), tx.GasTipCap(), pool.gasPrice) + } + // Ensure the transaction has more gas than the basic tx fee. 
+	intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, *pool.rules.Load())
+	if err != nil {
+		return err
+	}
+	if txGas := tx.Gas(); txGas < intrGas {
+		return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", core.ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas)
+	}
+	return nil
+}
+
+// validateTx checks whether a transaction is valid according to the consensus
+// rules and adheres to some heuristic limits of the local node (price and size).
+func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error {
+	// Signature has been checked already, this cannot error.
+	from, _ := types.Sender(pool.signer, tx)
+	// Drop the transaction if the gas fee cap is below the pool's minimum fee
+	if pool.minimumFee != nil && tx.GasFeeCapIntCmp(pool.minimumFee) < 0 {
+		return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), pool.minimumFee)
+	}
 
+	// Ensure the transaction adheres to nonce ordering
+	// Transactor should have enough funds to cover the costs
+	if err := pool.checkTxState(from, tx); err != nil {
+		return err
+	}
+	return nil
+}
 
+// add validates a transaction and inserts it into the non-executable queue for later
+// pending promotion and execution. If the transaction is a replacement for an already
+// pending or queued one, it overwrites the previous transaction if its price is higher.
+//
+// If a newly added transaction is marked as local, its sending account will be
+// added to the allowlist, preventing any associated transaction from being dropped
+// out of the pool due to pricing constraints.
+func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err error) {
+	// If the transaction is already known, discard it
+	hash := tx.Hash()
+	if pool.all.Get(hash) != nil {
+		log.Trace("Discarding already known transaction", "hash", hash)
+		knownTxMeter.Mark(1)
+		return false, ErrAlreadyKnown
+	}
+	// Make the local flag. If it's from a local source or it's from the network but
+	// the sender is marked as local previously, treat it as the local transaction.
+	isLocal := local || pool.locals.containsTx(tx)
+
+	// If the transaction fails basic validation, discard it
+	if err := pool.validateTx(tx, isLocal); err != nil {
+		log.Trace("Discarding invalid transaction", "hash", hash, "err", err)
+		invalidTxMeter.Mark(1)
+		return false, err
+	}
+
+	// already validated by this point
+	from, _ := types.Sender(pool.signer, tx)
+
+	// If the transaction pool is full, discard underpriced transactions
+	if uint64(pool.all.Slots()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue {
+		// If the new transaction is underpriced, don't accept it
+		if !isLocal && pool.priced.Underpriced(tx) {
+			log.Trace("Discarding underpriced transaction", "hash", hash, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap())
+			underpricedTxMeter.Mark(1)
+			return false, ErrUnderpriced
+		}
+
+		// We're about to replace a transaction.
The reorg does a more thorough + // analysis of what to remove and how, but it runs async. We don't want to + // do too many replacements between reorg-runs, so we cap the number of + // replacements to 25% of the slots + if pool.changesSinceReorg > int(pool.config.GlobalSlots/4) { + throttleTxMeter.Mark(1) + return false, ErrTxPoolOverflow + } + + // New transaction is better than our worse ones, make room for it. + // If it's a local transaction, forcibly discard all available transactions. + // Otherwise if we can't make enough room for new one, abort the operation. + drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) + + // Special case, we still can't make the room for the new remote one. + if !isLocal && !success { + log.Trace("Discarding overflown transaction", "hash", hash) + overflowedTxMeter.Mark(1) + return false, ErrTxPoolOverflow + } - for addr, txs := range run { - runnable[addr] = txs + // If the new transaction is a future transaction it should never churn pending transactions + if !isLocal && pool.isGapped(from, tx) { + var replacesPending bool + for _, dropTx := range drop { + dropSender, _ := types.Sender(pool.signer, dropTx) + if list := pool.pending[dropSender]; list != nil && list.Contains(dropTx.Nonce()) { + replacesPending = true + break + } + } + // Add all transactions back to the priced queue + if replacesPending { + for _, dropTx := range drop { + pool.priced.Put(dropTx, false) + } + log.Trace("Discarding future transaction replacing pending tx", "hash", hash) + return false, ErrFutureReplacePending + } } - for addr, txs := range block { - blocked[addr] = txs + + // Kick out the underpriced remote transactions. + for _, tx := range drop { + log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap()) + underpricedTxMeter.Mark(1) + dropped := pool.removeTx(tx.Hash(), false) + pool.changesSinceReorg += dropped } } - return runnable, blocked -} -// ContentFrom retrieves the data content of the transaction pool, returning the -// pending as well as queued transactions of this address, grouped by nonce. 
-func (p *TxPool) ContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) {
-	for _, subpool := range p.subpools {
-		run, block := subpool.ContentFrom(addr)
-		if len(run) != 0 || len(block) != 0 {
-			return run, block
+	// Try to replace an existing transaction in the pending pool
+	if list := pool.pending[from]; list != nil && list.Contains(tx.Nonce()) {
+		// Nonce already pending, check if required price bump is met
+		inserted, old := list.Add(tx, pool.config.PriceBump)
+		if !inserted {
+			pendingDiscardMeter.Mark(1)
+			return false, ErrReplaceUnderpriced
 		}
+		// New transaction is better, replace old one
+		if old != nil {
+			pool.all.Remove(old.Hash())
+			pool.priced.Removed(1)
+			pendingReplaceMeter.Mark(1)
+		}
+		pool.all.Add(tx, isLocal)
+		pool.priced.Put(tx, isLocal)
+		pool.journalTx(from, tx)
+		pool.queueTxEvent(tx)
+		log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To())
+
+		// Successful promotion, bump the heartbeat
+		pool.beats[from] = time.Now()
+		return old != nil, nil
+	}
+	// New transaction isn't replacing a pending one, push into queue
+	replaced, err = pool.enqueueTx(hash, tx, isLocal, true)
+	if err != nil {
+		return false, err
+	}
+	// Mark local addresses and journal local transactions
+	if local && !pool.locals.contains(from) {
+		log.Info("Setting new local account", "address", from)
+		pool.locals.add(from)
+		pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local for the first time.
 	}
-	return []*types.Transaction{}, []*types.Transaction{}
+	if isLocal {
+		localGauge.Inc(1)
+	}
+	pool.journalTx(from, tx)
+
+	log.Trace("Pooled new future transaction", "hash", hash, "from", from, "to", tx.To())
+	return replaced, nil
 }
 
-// Locals retrieves the accounts currently considered local by the pool.
-func (p *TxPool) Locals() []common.Address {
-	// Retrieve the locals from each subpool and deduplicate them
-	locals := make(map[common.Address]struct{})
-	for _, subpool := range p.subpools {
-		for _, local := range subpool.Locals() {
-			locals[local] = struct{}{}
+// isGapped reports whether the given transaction is gapped, i.e. whether a
+// nonce hole prevents it from becoming immediately executable.
+func (pool *TxPool) isGapped(from common.Address, tx *types.Transaction) bool {
+	// Short circuit if transaction matches pending nonce and can be promoted
+	// to pending list as an executable transaction.
+	next := pool.pendingNonces.get(from)
+	if tx.Nonce() == next {
+		return false
+	}
+	// The transaction has a nonce gap with pending list, it's only considered
+	// as executable if transactions in queue can fill up the nonce gap.
+	queue, ok := pool.queue[from]
+	if !ok {
+		return true
+	}
+	for nonce := next; nonce < tx.Nonce(); nonce++ {
+		if !queue.Contains(nonce) {
+			return true // txs in queue can't fill up the nonce gap
 		}
 	}
-	// Flatten and return the deduplicated local set
-	flat := make([]common.Address, 0, len(locals))
-	for local := range locals {
-		flat = append(flat, local)
+	return false
+}
+
+// enqueueTx inserts a new transaction into the non-executable transaction queue.
+//
+// Note, this method assumes the pool lock is held!
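On isGapped above: a candidate is acceptable either when its nonce is exactly the next pending one, or when the queue already holds every nonce in between. A self-contained restatement, with a plain set standing in for the queue's list type:

    package main

    import "fmt"

    // hasNonceGap mirrors the shape of the pool's isGapped check: a tx is
    // gapped unless every nonce between the next executable nonce and the
    // tx's nonce is already queued.
    func hasNonceGap(next, txNonce uint64, queued map[uint64]bool) bool {
    	if txNonce == next {
    		return false // immediately executable
    	}
    	for n := next; n < txNonce; n++ {
    		if !queued[n] {
    			return true // a hole the queued txs cannot fill
    		}
    	}
    	return false
    }

    func main() {
    	queued := map[uint64]bool{5: true, 6: true}
    	fmt.Println(hasNonceGap(5, 7, queued)) // false: nonces 5 and 6 fill the gap
    	fmt.Println(hasNonceGap(5, 8, queued)) // true: nonce 7 is missing
    }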
+func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) {
+	// Try to insert the transaction into the future queue
+	from, _ := types.Sender(pool.signer, tx) // already validated
+	if pool.queue[from] == nil {
+		pool.queue[from] = newList(false)
+	}
+	inserted, old := pool.queue[from].Add(tx, pool.config.PriceBump)
+	if !inserted {
+		// An older transaction was better, discard this
+		queuedDiscardMeter.Mark(1)
+		return false, ErrReplaceUnderpriced
+	}
+	// Discard any previous transaction and mark this
+	if old != nil {
+		pool.all.Remove(old.Hash())
+		pool.priced.Removed(1)
+		queuedReplaceMeter.Mark(1)
+	} else {
+		// Nothing was replaced, bump the queued counter
+		queuedGauge.Inc(1)
+	}
+	// If the transaction isn't in the lookup set but is expected to be there,
+	// log an error.
+	if pool.all.Get(hash) == nil && !addAll {
+		log.Error("Missing transaction in lookup set, please report the issue", "hash", hash)
+	}
+	if addAll {
+		pool.all.Add(tx, local)
+		pool.priced.Put(tx, local)
+	}
+	// If we never record the heartbeat, do it right now.
+	if _, exist := pool.beats[from]; !exist {
+		pool.beats[from] = time.Now()
+	}
+	return old != nil, nil
+}
+
+// journalTx adds the specified transaction to the local disk journal if it is
+// deemed to have been sent from a local account.
+func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) {
+	// Only journal if it's enabled and the transaction is local
+	if pool.journal == nil || !pool.locals.contains(from) {
+		return
+	}
+	if err := pool.journal.insert(tx); err != nil {
+		log.Warn("Failed to journal local transaction", "err", err)
+	}
+}
+
+// promoteTx adds a transaction to the pending (processable) list of transactions
+// and returns whether it was inserted or an older one was better.
+//
+// Note, this method assumes the pool lock is held!
+func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool {
+	// Try to insert the transaction into the pending queue
+	if pool.pending[addr] == nil {
+		pool.pending[addr] = newList(true)
+	}
+	list := pool.pending[addr]
+
+	inserted, old := list.Add(tx, pool.config.PriceBump)
+	if !inserted {
+		// An older transaction was better, discard this
+		pool.all.Remove(hash)
+		pool.priced.Removed(1)
+		pendingDiscardMeter.Mark(1)
+		return false
+	}
+	// Otherwise discard any previous transaction and mark this
+	if old != nil {
+		pool.all.Remove(old.Hash())
+		pool.priced.Removed(1)
+		pendingReplaceMeter.Mark(1)
+	} else {
+		// Nothing was replaced, bump the pending counter
+		pendingGauge.Inc(1)
+	}
+	// Set the potentially new pending nonce and notify any subsystems of the new tx
+	pool.pendingNonces.set(addr, tx.Nonce()+1)
+
+	// Successful promotion, bump the heartbeat
+	pool.beats[addr] = time.Now()
+	return true
+}
+
+// AddLocals enqueues a batch of transactions into the pool if they are valid, marking the
+// senders as local ones, ensuring they go around the local pricing constraints.
+//
+// This method is used to add transactions from the RPC API and performs synchronous pool
+// reorganization and event propagation.
+func (pool *TxPool) AddLocals(txs []*types.Transaction) []error {
+	return pool.addTxs(txs, !pool.config.NoLocals, true)
+}
+
+// AddLocal enqueues a single local transaction into the pool if it is valid. This is
+// a convenience wrapper around AddLocals.
+func (pool *TxPool) AddLocal(tx *types.Transaction) error {
+	errs := pool.AddLocals([]*types.Transaction{tx})
+	return errs[0]
+}
+
+// AddRemotes enqueues a batch of transactions into the pool if they are valid. If the
+// senders are not among the locally tracked ones, full pricing constraints will apply.
+//
+// This method is used to add transactions from the p2p network and does not wait for pool
+// reorganization and internal event propagation.
+func (pool *TxPool) AddRemotes(txs []*types.Transaction) []error {
+	return pool.addTxs(txs, false, false)
+}
+
+// AddRemotesSync is like AddRemotes, but waits for pool reorganization. Tests use this method.
+func (pool *TxPool) AddRemotesSync(txs []*types.Transaction) []error {
+	return pool.addTxs(txs, false, true)
+}
+
+// This is like AddRemotes with a single transaction, but waits for pool reorganization. Tests use this method.
+func (pool *TxPool) addRemoteSync(tx *types.Transaction) error {
+	errs := pool.AddRemotesSync([]*types.Transaction{tx})
+	return errs[0]
+}
+
+// AddRemote enqueues a single transaction into the pool if it is valid. This is a convenience
+// wrapper around AddRemotes.
+//
+// Deprecated: use AddRemotes
+func (pool *TxPool) AddRemote(tx *types.Transaction) error {
+	errs := pool.AddRemotes([]*types.Transaction{tx})
+	return errs[0]
+}
+
+// addTxs attempts to queue a batch of transactions if they are valid.
+func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
+	// Filter out known ones without obtaining the pool lock or recovering signatures
+	var (
+		errs = make([]error, len(txs))
+		news = make([]*types.Transaction, 0, len(txs))
+	)
+	for i, tx := range txs {
+		// If the transaction is known, pre-set the error slot
+		if pool.all.Get(tx.Hash()) != nil {
+			errs[i] = ErrAlreadyKnown
+			knownTxMeter.Mark(1)
+			continue
+		}
+		// Exclude transactions with basic errors, e.g. invalid signatures and
+		// insufficient intrinsic gas as soon as possible and cache senders
+		// in transactions before obtaining the lock
+
+		if err := pool.validateTxBasics(tx, local); err != nil {
+			errs[i] = err
+			invalidTxMeter.Mark(1)
+			continue
+		}
+		// Accumulate all unknown transactions for deeper processing
+		news = append(news, tx)
+	}
+	if len(news) == 0 {
+		return errs
+	}
+
+	// Process all the new transactions and merge any errors into the original slice
+	pool.mu.Lock()
+	newErrs, dirtyAddrs := pool.addTxsLocked(news, local)
+	pool.mu.Unlock()
+
+	var nilSlot = 0
+	for _, err := range newErrs {
+		for errs[nilSlot] != nil {
+			nilSlot++
+		}
+		errs[nilSlot] = err
+		nilSlot++
+	}
+	// Reorg the pool internals if needed and return
+	done := pool.requestPromoteExecutables(dirtyAddrs)
+	if sync {
+		<-done
+	}
+	return errs
+}
+
+// addTxsLocked attempts to queue a batch of transactions if they are valid.
+// The transaction pool lock must be held.
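The nilSlot loop in addTxs above deserves a second look: it works because the slots pre-filled during the cheap filtering phase are exactly the positions skipped when merging the results of addTxsLocked back in order. A compact restatement of that merge:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // mergeErrs mirrors addTxs's nilSlot loop: newErrs fills, in order, the
    // positions that were not already claimed by early pre-filter errors.
    func mergeErrs(preset []error, newErrs []error) []error {
    	nilSlot := 0
    	for _, err := range newErrs {
    		for preset[nilSlot] != nil {
    			nilSlot++
    		}
    		preset[nilSlot] = err
    		nilSlot++
    	}
    	return preset
    }

    func main() {
    	preset := []error{errors.New("already known"), nil, nil}
    	merged := mergeErrs(preset, []error{nil, errors.New("underpriced")})
    	fmt.Println(merged) // [already known <nil> underpriced]
    }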
+func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) ([]error, *accountSet) { + dirty := newAccountSet(pool.signer) + errs := make([]error, len(txs)) + for i, tx := range txs { + replaced, err := pool.add(tx, local) + errs[i] = err + if err == nil && !replaced { + dirty.addTx(tx) + } + } + validTxMeter.Mark(int64(len(dirty.accounts))) + return errs, dirty +} + +// Status returns the status (unknown/pending/queued) of a batch of transactions +// identified by their hashes. +func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { + status := make([]TxStatus, len(hashes)) + for i, hash := range hashes { + tx := pool.Get(hash) + if tx == nil { + continue + } + from, _ := types.Sender(pool.signer, tx) // already validated + pool.mu.RLock() + if txList := pool.pending[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { + status[i] = TxStatusPending + } else if txList := pool.queue[from]; txList != nil && txList.txs.items[tx.Nonce()] != nil { + status[i] = TxStatusQueued + } + // implicit else: the tx may have been included into a block between + // checking pool.Get and obtaining the lock. In that case, TxStatusUnknown is correct + pool.mu.RUnlock() + } + return status +} + +// Get returns a transaction if it is contained in the pool and nil otherwise. +func (pool *TxPool) Get(hash common.Hash) *types.Transaction { + return pool.all.Get(hash) +} + +// Has returns an indicator whether txpool has a transaction cached with the +// given hash. +func (pool *TxPool) Has(hash common.Hash) bool { + return pool.all.Get(hash) != nil +} + +// Has returns an indicator whether txpool has a local transaction cached with +// the given hash. +func (pool *TxPool) HasLocal(hash common.Hash) bool { + return pool.all.GetLocal(hash) != nil +} + +// RemoveTx removes a single transaction from the queue, moving all subsequent +// transactions back to the future queue. +func (pool *TxPool) RemoveTx(hash common.Hash) { + pool.mu.Lock() + defer pool.mu.Unlock() + + pool.removeTx(hash, true) +} + +// removeTx removes a single transaction from the queue, moving all subsequent +// transactions back to the future queue. +// Returns the number of transactions removed from the pending queue. +func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) int { + // Fetch the transaction we wish to delete + tx := pool.all.Get(hash) + if tx == nil { + return 0 + } + addr, _ := types.Sender(pool.signer, tx) // already validated during insertion + + // Remove it from the list of known transactions + pool.all.Remove(hash) + if outofbound { + pool.priced.Removed(1) + } + if pool.locals.contains(addr) { + localGauge.Dec(1) + } + // Remove the transaction from the pending lists and reset the account nonce + if pending := pool.pending[addr]; pending != nil { + if removed, invalids := pending.Remove(tx); removed { + // If no more pending transactions are left, remove the list + if pending.Empty() { + delete(pool.pending, addr) + } + // Postpone any invalidated transactions + for _, tx := range invalids { + // Internal shuffle shouldn't touch the lookup set. 
+ pool.enqueueTx(tx.Hash(), tx, false, false) + } + // Update the account nonce if needed + pool.pendingNonces.setIfLower(addr, tx.Nonce()) + // Reduce the pending counter + pendingGauge.Dec(int64(1 + len(invalids))) + return 1 + len(invalids) + } + } + // Transaction is in the future queue + if future := pool.queue[addr]; future != nil { + if removed, _ := future.Remove(tx); removed { + // Reduce the queued counter + queuedGauge.Dec(1) + } + if future.Empty() { + delete(pool.queue, addr) + delete(pool.beats, addr) + } + } + return 0 +} + +// requestReset requests a pool reset to the new head block. +// The returned channel is closed when the reset has occurred. +func (pool *TxPool) requestReset(oldHead *types.Header, newHead *types.Header) chan struct{} { + select { + case pool.reqResetCh <- &txpoolResetRequest{oldHead, newHead}: + return <-pool.reorgDoneCh + case <-pool.reorgShutdownCh: + return pool.reorgShutdownCh + } +} + +// requestPromoteExecutables requests transaction promotion checks for the given addresses. +// The returned channel is closed when the promotion checks have occurred. +func (pool *TxPool) requestPromoteExecutables(set *accountSet) chan struct{} { + select { + case pool.reqPromoteCh <- set: + return <-pool.reorgDoneCh + case <-pool.reorgShutdownCh: + return pool.reorgShutdownCh + } +} + +// queueTxEvent enqueues a transaction event to be sent in the next reorg run. +func (pool *TxPool) queueTxEvent(tx *types.Transaction) { + select { + case pool.queueTxEventCh <- tx: + case <-pool.reorgShutdownCh: + } +} + +// scheduleReorgLoop schedules runs of reset and promoteExecutables. Code above should not +// call those methods directly, but request them being run using requestReset and +// requestPromoteExecutables instead. +func (pool *TxPool) scheduleReorgLoop() { + defer pool.wg.Done() + + var ( + curDone chan struct{} // non-nil while runReorg is active + nextDone = make(chan struct{}) + launchNextRun bool + reset *txpoolResetRequest + dirtyAccounts *accountSet + queuedEvents = make(map[common.Address]*sortedMap) + ) + for { + // Launch next background reorg if needed + if curDone == nil && launchNextRun { + // Run the background reorg and announcements + go pool.runReorg(nextDone, reset, dirtyAccounts, queuedEvents) + + // Prepare everything for the next round of reorg + curDone, nextDone = nextDone, make(chan struct{}) + launchNextRun = false + + reset, dirtyAccounts = nil, nil + queuedEvents = make(map[common.Address]*sortedMap) + } + + select { + case req := <-pool.reqResetCh: + // Reset request: update head if request is already pending. + if reset == nil { + reset = req + } else { + reset.newHead = req.newHead + } + launchNextRun = true + pool.reorgDoneCh <- nextDone + + case req := <-pool.reqPromoteCh: + // Promote request: update address set if request is already pending. + if dirtyAccounts == nil { + dirtyAccounts = req + } else { + dirtyAccounts.merge(req) + } + launchNextRun = true + pool.reorgDoneCh <- nextDone + + case tx := <-pool.queueTxEventCh: + // Queue up the event, but don't schedule a reorg. It's up to the caller to + // request one later if they want the events sent. + addr, _ := types.Sender(pool.signer, tx) + if _, ok := queuedEvents[addr]; !ok { + queuedEvents[addr] = newSortedMap() + } + queuedEvents[addr].Put(tx) + + case <-curDone: + curDone = nil + + case <-pool.reorgShutdownCh: + // Wait for current run to finish. 
+ if curDone != nil { + <-curDone + } + close(nextDone) + return + } + } +} + +// runReorg runs reset and promoteExecutables on behalf of scheduleReorgLoop. +func (pool *TxPool) runReorg(done chan struct{}, reset *txpoolResetRequest, dirtyAccounts *accountSet, events map[common.Address]*sortedMap) { + defer func(t0 time.Time) { + reorgDurationTimer.Update(time.Since(t0)) + }(time.Now()) + defer close(done) + + var promoteAddrs []common.Address + if dirtyAccounts != nil && reset == nil { + // Only dirty accounts need to be promoted, unless we're resetting. + // For resets, all addresses in the tx queue will be promoted and + // the flatten operation can be avoided. + promoteAddrs = dirtyAccounts.flatten() + } + pool.mu.Lock() + if reset != nil { + // Reset from the old head to the new, rescheduling any reorged transactions + pool.reset(reset.oldHead, reset.newHead) + + // Nonces were reset, discard any events that became stale + for addr := range events { + events[addr].Forward(pool.pendingNonces.get(addr)) + if events[addr].Len() == 0 { + delete(events, addr) + } + } + // Reset needs promote for all addresses + promoteAddrs = make([]common.Address, 0, len(pool.queue)) + for addr := range pool.queue { + promoteAddrs = append(promoteAddrs, addr) + } + } + // Check for pending transactions for every account that sent new ones + promoted := pool.promoteExecutables(promoteAddrs) + + // If a new block appeared, validate the pool of pending transactions. This will + // remove any transaction that has been included in the block or was invalidated + // because of another transaction (e.g. higher gas price). + if reset != nil { + pool.demoteUnexecutables() + if reset.newHead != nil && pool.chainconfig.IsSubnetEVM(reset.newHead.Time) { + if err := pool.updateBaseFeeAt(reset.newHead); err != nil { + log.Error("error at updating base fee in tx pool", "error", err) + } + } + + // Update all accounts to the latest known pending nonce + nonces := make(map[common.Address]uint64, len(pool.pending)) + for addr, list := range pool.pending { + highestPending := list.LastElement() + nonces[addr] = highestPending.Nonce() + 1 + } + pool.pendingNonces.setAll(nonces) + } + // Ensure pool.queue and pool.pending sizes stay within the configured limits. + pool.truncatePending() + pool.truncateQueue() + + dropBetweenReorgHistogram.Update(int64(pool.changesSinceReorg)) + pool.changesSinceReorg = 0 // Reset change counter + pool.mu.Unlock() + + if reset != nil && reset.newHead != nil { + pool.reorgFeed.Send(core.NewTxPoolReorgEvent{Head: reset.newHead}) + } + + // Notify subsystems for newly added transactions + for _, tx := range promoted { + addr, _ := types.Sender(pool.signer, tx) + if _, ok := events[addr]; !ok { + events[addr] = newSortedMap() + } + events[addr].Put(tx) + } + if len(events) > 0 { + var txs []*types.Transaction + for _, set := range events { + txs = append(txs, set.Flatten()...) + } + pool.txFeed.Send(core.NewTxsEvent{Txs: txs}) + } +} + +// reset retrieves the current state of the blockchain and ensures the content +// of the transaction pool is valid with regard to the chain state. 
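scheduleReorgLoop above is a coalescing single-flight loop: at most one runReorg is in flight, and requests arriving meanwhile are merged into the next run. A standalone sketch of the same shape (simplified; the real loop also hands the nextDone channel back to callers through reorgDoneCh):

    package main

    import "fmt"

    func main() {
    	requests := make(chan string)
    	quit := make(chan struct{})
    	go func() {
    		for _, r := range []string{"a", "b", "c", "d"} {
    			requests <- r
    		}
    		close(quit)
    	}()

    	var (
    		curDone chan struct{} // non-nil while a run is in flight
    		pending []string      // requests coalesced for the next run
    	)
    	for {
    		// Launch the next run only once the previous one has finished.
    		if curDone == nil && len(pending) > 0 {
    			done := make(chan struct{})
    			curDone = done
    			batch := pending
    			pending = nil
    			go func() {
    				defer close(done)
    				fmt.Println("processing:", batch)
    			}()
    		}
    		select {
    		case r := <-requests:
    			pending = append(pending, r) // coalesce while a run is active
    		case <-curDone:
    			curDone = nil
    		case <-quit:
    			if curDone != nil {
    				<-curDone // wait for the in-flight run
    			}
    			if len(pending) > 0 {
    				fmt.Println("processing:", pending)
    			}
    			return
    		}
    	}
    }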
+func (pool *TxPool) reset(oldHead, newHead *types.Header) {
+	// If we're reorging an old state, reinject all dropped transactions
+	var reinject types.Transactions
+
+	if oldHead != nil && oldHead.Hash() != newHead.ParentHash {
+		// If the reorg is too deep, avoid doing it (will happen during fast sync)
+		oldNum := oldHead.Number.Uint64()
+		newNum := newHead.Number.Uint64()
+
+		if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 {
+			log.Debug("Skipping deep transaction reorg", "depth", depth)
+		} else {
+			// Reorg seems shallow enough to pull in all transactions into memory
+			var discarded, included types.Transactions
+			var (
+				rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64())
+				add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64())
+			)
+			if rem == nil {
+				// This can happen if a setHead is performed, where we simply discard the old
+				// head from the chain.
+				// If that is the case, we don't have the lost transactions anymore, and
+				// there's nothing to add
+				if newNum >= oldNum {
+					// If we reorged to the same or a higher number, then it's not a case of setHead
+					log.Warn("Transaction pool reset with missing oldhead",
+						"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+					return
+				}
+				// If the reorg ended up on a lower number, it's indicative of setHead being the cause
+				log.Debug("Skipping transaction reset caused by setHead",
+					"old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum)
+				// We still need to update the current state so that the lost transactions can be re-added by the user
+			} else {
+				for rem.NumberU64() > add.NumberU64() {
+					discarded = append(discarded, rem.Transactions()...)
+					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+						return
+					}
+				}
+				for add.NumberU64() > rem.NumberU64() {
+					included = append(included, add.Transactions()...)
+					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+						return
+					}
+				}
+				for rem.Hash() != add.Hash() {
+					discarded = append(discarded, rem.Transactions()...)
+					if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil {
+						log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash())
+						return
+					}
+					included = append(included, add.Transactions()...)
+					if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil {
+						log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash())
+						return
+					}
+				}
+				reinject = types.TxDifference(discarded, included)
+			}
+		}
+	}
+	// Initialize the internal state to the current head
+	if newHead == nil {
+		newHead = pool.chain.CurrentBlock() // Special case during testing
+	}
+	statedb, err := pool.chain.StateAt(newHead.Root)
+	if err != nil {
+		log.Error("Failed to reset txpool state", "err", err, "root", newHead.Root)
+		return
+	}
+	pool.currentHead = newHead
+	pool.currentStateLock.Lock()
+	pool.currentState = statedb
+	pool.currentStateLock.Unlock()
+	pool.pendingNonces = newNoncer(statedb)
+	pool.currentMaxGas.Store(newHead.GasLimit)
+
+	// When we reset the txPool we should explicitly check if the fee config's min base fee has changed,
+	// so that we can correctly drop txs with a fee below minBaseFee from the tx pool.
+	if pool.chainconfig.IsPrecompileEnabled(feemanager.ContractAddress, newHead.Time) {
+		feeConfig, _, err := pool.chain.GetFeeConfigAt(newHead)
+		if err != nil {
+			log.Error("Failed to get fee config state", "err", err, "root", newHead.Root)
+			return
+		}
+		pool.minimumFee = feeConfig.MinBaseFee
+	}
+
+	// Inject any transactions discarded due to reorgs
+	log.Debug("Reinjecting stale transactions", "count", len(reinject))
+	pool.chain.SenderCacher().Recover(pool.signer, reinject)
+	pool.addTxsLocked(reinject, false)
+
+	// Update all fork indicators by the next pending block number.
+	next := new(big.Int).Add(newHead.Number, big.NewInt(1))
+	rules := pool.chainconfig.AvalancheRules(next, newHead.Time)
+
+	pool.rules.Store(&rules)
+	pool.eip2718.Store(rules.IsSubnetEVM)
+	pool.eip1559.Store(rules.IsSubnetEVM)
+	pool.eip3860.Store(rules.IsDurango)
+}
+
+// promoteExecutables moves transactions that have become processable from the
+// future queue to the set of pending transactions. During this process, all
+// invalidated transactions (low nonce, low balance) are deleted.
+func (pool *TxPool) promoteExecutables(accounts []common.Address) []*types.Transaction {
+	pool.currentStateLock.Lock()
+	defer pool.currentStateLock.Unlock()
+
+	// Track the promoted transactions to broadcast them at once
+	var promoted []*types.Transaction
+
+	// Iterate over all accounts and promote any executable transactions
+	for _, addr := range accounts {
+		list := pool.queue[addr]
+		if list == nil {
+			continue // Just in case someone calls with a non-existent account
+		}
+		// Drop all transactions that are deemed too old (low nonce)
+		forwards := list.Forward(pool.currentState.GetNonce(addr))
+		for _, tx := range forwards {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+		}
+		log.Trace("Removed old queued transactions", "count", len(forwards))
+		// Drop all transactions that are too costly (low balance or out of gas)
+		drops, _ := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
+		for _, tx := range drops {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+		}
+		log.Trace("Removed unpayable queued transactions", "count", len(drops))
+		queuedNofundsMeter.Mark(int64(len(drops)))
+
+		// Gather all executable transactions and promote them
+		readies := list.Ready(pool.pendingNonces.get(addr))
+		for _, tx := range readies {
+			hash := tx.Hash()
+			if pool.promoteTx(addr, hash, tx) {
+				promoted = append(promoted, tx)
+			}
+		}
+		log.Trace("Promoted queued transactions", "count", len(promoted))
+		queuedGauge.Dec(int64(len(readies)))
+
+		// Drop all transactions over the allowed limit
+		var caps types.Transactions
+		if !pool.locals.contains(addr) {
+			caps = list.Cap(int(pool.config.AccountQueue))
+			for _, tx := range caps {
+				hash := tx.Hash()
+				pool.all.Remove(hash)
+				log.Trace("Removed cap-exceeding queued transaction", "hash", hash)
+			}
+			queuedRateLimitMeter.Mark(int64(len(caps)))
+		}
+		// Mark all the items dropped as removed
+		pool.priced.Removed(len(forwards) + len(drops) + len(caps))
+		queuedGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		if pool.locals.contains(addr) {
+			localGauge.Dec(int64(len(forwards) + len(drops) + len(caps)))
+		}
+		// Delete the entire queue entry if it became empty.
+		if list.Empty() {
+			delete(pool.queue, addr)
+			delete(pool.beats, addr)
+		}
+	}
+	return promoted
+}
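Promotion moves the contiguous, executable run of each account's future queue into the pending set: transactions are consumed in nonce order starting from the account's next pending nonce, and anything behind a nonce gap stays queued. A minimal, self-contained sketch of that selection rule (the tx struct and promoteReady helper are invented for illustration and are not the pool's types):

    package main

    import (
    	"fmt"
    	"sort"
    )

    // tx is a hypothetical stand-in for a queued transaction.
    type tx struct{ nonce uint64 }

    // promoteReady returns the contiguous run of transactions starting at
    // nextNonce, mirroring how list.Ready feeds promoteTx above; gapped
    // transactions remain in the future queue.
    func promoteReady(queue []tx, nextNonce uint64) (ready, rest []tx) {
    	sort.Slice(queue, func(i, j int) bool { return queue[i].nonce < queue[j].nonce })
    	for _, t := range queue {
    		if t.nonce == nextNonce {
    			ready = append(ready, t)
    			nextNonce++
    		} else {
    			rest = append(rest, t)
    		}
    	}
    	return ready, rest
    }

    func main() {
    	queue := []tx{{2}, {0}, {1}, {5}} // nonce 5 is gapped and stays queued
    	ready, rest := promoteReady(queue, 0)
    	fmt.Println(len(ready), len(rest)) // 3 1
    }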
+
+// truncatePending removes transactions from the pending queue if the pool is above the
+// pending limit. The algorithm tries to reduce transaction counts by an approximately
+// equal number for all accounts with many pending transactions.
+func (pool *TxPool) truncatePending() {
+	pending := uint64(0)
+	for _, list := range pool.pending {
+		pending += uint64(list.Len())
+	}
+	if pending <= pool.config.GlobalSlots {
+		return
+	}
+
+	pendingBeforeCap := pending
+	// Assemble a spam order to penalize large transactors first
+	spammers := prque.New[int64, common.Address](nil)
+	for addr, list := range pool.pending {
+		// Only evict transactions from high rollers
+		if !pool.locals.contains(addr) && uint64(list.Len()) > pool.config.AccountSlots {
+			spammers.Push(addr, int64(list.Len()))
+		}
+	}
+	// Gradually drop transactions from offenders
+	offenders := []common.Address{}
+	for pending > pool.config.GlobalSlots && !spammers.Empty() {
+		// Retrieve the next offender if not local address
+		offender, _ := spammers.Pop()
+		offenders = append(offenders, offender)
+
+		// Equalize balances until all the same or below threshold
+		if len(offenders) > 1 {
+			// Calculate the equalization threshold for all current offenders
+			threshold := pool.pending[offender].Len()
+
+			// Iteratively reduce all offenders until below limit or threshold reached
+			for pending > pool.config.GlobalSlots && pool.pending[offenders[len(offenders)-2]].Len() > threshold {
+				for i := 0; i < len(offenders)-1; i++ {
+					list := pool.pending[offenders[i]]
+
+					caps := list.Cap(list.Len() - 1)
+					for _, tx := range caps {
+						// Drop the transaction from the global pools too
+						hash := tx.Hash()
+						pool.all.Remove(hash)
+
+						// Update the account nonce to the dropped transaction
+						pool.pendingNonces.setIfLower(offenders[i], tx.Nonce())
+						log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+					}
+					pool.priced.Removed(len(caps))
+					pendingGauge.Dec(int64(len(caps)))
+					if pool.locals.contains(offenders[i]) {
+						localGauge.Dec(int64(len(caps)))
+					}
+					pending--
+				}
+			}
+		}
+	}
+
+	// If still above threshold, reduce to limit or min allowance
+	if pending > pool.config.GlobalSlots && len(offenders) > 0 {
+		for pending > pool.config.GlobalSlots && uint64(pool.pending[offenders[len(offenders)-1]].Len()) > pool.config.AccountSlots {
+			for _, addr := range offenders {
+				list := pool.pending[addr]
+
+				caps := list.Cap(list.Len() - 1)
+				for _, tx := range caps {
+					// Drop the transaction from the global pools too
+					hash := tx.Hash()
+					pool.all.Remove(hash)
+
+					// Update the account nonce to the dropped transaction
+					pool.pendingNonces.setIfLower(addr, tx.Nonce())
+					log.Trace("Removed fairness-exceeding pending transaction", "hash", hash)
+				}
+				pool.priced.Removed(len(caps))
+				pendingGauge.Dec(int64(len(caps)))
+				if pool.locals.contains(addr) {
+					localGauge.Dec(int64(len(caps)))
+				}
+				pending--
+			}
+		}
+	}
+	pendingRateLimitMeter.Mark(int64(pendingBeforeCap - pending))
+}
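The fairness pass above is easier to see in isolation: while the pool exceeds its global slot budget, one transaction at a time is shaved from whichever sender currently has the most pending, so heavy senders converge toward each other before smaller accounts are touched. A rough standalone sketch of that effect (names invented for the example; the real loop additionally uses a priority queue, rolls back nonces, and exempts locals):

    package main

    import "fmt"

    // trimLargestFirst drops one transaction at a time from the account with
    // the most pending transactions until the pool-wide total fits the budget.
    func trimLargestFirst(counts map[string]int, globalSlots int) int {
    	total := 0
    	for _, n := range counts {
    		total += n
    	}
    	dropped := 0
    	for total > globalSlots {
    		// Find the current largest sender.
    		max, addr := 0, ""
    		for a, n := range counts {
    			if n > max {
    				max, addr = n, a
    			}
    		}
    		if max == 0 {
    			break // nothing left to drop
    		}
    		counts[addr]--
    		total--
    		dropped++
    	}
    	return dropped
    }

    func main() {
    	counts := map[string]int{"a": 10, "b": 7, "c": 2}
    	fmt.Println(trimLargestFirst(counts, 12), counts) // 7 map[a:5 b:5 c:2]
    }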
+
+// truncateQueue drops the oldest transactions in the queue if the pool is above the global queue limit.
+func (pool *TxPool) truncateQueue() {
+	queued := uint64(0)
+	for _, list := range pool.queue {
+		queued += uint64(list.Len())
+	}
+	if queued <= pool.config.GlobalQueue {
+		return
+	}
+
+	// Sort all accounts with queued transactions by heartbeat
+	addresses := make(addressesByHeartbeat, 0, len(pool.queue))
+	for addr := range pool.queue {
+		if !pool.locals.contains(addr) { // don't drop locals
+			addresses = append(addresses, addressByHeartbeat{addr, pool.beats[addr]})
+		}
+	}
+	sort.Sort(sort.Reverse(addresses))
+
+	// Drop transactions until the total is below the limit or only locals remain
+	for drop := queued - pool.config.GlobalQueue; drop > 0 && len(addresses) > 0; {
+		addr := addresses[len(addresses)-1]
+		list := pool.queue[addr.address]
+
+		addresses = addresses[:len(addresses)-1]
+
+		// Drop all of the account's transactions if they number fewer than the overflow
+		if size := uint64(list.Len()); size <= drop {
+			for _, tx := range list.Flatten() {
+				pool.removeTx(tx.Hash(), true)
+			}
+			drop -= size
+			queuedRateLimitMeter.Mark(int64(size))
+			continue
+		}
+		// Otherwise drop only the last few transactions
+		txs := list.Flatten()
+		for i := len(txs) - 1; i >= 0 && drop > 0; i-- {
+			pool.removeTx(txs[i].Hash(), true)
+			drop--
+			queuedRateLimitMeter.Mark(1)
+		}
+	}
+}
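Queue eviction is ordered by heartbeat, the timestamp of each sender's last activity: the reverse-sorted address list is consumed from its tail, so the coldest accounts lose queued transactions first, while locals are never enrolled at all. A toy illustration of just the ordering, with invented names:

    package main

    import (
    	"fmt"
    	"sort"
    	"time"
    )

    // beat pairs an account with its last-activity timestamp, like
    // addressByHeartbeat in the pool.
    type beat struct {
    	addr string
    	last time.Time
    }

    func main() {
    	now := time.Now()
    	beats := []beat{
    		{"warm", now.Add(-time.Minute)},
    		{"cold", now.Add(-time.Hour)},
    		{"hot", now},
    	}
    	// Oldest heartbeat first: these accounts are evicted from first.
    	sort.Slice(beats, func(i, j int) bool { return beats[i].last.Before(beats[j].last) })
    	for _, b := range beats {
    		fmt.Println(b.addr) // cold, then warm, then hot
    	}
    }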
+
+// demoteUnexecutables removes invalid and processed transactions from the pool's
+// executable/pending queue, and any subsequent transactions that become unexecutable
+// are moved back into the future queue.
+//
+// Note: transactions are not marked as removed in the priced list because re-heaping
+// is always explicitly triggered by SetBaseFee and it would be unnecessary and wasteful
+// to trigger a re-heap in this function
+func (pool *TxPool) demoteUnexecutables() {
+	pool.currentStateLock.Lock()
+	defer pool.currentStateLock.Unlock()
+
+	// Iterate over all accounts and demote any non-executable transactions
+	for addr, list := range pool.pending {
+		nonce := pool.currentState.GetNonce(addr)
+
+		// Drop all transactions that are deemed too old (low nonce)
+		olds := list.Forward(nonce)
+		for _, tx := range olds {
+			hash := tx.Hash()
+			pool.all.Remove(hash)
+			log.Trace("Removed old pending transaction", "hash", hash)
+		}
+		// Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later
+		drops, invalids := list.Filter(pool.currentState.GetBalance(addr), pool.currentMaxGas.Load())
+		for _, tx := range drops {
+			hash := tx.Hash()
+			log.Trace("Removed unpayable pending transaction", "hash", hash)
+			pool.all.Remove(hash)
+		}
+		pendingNofundsMeter.Mark(int64(len(drops)))
+
+		for _, tx := range invalids {
+			hash := tx.Hash()
+			log.Trace("Demoting pending transaction", "hash", hash)
+
+			// Internal shuffle shouldn't touch the lookup set.
+			pool.enqueueTx(hash, tx, false, false)
+		}
+		pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		if pool.locals.contains(addr) {
+			localGauge.Dec(int64(len(olds) + len(drops) + len(invalids)))
+		}
+		// If there's a gap in front, alert (should never happen) and postpone all transactions
+		if list.Len() > 0 && list.txs.Get(nonce) == nil {
+			gapped := list.Cap(0)
+			for _, tx := range gapped {
+				hash := tx.Hash()
+				log.Error("Demoting invalidated transaction", "hash", hash)
+
+				// Internal shuffle shouldn't touch the lookup set.
+				pool.enqueueTx(hash, tx, false, false)
+			}
+			pendingGauge.Dec(int64(len(gapped)))
+		}
+		// Delete the entire pending entry if it became empty.
+		if list.Empty() {
+			delete(pool.pending, addr)
+		}
+	}
+}
+
+func (pool *TxPool) startPeriodicFeeUpdate() {
+	if pool.chainconfig.SubnetEVMTimestamp == nil {
+		return
+	}
+
+	// Call updateBaseFee here to ensure that there is not a [baseFeeUpdateInterval] delay
+	// when starting up in Subnet EVM before the base fee is updated.
+	if time.Now().After(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp)) {
+		pool.updateBaseFee()
+	}
+
+	pool.wg.Add(1)
+	go pool.periodicBaseFeeUpdate()
+}
+
+func (pool *TxPool) periodicBaseFeeUpdate() {
+	defer pool.wg.Done()
+
+	// Sleep until it's time to start the periodic base fee update or the tx pool is shutting down
+	select {
+	case <-time.After(time.Until(utils.Uint64ToTime(pool.chainconfig.SubnetEVMTimestamp))):
+	case <-pool.generalShutdownChan:
+		return // Return early if shutting down
+	}
+
+	// Update the base fee every [baseFeeUpdateInterval]
+	// and shutdown when [generalShutdownChan] is closed by Stop()
+	for {
+		select {
+		case <-time.After(baseFeeUpdateInterval):
+			pool.updateBaseFee()
+		case <-pool.generalShutdownChan:
+			return
+		}
+	}
+}
+
+func (pool *TxPool) updateBaseFee() {
+	pool.mu.Lock()
+	defer pool.mu.Unlock()
+
+	err := pool.updateBaseFeeAt(pool.currentHead)
+	if err != nil {
+		log.Error("failed to update base fee", "currentHead", pool.currentHead.Hash(), "err", err)
+	}
+}
+
+// updateBaseFeeAt assumes the pool lock is already held
+func (pool *TxPool) updateBaseFeeAt(head *types.Header) error {
+	feeConfig, _, err := pool.chain.GetFeeConfigAt(head)
+	if err != nil {
+		return err
+	}
+	_, baseFeeEstimate, err := dummy.EstimateNextBaseFee(pool.chainconfig, feeConfig, head, uint64(time.Now().Unix()))
+	if err != nil {
+		return err
+	}
+	pool.priced.SetBaseFee(baseFeeEstimate)
+	return nil
+}
+
+// addressByHeartbeat is an account address tagged with its last activity timestamp.
+type addressByHeartbeat struct {
+	address   common.Address
+	heartbeat time.Time
+}
+
+type addressesByHeartbeat []addressByHeartbeat
+
+func (a addressesByHeartbeat) Len() int           { return len(a) }
+func (a addressesByHeartbeat) Less(i, j int) bool { return a[i].heartbeat.Before(a[j].heartbeat) }
+func (a addressesByHeartbeat) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// accountSet is simply a set of addresses to check for existence, and a signer
+// capable of deriving addresses from transactions.
+type accountSet struct {
+	accounts map[common.Address]struct{}
+	signer   types.Signer
+	cache    *[]common.Address
+}
+
+// newAccountSet creates a new address set with an associated signer for sender
+// derivations.
+func newAccountSet(signer types.Signer, addrs ...common.Address) *accountSet {
+	as := &accountSet{
+		accounts: make(map[common.Address]struct{}, len(addrs)),
+		signer:   signer,
+	}
+	for _, addr := range addrs {
+		as.add(addr)
+	}
+	return as
+}
+
+// contains checks if a given address is contained within the set.
+func (as *accountSet) contains(addr common.Address) bool {
+	_, exist := as.accounts[addr]
+	return exist
+}
+
+// containsTx checks if the sender of a given tx is within the set. If the sender
+// cannot be derived, this method returns false.
+func (as *accountSet) containsTx(tx *types.Transaction) bool {
+	if addr, err := types.Sender(as.signer, tx); err == nil {
+		return as.contains(addr)
+	}
+	return false
+}
+
+// add inserts a new address into the set to track.
+func (as *accountSet) add(addr common.Address) {
+	as.accounts[addr] = struct{}{}
+	as.cache = nil
+}
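accountSet memoizes its flattened address list: add (above) and merge (below) invalidate the cache, and flatten (below) rebuilds it lazily and hands out the shared slice. A compact sketch of that memoized-set pattern using plain strings (a toy type, not the pool's):

    package main

    import "fmt"

    // stringSet caches its flattened contents the way accountSet does:
    // every mutation clears the cache and flatten rebuilds it on demand.
    type stringSet struct {
    	items map[string]struct{}
    	cache *[]string
    }

    func newStringSet() *stringSet {
    	return &stringSet{items: make(map[string]struct{})}
    }

    func (s *stringSet) add(v string) {
    	s.items[v] = struct{}{}
    	s.cache = nil // invalidate the cached slice
    }

    func (s *stringSet) flatten() []string {
    	if s.cache == nil {
    		out := make([]string, 0, len(s.items))
    		for v := range s.items {
    			out = append(out, v)
    		}
    		s.cache = &out
    	}
    	return *s.cache // callers must not modify the returned slice
    }

    func main() {
    	s := newStringSet()
    	s.add("a")
    	s.add("b")
    	fmt.Println(len(s.flatten())) // 2, computed once
    	fmt.Println(len(s.flatten())) // 2, served from the cache
    	s.add("c")                    // mutation invalidates the cache
    	fmt.Println(len(s.flatten())) // 3, recomputed
    }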
+
+// addTx adds the sender of tx into the set.
+func (as *accountSet) addTx(tx *types.Transaction) {
+	if addr, err := types.Sender(as.signer, tx); err == nil {
+		as.add(addr)
+	}
+}
+
+// flatten returns the list of addresses within this set, also caching it for later
+// reuse. The returned slice should not be changed!
+func (as *accountSet) flatten() []common.Address {
+	if as.cache == nil {
+		accounts := make([]common.Address, 0, len(as.accounts))
+		for account := range as.accounts {
+			accounts = append(accounts, account)
+		}
+		as.cache = &accounts
+	}
+	return *as.cache
+}
+
+// merge adds all addresses from the 'other' set into 'as'.
+func (as *accountSet) merge(other *accountSet) {
+	for addr := range other.accounts {
+		as.accounts[addr] = struct{}{}
+	}
+	as.cache = nil
+}
+
+// lookup is used internally by TxPool to track transactions while allowing
+// lookup without mutex contention.
+//
+// Note, although this type is properly protected against concurrent access, it
+// is **not** a type that should ever be mutated or even exposed outside of the
+// transaction pool, since its internal state is tightly coupled with the pool's
+// internal mechanisms. The sole purpose of the type is to permit out-of-bound
+// peeking into the pool in TxPool.Get without having to acquire the widely scoped
+// TxPool.mu mutex.
+//
+// This lookup set also tracks which transactions are "local", which is useful
+// for building upper-level structure.
+type lookup struct {
+	slots   int
+	lock    sync.RWMutex
+	locals  map[common.Hash]*types.Transaction
+	remotes map[common.Hash]*types.Transaction
+}
+
+// newLookup returns a new lookup structure.
+func newLookup() *lookup {
+	return &lookup{
+		locals:  make(map[common.Hash]*types.Transaction),
+		remotes: make(map[common.Hash]*types.Transaction),
+	}
+}
+
+// Range calls f on each key and value present in the map. The callback passed
+// should return whether the iteration should continue.
+// Callers need to specify which set (or both) to be iterated.
+func (t *lookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if local {
+		for key, value := range t.locals {
+			if !f(key, value, true) {
+				return
+			}
+		}
+	}
+	if remote {
+		for key, value := range t.remotes {
+			if !f(key, value, false) {
+				return
+			}
+		}
+	}
+}
+
+// Get returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) Get(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	if tx := t.locals[hash]; tx != nil {
+		return tx
+	}
+	return t.remotes[hash]
+}
+
+// GetLocal returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) GetLocal(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.locals[hash]
+}
+
+// GetRemote returns a transaction if it exists in the lookup, or nil if not found.
+func (t *lookup) GetRemote(hash common.Hash) *types.Transaction {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.remotes[hash]
+}
+
+// Count returns the current number of transactions in the lookup.
+func (t *lookup) Count() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.locals) + len(t.remotes)
+}
+
+// LocalCount returns the current number of local transactions in the lookup.
+func (t *lookup) LocalCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.locals)
+}
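The lookup's slot accounting (Slots, Add, and Remove below) charges each transaction a whole number of fixed-size slots via ceiling division, so even a tiny transaction occupies a full slot. Assuming the 32 KiB slot size of the upstream txSlotSize constant (an assumption in this sketch; the real constant is defined alongside the pool), a worked example:

    package main

    import "fmt"

    // txSlotSize mirrors the pool's slot granularity; 32 KiB is assumed here
    // from the upstream constant, not defined by this sketch's surroundings.
    const txSlotSize = 32 * 1024

    // slotsFor is the same ceiling division numSlots applies to tx.Size().
    func slotsFor(size uint64) int {
    	return int((size + txSlotSize - 1) / txSlotSize)
    }

    func main() {
    	fmt.Println(slotsFor(1))             // 1: a tiny tx still costs a slot
    	fmt.Println(slotsFor(32 * 1024))     // 1: exactly one slot
    	fmt.Println(slotsFor(32*1024 + 1))   // 2: one byte over rounds up
    	fmt.Println(slotsFor(4 * 32 * 1024)) // 4: the usual max tx size in slots
    }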
+
+// RemoteCount returns the current number of remote transactions in the lookup.
+func (t *lookup) RemoteCount() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return len(t.remotes)
+}
+
+// Slots returns the current number of slots used in the lookup.
+func (t *lookup) Slots() int {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+
+	return t.slots
+}
+
+// Add adds a transaction to the lookup.
+func (t *lookup) Add(tx *types.Transaction, local bool) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.slots += numSlots(tx)
+	slotsGauge.Update(int64(t.slots))
+
+	if local {
+		t.locals[tx.Hash()] = tx
+	} else {
+		t.remotes[tx.Hash()] = tx
+	}
+}
+
+// Remove removes a transaction from the lookup.
+func (t *lookup) Remove(hash common.Hash) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	tx, ok := t.locals[hash]
+	if !ok {
+		tx, ok = t.remotes[hash]
+	}
+	if !ok {
+		log.Error("No transaction found to be deleted", "hash", hash)
+		return
+	}
+	t.slots -= numSlots(tx)
+	slotsGauge.Update(int64(t.slots))
+
+	delete(t.locals, hash)
+	delete(t.remotes, hash)
+}
+
+// RemoteToLocals migrates the transactions belonging to the given locals into the
+// locals set. It assumes the given locals set is thread-safe to use.
+func (t *lookup) RemoteToLocals(locals *accountSet) int {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	var migrated int
+	for hash, tx := range t.remotes {
+		if locals.containsTx(tx) {
+			t.locals[hash] = tx
+			delete(t.remotes, hash)
+			migrated += 1
+		}
+	}
+	return migrated
+}
+
+// RemotesBelowTip finds all remote transactions below the given tip threshold.
+func (t *lookup) RemotesBelowTip(threshold *big.Int) types.Transactions {
+	found := make(types.Transactions, 0, 128)
+	t.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
+		if tx.GasTipCapIntCmp(threshold) < 0 {
+			found = append(found, tx)
+		}
+		return true
+	}, false, true) // Only iterate remotes
+	return found
+}
+
+// numSlots calculates the number of slots needed for a single transaction.
+func numSlots(tx *types.Transaction) int {
+	return int((tx.Size() + txSlotSize - 1) / txSlotSize)
 }
diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/txpool2_test.go
similarity index 84%
rename from core/txpool/legacypool/legacypool2_test.go
rename to core/txpool/txpool2_test.go
index 57dfeff8cc..cb0251356f 100644
--- a/core/txpool/legacypool/legacypool2_test.go
+++ b/core/txpool/txpool2_test.go
@@ -23,7 +23,7 @@
 //
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package legacypool +package txpool import ( "crypto/ecdsa" @@ -43,7 +43,7 @@ func pricedValuedTransaction(nonce uint64, value int64, gaslimit uint64, gaspric return tx } -func count(t *testing.T, pool *LegacyPool) (pending int, queued int) { +func count(t *testing.T, pool *TxPool) (pending int, queued int) { t.Helper() pending, queued = pool.stats() if err := validatePoolInternals(pool); err != nil { @@ -52,7 +52,7 @@ func count(t *testing.T, pool *LegacyPool) (pending int, queued int) { return pending, queued } -func fillPool(t testing.TB, pool *LegacyPool) { +func fillPool(t testing.TB, pool *TxPool) { t.Helper() // Create a number of test accounts, fund them and make transactions executableTxs := types.Transactions{} @@ -66,8 +66,8 @@ func fillPool(t testing.TB, pool *LegacyPool) { } } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(executableTxs) - pool.addRemotesSync(nonExecutableTxs) + pool.AddRemotesSync(executableTxs) + pool.AddRemotesSync(nonExecutableTxs) pending, queued := pool.Stats() slots := pool.all.Slots() // sanity-check that the test prerequisites are ok (pending full) @@ -89,13 +89,12 @@ func TestTransactionFutureAttack(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, eip1559Config, blockchain) + defer pool.Stop() fillPool(t, pool) pending, _ := pool.Stats() // Now, future transaction attack starts, let's add a bunch of expensive non-executables, and see if the pending-count drops @@ -107,7 +106,7 @@ func TestTransactionFutureAttack(t *testing.T) { futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 100000, big.NewInt(500), key)) } for i := 0; i < 5; i++ { - pool.addRemotesSync(futureTxs) + pool.AddRemotesSync(futureTxs) newPending, newQueued := count(t, pool) t.Logf("pending: %d queued: %d, all: %d\n", newPending, newQueued, pool.all.Slots()) } @@ -126,10 +125,9 @@ func TestTransactionFuture1559(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + defer pool.Stop() // Create a number of test accounts, fund them and make transactions fillPool(t, pool) @@ -143,7 +141,7 @@ func TestTransactionFuture1559(t *testing.T) { for j := 0; j < int(pool.config.GlobalSlots+pool.config.GlobalQueue); j++ { futureTxs = append(futureTxs, dynamicFeeTx(1000+uint64(j), 100000, big.NewInt(200), big.NewInt(101), key)) } - pool.addRemotesSync(futureTxs) + pool.AddRemotesSync(futureTxs) } newPending, _ := pool.Stats() // Pending should not have been touched @@ -159,10 +157,9 @@ func 
TestTransactionZAttack(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + defer pool.Stop() // Create a number of test accounts, fund them and make transactions fillPool(t, pool) @@ -194,7 +191,7 @@ func TestTransactionZAttack(t *testing.T) { key, _ := crypto.GenerateKey() pool.currentState.AddBalance(crypto.PubkeyToAddress(key.PublicKey), big.NewInt(100000000000)) futureTxs = append(futureTxs, pricedTransaction(1000+uint64(j), 21000, big.NewInt(500), key)) - pool.addRemotesSync(futureTxs) + pool.AddRemotesSync(futureTxs) } overDraftTxs := types.Transactions{} @@ -205,11 +202,11 @@ func TestTransactionZAttack(t *testing.T) { overDraftTxs = append(overDraftTxs, pricedValuedTransaction(uint64(j), 600000000000, 21000, big.NewInt(500), key)) } } - pool.addRemotesSync(overDraftTxs) - pool.addRemotesSync(overDraftTxs) - pool.addRemotesSync(overDraftTxs) - pool.addRemotesSync(overDraftTxs) - pool.addRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) + pool.AddRemotesSync(overDraftTxs) newPending, newQueued := count(t, pool) newIvPending := countInvalidPending() @@ -227,13 +224,12 @@ func TestTransactionZAttack(t *testing.T) { func BenchmarkFutureAttack(b *testing.B) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, eip1559Config, blockchain) + defer pool.Stop() fillPool(b, pool) key, _ := crypto.GenerateKey() @@ -245,6 +241,6 @@ func BenchmarkFutureAttack(b *testing.B) { } b.ResetTimer() for i := 0; i < 5; i++ { - pool.addRemotesSync(futureTxs) + pool.AddRemotesSync(futureTxs) } } diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/txpool_test.go similarity index 86% rename from core/txpool/legacypool/legacypool_test.go rename to core/txpool/txpool_test.go index 446ed045c5..0e4d438f35 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/txpool_test.go @@ -24,7 +24,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package legacypool +package txpool import ( "crypto/ecdsa" @@ -34,6 +34,7 @@ import ( "math/big" "math/rand" "os" + "strings" "sync" "sync/atomic" "testing" @@ -43,7 +44,6 @@ import ( "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/trie" @@ -82,21 +82,25 @@ func init() { } type testBlockChain struct { - config *params.ChainConfig - gasLimit atomic.Uint64 statedb *state.StateDB + gasLimit atomic.Uint64 chainHeadFeed *event.Feed lock sync.Mutex } -func newTestBlockChain(config *params.ChainConfig, gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { - bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: new(event.Feed)} +func newTestBlockChain(gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { + bc := testBlockChain{statedb: statedb, chainHeadFeed: chainHeadFeed} bc.gasLimit.Store(gasLimit) return &bc } -func (bc *testBlockChain) Config() *params.ChainConfig { - return bc.config +func (bc *testBlockChain) reset(statedb *state.StateDB, gasLimit uint64, chainHeadFeed *event.Feed) { + bc.lock.Lock() + defer bc.lock.Unlock() + + bc.statedb = statedb + bc.gasLimit.Store(gasLimit) + bc.chainHeadFeed = chainHeadFeed } func (bc *testBlockChain) CurrentBlock() *types.Header { @@ -168,51 +172,24 @@ func dynamicFeeTx(nonce uint64, gaslimit uint64, gasFee *big.Int, tip *big.Int, return tx } -func makeAddressReserver() txpool.AddressReserver { - var ( - reserved = make(map[common.Address]struct{}) - lock sync.Mutex - ) - return func(addr common.Address, reserve bool) error { - lock.Lock() - defer lock.Unlock() - - _, exists := reserved[addr] - if reserve { - if exists { - panic("already reserved") - } - reserved[addr] = struct{}{} - return nil - } - if !exists { - panic("not reserved") - } - delete(reserved, addr) - return nil - } -} - -func setupPool() (*LegacyPool, *ecdsa.PrivateKey) { +func setupPool() (*TxPool, *ecdsa.PrivateKey) { return setupPoolWithConfig(params.TestChainConfig) } -func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) { +func setupPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(10000000, statedb, new(event.Feed)) key, _ := crypto.GenerateKey() - pool := New(testTxPoolConfig, blockchain) - if err := pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()); err != nil { - panic(err) - } + pool := NewTxPool(testTxPoolConfig, config, blockchain) + // wait for the pool to initialize <-pool.initDoneCh return pool, key } // validatePoolInternals checks various consistency invariants within the pool. 
-func validatePoolInternals(pool *LegacyPool) error { +func validatePoolInternals(pool *TxPool) error { pool.mu.RLock() defer pool.mu.RUnlock() @@ -316,21 +293,20 @@ func TestStateChangeDuringReset(t *testing.T) { // setup pool with 2 transaction in it statedb.SetBalance(address, new(big.Int).SetUint64(params.Ether)) - blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger} + blockchain := &testChain{newTestBlockChain(1000000000, statedb, new(event.Feed)), address, &trigger} tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() nonce := pool.Nonce(address) if nonce != 0 { t.Fatalf("Invalid nonce, want 0, got %d", nonce) } - pool.addRemotesSync([]*types.Transaction{tx0, tx1}) + pool.AddRemotesSync([]*types.Transaction{tx0, tx1}) nonce = pool.Nonce(address) if nonce != 2 { @@ -347,13 +323,13 @@ func TestStateChangeDuringReset(t *testing.T) { } } -func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) { +func testAddBalance(pool *TxPool, addr common.Address, amount *big.Int) { pool.mu.Lock() pool.currentState.AddBalance(addr, amount) pool.mu.Unlock() } -func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) { +func testSetNonce(pool *TxPool, addr common.Address, nonce uint64) { pool.mu.Lock() pool.currentState.SetNonce(addr, nonce) pool.mu.Unlock() @@ -363,36 +339,36 @@ func TestInvalidTransactions(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() tx := transaction(0, 100, key) from, _ := deriveSender(tx) // Intrinsic gas too low testAddBalance(pool, from, big.NewInt(1)) - if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { + if err, want := pool.AddRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } // Insufficient funds tx = transaction(0, 100000, key) - if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { + if err, want := pool.AddRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } testSetNonce(pool, from, 1) testAddBalance(pool, from, big.NewInt(0xffffffffffffff)) tx = transaction(0, 100000, key) - if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { + if err, want := pool.AddRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } tx = transaction(1, 100000, key) - pool.gasTip.Store(big.NewInt(1000)) - if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { + pool.gasPrice = big.NewInt(1000) + if err, want := pool.AddRemote(tx), ErrUnderpriced; !errors.Is(err, want) { t.Errorf("want %v have %v", want, err) } - if err := pool.addLocal(tx); err != nil { + if err := pool.AddLocal(tx); err != nil { t.Error("expected", nil, "got", err) } } @@ -401,7 +377,7 @@ func TestQueue(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() tx := transaction(0, 100, key) from, _ := deriveSender(tx) @@ -432,7 +408,7 @@ func TestQueue2(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() tx1 := transaction(0, 100, key) tx2 := 
transaction(10, 100, key) @@ -458,13 +434,13 @@ func TestNegativeValue(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) from, _ := deriveSender(tx) testAddBalance(pool, from, big.NewInt(1)) - if err := pool.addRemote(tx); err != txpool.ErrNegativeValue { - t.Error("expected", txpool.ErrNegativeValue, "got", err) + if err := pool.AddRemote(tx); err != ErrNegativeValue { + t.Error("expected", ErrNegativeValue, "got", err) } } @@ -472,11 +448,11 @@ func TestTipAboveFeeCap(t *testing.T) { t.Parallel() pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Close() + defer pool.Stop() tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) - if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap { + if err := pool.AddRemote(tx); err != core.ErrTipAboveFeeCap { t.Error("expected", core.ErrTipAboveFeeCap, "got", err) } } @@ -485,18 +461,18 @@ func TestVeryHighValues(t *testing.T) { t.Parallel() pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Close() + defer pool.Stop() veryBigNumber := big.NewInt(1) veryBigNumber.Lsh(veryBigNumber, 300) tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) - if err := pool.addRemote(tx); err != core.ErrTipVeryHigh { + if err := pool.AddRemote(tx); err != core.ErrTipVeryHigh { t.Error("expected", core.ErrTipVeryHigh, "got", err) } tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) - if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh { + if err := pool.AddRemote(tx2); err != core.ErrFeeCapVeryHigh { t.Error("expected", core.ErrFeeCapVeryHigh, "got", err) } } @@ -505,14 +481,14 @@ func TestChainFork(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed)) <-pool.requestReset(nil, nil) } resetState() @@ -521,7 +497,7 @@ func TestChainFork(t *testing.T) { if _, err := pool.add(tx, false); err != nil { t.Error("didn't expect error", err) } - pool.removeTx(tx.Hash(), true, true) + pool.removeTx(tx.Hash(), true) // reset the pool's internal state resetState() @@ -534,14 +510,14 @@ func TestDoubleNonce(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) resetState := func() { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) statedb.AddBalance(addr, big.NewInt(100000000000000)) - pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + pool.chain.(*testBlockChain).reset(statedb, 1000000, new(event.Feed)) <-pool.requestReset(nil, nil) } resetState() @@ -585,7 +561,7 @@ func TestMissingNonce(t *testing.T) { t.Parallel() pool, key := setupPool() - defer pool.Close() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, addr, big.NewInt(100000000000000)) @@ -609,7 +585,7 @@ func TestNonceRecovery(t *testing.T) { const n = 10 pool, key := setupPool() - defer pool.Close() + defer pool.Stop() addr := crypto.PubkeyToAddress(key.PublicKey) 
testSetNonce(pool, addr, n) @@ -617,7 +593,7 @@ func TestNonceRecovery(t *testing.T) { <-pool.requestReset(nil, nil) tx := transaction(n, 100000, key) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Error(err) } // simulate some weird re-order of transactions and missing nonce(s) @@ -635,7 +611,7 @@ func TestDropping(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Close() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000)) @@ -711,7 +687,8 @@ func TestDropping(t *testing.T) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) } // Reduce the block gas limit, check that invalidated transactions are dropped - pool.chain.(*testBlockChain).gasLimit.Store(100) + tbc := pool.chain.(*testBlockChain) + tbc.reset(tbc.statedb, 100, tbc.chainHeadFeed) <-pool.requestReset(nil, nil) if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { @@ -739,11 +716,10 @@ func TestPostponing(t *testing.T) { // Create the pool to test the postponing with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Create two test accounts to produce different gap profiles with keys := make([]*ecdsa.PrivateKey, 2) @@ -768,7 +744,7 @@ func TestPostponing(t *testing.T) { txs = append(txs, tx) } } - for i, err := range pool.addRemotesSync(txs) { + for i, err := range pool.AddRemotesSync(txs) { if err != nil { t.Fatalf("tx %d: failed to add transactions: %v", i, err) } @@ -853,7 +829,7 @@ func TestGapFilling(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Close() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -864,7 +840,7 @@ func TestGapFilling(t *testing.T) { defer sub.Unsubscribe() // Create a pending and a queued transaction with a nonce-gap in between - pool.addRemotesSync([]*types.Transaction{ + pool.AddRemotesSync([]*types.Transaction{ transaction(0, 100000, key), transaction(2, 100000, key), }) @@ -907,7 +883,7 @@ func TestQueueAccountLimiting(t *testing.T) { // Create a test account and fund it pool, key := setupPool() - defer pool.Close() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -952,15 +928,14 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), 
makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them (last one will be the local) keys := make([]*ecdsa.PrivateKey, 5) @@ -982,7 +957,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { nonces[addr]++ } // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) queued := 0 for addr, list := range pool.queue { @@ -999,7 +974,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { for i := uint64(0); i < 3*config.GlobalQueue; i++ { txs = append(txs, transaction(i+1, 100000, local)) } - pool.addLocals(txs) + pool.AddLocals(txs) // If locals are disabled, the previous eviction algorithm should apply here too if nolocals { @@ -1045,15 +1020,14 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // Create the pool to test the non-expiration enforcement statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.Lifetime = time.Second config.NoLocals = nolocals - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create two test accounts to ensure remotes expire but locals do not local, _ := crypto.GenerateKey() @@ -1063,10 +1037,10 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) // Add the two transactions and ensure they both are queued up - if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } pending, queued := pool.Stats() @@ -1133,7 +1107,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { } // Queue gapped transactions - if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add remote transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { @@ -1142,7 +1116,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { time.Sleep(5 * evictionInterval) // A half lifetime pass // Queue executable transactions, the life cycle should be restarted. 
-	if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+	if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
 		t.Fatalf("failed to add remote transaction: %v", err)
 	}
 	if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
@@ -1156,7 +1130,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
 	}
 	if queued != 2 {
-		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
 	}
 	if err := validatePoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
@@ -1190,7 +1164,7 @@ func TestPendingLimiting(t *testing.T) {
 	// Create a test account and fund it
 	pool, key := setupPool()
-	defer pool.Close()
+	defer pool.Stop()
 	account := crypto.PubkeyToAddress(key.PublicKey)
 	testAddBalance(pool, account, big.NewInt(1000000000000))
@@ -1231,14 +1205,13 @@ func TestPendingGlobalLimiting(t *testing.T) {
 	// Create the pool to test the limit enforcement with
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
 	config := testTxPoolConfig
 	config.GlobalSlots = config.AccountSlots * 10
-	pool := New(config, blockchain)
-	pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
-	defer pool.Close()
+	pool := NewTxPool(config, params.TestChainConfig, blockchain)
+	defer pool.Stop()
 	// Create a number of test accounts and fund them
 	keys := make([]*ecdsa.PrivateKey, 5)
@@ -1258,7 +1231,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
 		}
 	}
 	// Import the batch and verify that limits have been enforced
-	pool.addRemotesSync(txs)
+	pool.AddRemotesSync(txs)
 	pending := 0
 	for _, list := range pool.pending {
@@ -1280,7 +1253,7 @@ func TestAllowedTxSize(t *testing.T) {
 	// Create a test account and fund it
 	pool, key := setupPool()
-	defer pool.Close()
+	defer pool.Stop()
 	account := crypto.PubkeyToAddress(key.PublicKey)
 	testAddBalance(pool, account, big.NewInt(1000000000))
@@ -1289,7 +1262,7 @@ func TestAllowedTxSize(t *testing.T) {
 	//
 	// It is assumed the fields in the transaction (except for the data) are:
 	//   - nonce     <= 32 bytes
-	//   - gasTip    <= 32 bytes
+	//   - gasPrice  <= 32 bytes
 	//   - gasLimit  <= 32 bytes
 	//   - recipient == 20 bytes
 	//   - value     <= 32 bytes
@@ -1297,21 +1270,22 @@
 	// All those fields are summed up to at most 213 bytes.
baseSize := uint64(213) dataSize := txMaxSize - baseSize + maxGas := pool.currentMaxGas.Load() // Try adding a transaction with maximal allowed size - tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize) + tx := pricedDataTransaction(0, maxGas, big.NewInt(1), key, dataSize) if err := pool.addRemoteSync(tx); err != nil { t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err) } // Try adding a transaction with random allowed size - if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { + if err := pool.addRemoteSync(pricedDataTransaction(1, maxGas, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { t.Fatalf("failed to add transaction of random allowed size: %v", err) } // Try adding a transaction of minimal not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, txMaxSize)); err == nil { t.Fatalf("expected rejection on slightly oversize transaction") } // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, maxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { t.Fatalf("expected rejection on oversize transaction") } // Run some sanity checks on the pool internals @@ -1333,16 +1307,15 @@ func TestCapClearsFromAll(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.AccountSlots = 2 config.AccountQueue = 2 config.GlobalSlots = 8 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them key, _ := crypto.GenerateKey() @@ -1354,7 +1327,7 @@ func TestCapClearsFromAll(t *testing.T) { txs = append(txs, transaction(uint64(j), 100000, key)) } // Import the batch and verify that limits have been enforced - pool.addRemotes(txs) + pool.AddRemotes(txs) if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } @@ -1368,14 +1341,13 @@ func TestPendingMinimumAllowance(t *testing.T) { // Create the pool to test the limit enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 1 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a number of 
test accounts and fund them
 	keys := make([]*ecdsa.PrivateKey, 5)
@@ -1395,7 +1367,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
 		}
 	}
 	// Import the batch and verify that limits have been enforced
-	pool.addRemotesSync(txs)
+	pool.AddRemotesSync(txs)
 	for addr, list := range pool.pending {
 		if list.Len() != int(config.AccountSlots) {
@@ -1417,11 +1389,10 @@ func TestRepricing(t *testing.T) {
 	// Create the pool to test the pricing enforcement with
 	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
-	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+	blockchain := newTestBlockChain(1000000, statedb, new(event.Feed))
-	pool := New(testTxPoolConfig, blockchain)
-	pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver())
-	defer pool.Close()
+	pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+	defer pool.Stop()
 	// Keep track of transaction events to ensure all executables get announced
 	events := make(chan core.NewTxsEvent, 32)
@@ -1452,8 +1423,8 @@ func TestRepricing(t *testing.T) {
 	ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
 	// Import the batch and that both pending and queued transactions match up
-	pool.addRemotesSync(txs)
-	pool.addLocal(ltx)
+	pool.AddRemotesSync(txs)
+	pool.AddLocal(ltx)
 	pending, queued := pool.Stats()
 	if pending != 7 {
@@ -1469,7 +1440,7 @@ func TestRepricing(t *testing.T) {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
 	// Reprice the pool and check that underpriced transactions get dropped
-	pool.SetGasTip(big.NewInt(2))
+	pool.SetGasPrice(big.NewInt(2))
 	pending, queued = pool.Stats()
 	if pending != 2 {
@@ -1485,14 +1456,14 @@ func TestRepricing(t *testing.T) {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
 	// Check that we can't add the old transactions back
-	if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) {
-		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+	if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
+		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to contain %v", err, ErrUnderpriced)
 	}
-	if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
-		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+	if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
+		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want error to contain %v", err, ErrUnderpriced)
 	}
-	if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) {
-		t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+	if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !strings.Contains(err.Error(), ErrUnderpriced.Error()) {
+		t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want error to contain %v", err, ErrUnderpriced)
 	}
 	if err := validateEvents(events, 0); err != nil {
 		t.Fatalf("post-reprice event firing failed: %v", err)
@@ -1502,7 +1473,7 @@ func TestRepricing(t
*testing.T) { } // However we can add local underpriced transactions tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3]) - if err := pool.addLocal(tx); err != nil { + if err := pool.AddLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } if pending, _ = pool.Stats(); pending != 3 { @@ -1515,13 +1486,13 @@ func TestRepricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // And we can fill gaps with properly priced transactions - if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { + if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } - if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } if err := validateEvents(events, 5); err != nil { @@ -1542,7 +1513,7 @@ func TestRepricingDynamicFee(t *testing.T) { // Create the pool to test the pricing enforcement with pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Close() + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1573,8 +1544,8 @@ func TestRepricingDynamicFee(t *testing.T) { ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3]) // Import the batch and that both pending and queued transactions match up - pool.addRemotesSync(txs) - pool.addLocal(ltx) + pool.AddRemotesSync(txs) + pool.AddLocal(ltx) pending, queued := pool.Stats() if pending != 7 { @@ -1590,7 +1561,7 @@ func TestRepricingDynamicFee(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Reprice the pool and check that underpriced transactions get dropped - pool.SetGasTip(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(2)) pending, queued = pool.Stats() if pending != 2 { @@ -1607,16 +1578,16 @@ func TestRepricingDynamicFee(t *testing.T) { } // Check that we can't add the old transactions back tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, 
txpool.ErrUnderpriced) + if err := pool.AddRemote(tx); !strings.Contains(err.Error(), ErrUnderpriced.Error()) { + t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } if err := validateEvents(events, 0); err != nil { t.Fatalf("post-reprice event firing failed: %v", err) @@ -1626,7 +1597,7 @@ func TestRepricingDynamicFee(t *testing.T) { } // However we can add local underpriced transactions tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3]) - if err := pool.addLocal(tx); err != nil { + if err := pool.AddLocal(tx); err != nil { t.Fatalf("failed to add underpriced local transaction: %v", err) } if pending, _ = pool.Stats(); pending != 3 { @@ -1640,15 +1611,15 @@ func TestRepricingDynamicFee(t *testing.T) { } // And we can fill gaps with properly priced transactions tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add pending transaction: %v", err) } tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2]) - if err := pool.addRemoteSync(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to add queued transaction: %v", err) } if err := validateEvents(events, 5); err != nil { @@ -1666,11 +1637,10 @@ func TestRepricingKeepsLocals(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, eip1559Config, blockchain) + defer pool.Stop() // Create a number of test accounts and fund them keys := make([]*ecdsa.PrivateKey, 3) @@ -1682,23 +1652,23 @@ func TestRepricingKeepsLocals(t *testing.T) { for i := uint64(0); i < 500; i++ { // Add pending transaction. pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.addLocal(pendingTx); err != nil { + if err := pool.AddLocal(pendingTx); err != nil { t.Fatal(err) } // Add queued transaction. queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2]) - if err := pool.addLocal(queuedTx); err != nil { + if err := pool.AddLocal(queuedTx); err != nil { t.Fatal(err) } // Add pending dynamic fee transaction. pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.addLocal(pendingTx); err != nil { + if err := pool.AddLocal(pendingTx); err != nil { t.Fatal(err) } // Add queued dynamic fee transaction. 
queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) - if err := pool.addLocal(queuedTx); err != nil { + if err := pool.AddLocal(queuedTx); err != nil { t.Fatal(err) } } @@ -1720,13 +1690,13 @@ func TestRepricingKeepsLocals(t *testing.T) { validate() // Reprice the pool and check that nothing is dropped - pool.SetGasTip(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(2)) validate() - pool.SetGasTip(big.NewInt(2)) - pool.SetGasTip(big.NewInt(4)) - pool.SetGasTip(big.NewInt(8)) - pool.SetGasTip(big.NewInt(100)) + pool.SetGasPrice(big.NewInt(2)) + pool.SetGasPrice(big.NewInt(4)) + pool.SetGasPrice(big.NewInt(8)) + pool.SetGasPrice(big.NewInt(100)) validate() } @@ -1740,15 +1710,14 @@ func TestUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 2 config.GlobalQueue = 2 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1772,8 +1741,8 @@ func TestUnderpricing(t *testing.T) { ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2]) // Import the batch and that both pending and queued transactions match up - pool.addRemotes(txs) - pool.addLocal(ltx) + pool.AddRemotesSync(txs) + pool.AddLocal(ltx) pending, queued := pool.Stats() if pending != 3 { @@ -1789,8 +1758,8 @@ func TestUnderpricing(t *testing.T) { t.Fatalf("pool internal state corrupted: %v", err) } // Ensure that adding an underpriced transaction on block limit fails - if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } // Replace a future transaction with a future transaction if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 @@ -1803,12 +1772,12 @@ func TestUnderpricing(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 t.Fatalf("failed to add well priced transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 + if err := pool.addRemoteSync(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 t.Fatalf("failed to add well priced transaction: %v", err) } // Ensure that replacing a pending transaction with a future transaction fails - if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { - t.Fatalf("adding 
future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) + if err := pool.addRemoteSync(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != ErrFutureReplacePending { + t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, ErrFutureReplacePending) } pending, queued = pool.Stats() if pending != 2 { @@ -1825,11 +1794,11 @@ func TestUnderpricing(t *testing.T) { } // Ensure that adding local transactions can push out even higher priced ones ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to append underpriced local transaction: %v", err) } ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() @@ -1855,15 +1824,14 @@ func TestStableUnderpricing(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.GlobalSlots = 128 config.GlobalQueue = 0 - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(config, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -1881,7 +1849,7 @@ func TestStableUnderpricing(t *testing.T) { for i := uint64(0); i < config.GlobalSlots; i++ { txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) } - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending, queued := pool.Stats() if pending != int(config.GlobalSlots) { @@ -1924,7 +1892,7 @@ func TestUnderpricingDynamicFee(t *testing.T) { t.Parallel() pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Close() + defer pool.Stop() pool.config.GlobalSlots = 2 pool.config.GlobalQueue = 2 @@ -1951,8 +1919,8 @@ func TestUnderpricingDynamicFee(t *testing.T) { ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) // Import the batch and that both pending and queued transactions match up - pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 - pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 + pool.AddRemotesSync(txs) // Pend K0:0, K0:1; Que K1:1 + pool.AddLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 pending, queued := pool.Stats() if pending != 3 { @@ -1970,13 +1938,13 @@ func TestUnderpricingDynamicFee(t *testing.T) { // Ensure that adding an underpriced transaction fails tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) - if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 - t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + if err := pool.addRemoteSync(tx); err != ErrUnderpriced { // Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced) } // Ensure that adding high priced transactions drops cheap ones, but not own 
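The assertions above use three different ways of matching a pool error: direct sentinel equality (err != ErrUnderpriced), substring search over the rendered message (strings.Contains), and errors.Is on the wrapped chain. A minimal, self-contained Go sketch of how the three behave on a wrapped error; ErrUnderpriced below is a local stand-in for the pool's sentinel, not the real variable:

package main

import (
	"errors"
	"fmt"
	"strings"
)

// Stand-in sentinel; the pool's real ErrUnderpriced lives in core/txpool.
var ErrUnderpriced = errors.New("transaction underpriced")

func main() {
	// Errors returned by the pool usually wrap the sentinel with %w.
	wrapped := fmt.Errorf("%w: tip needed 2, tip permitted 1", ErrUnderpriced)

	fmt.Println(wrapped == ErrUnderpriced)                                 // false: equality only matches the bare sentinel
	fmt.Println(errors.Is(wrapped, ErrUnderpriced))                        // true: unwraps the %w chain
	fmt.Println(strings.Contains(wrapped.Error(), ErrUnderpriced.Error())) // true: plain textual match
}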
tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) - if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + if err := pool.addRemoteSync(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - t.Fatalf("failed to add well priced transaction: %v", err) } @@ -2003,11 +1971,11 @@ func TestUnderpricingDynamicFee(t *testing.T) { } // Ensure that adding local transactions can push out even higher priced ones ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to append underpriced local transaction: %v", err) } ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3]) - if err := pool.addLocal(ltx); err != nil { + if err := pool.AddLocal(ltx); err != nil { t.Fatalf("failed to add new underpriced local transaction: %v", err) } pending, queued = pool.Stats() @@ -2031,7 +1999,7 @@ func TestDualHeapEviction(t *testing.T) { t.Parallel() pool, _ := setupPoolWithConfig(eip1559Config) - defer pool.Close() + defer pool.Stop() pool.config.GlobalSlots = 10 pool.config.GlobalQueue = 10 @@ -2060,7 +2028,7 @@ func TestDualHeapEviction(t *testing.T) { tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key) highCap = tx } - pool.addRemotesSync([]*types.Transaction{tx}) + pool.AddRemotesSync([]*types.Transaction{tx}) } pending, queued := pool.Stats() if pending+queued != 20 { @@ -2090,11 +2058,10 @@ func TestDeduplication(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Create a test account to add transactions with key, _ := crypto.GenerateKey() @@ -2109,7 +2076,7 @@ func TestDeduplication(t *testing.T) { for i := 0; i < len(txs); i += 2 { firsts = append(firsts, txs[i]) } - errs := pool.addRemotesSync(firsts) + errs := pool.AddRemotesSync(firsts) if len(errs) != len(firsts) { t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) } @@ -2126,7 +2093,7 @@ func TestDeduplication(t *testing.T) { t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) } // Try to add all of them now and ensure previous ones error out as knowns - errs = pool.addRemotesSync(txs) + errs = pool.AddRemotesSync(txs) if len(errs) != len(txs) { t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) } @@ -2157,11 +2124,10 @@ func TestReplacement(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := 
NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Keep track of transaction events to ensure all executables get announced events := make(chan core.NewTxsEvent, 32) @@ -2179,10 +2145,10 @@ func TestReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap pending transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap pending transaction: %v", err) } if err := validateEvents(events, 2); err != nil { @@ -2192,10 +2158,10 @@ func TestReplacement(t *testing.T) { if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper pending transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper pending transaction: %v", err) } if err := validateEvents(events, 2); err != nil { @@ -2203,23 +2169,23 @@ func TestReplacement(t *testing.T) { } // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { t.Fatalf("failed to add original cheap queued transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { t.Fatalf("failed to replace original cheap queued transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { + if err := 
pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { t.Fatalf("failed to add original proper queued transaction: %v", err) } - if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != ErrReplaceUnderpriced { + t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, ErrReplaceUnderpriced) } - if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { + if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { t.Fatalf("failed to replace original proper queued transaction: %v", err) } @@ -2238,7 +2204,7 @@ func TestReplacementDynamicFee(t *testing.T) { // Create the pool to test the pricing enforcement with pool, key := setupPoolWithConfig(eip1559Config) - defer pool.Close() + defer pool.Stop() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) // Keep track of transaction events to ensure all executables get announced @@ -2280,12 +2246,12 @@ func TestReplacementDynamicFee(t *testing.T) { } // 2. Don't bump tip or feecap => discard tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 3. Bump both more than min => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) } // 4. Check events match expected (2 new executable txs during pending, 0 during queue) @@ -2303,27 +2269,27 @@ func TestReplacementDynamicFee(t *testing.T) { } // 6. Bump tip max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 7. Bump fee cap max allowed so it's still underpriced => discard tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 8. 
Bump tip min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 9. Bump fee cap min for acceptance => accept tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key) - if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { - t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + if err := pool.AddRemote(tx); err != ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, ErrReplaceUnderpriced) } // 10. Check events match expected (3 new executable txs during pending, 0 during queue) tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key) - if err := pool.addRemote(tx); err != nil { + if err := pool.AddRemote(tx); err != nil { t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) } // 11. Check events match expected (3 new executable txs during pending, 0 during queue) @@ -2363,15 +2329,14 @@ func testJournaling(t *testing.T, nolocals bool) { // Create the original pool to inject transaction into the journal statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) config := testTxPoolConfig config.NoLocals = nolocals config.Journal = journal config.Rejournal = time.Second - pool := New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + pool := NewTxPool(config, params.TestChainConfig, blockchain) // Create two test accounts to ensure remotes expire but locals do not local, _ := crypto.GenerateKey() @@ -2381,13 +2346,13 @@ func testJournaling(t *testing.T, nolocals bool) { testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) // Add three local and a remote transactions and ensure they are queued up - if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } - if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + if err := pool.AddLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { t.Fatalf("failed to add local transaction: %v", err) } if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { @@ -2404,12 +2369,11 @@ func testJournaling(t *testing.T, nolocals bool) { t.Fatalf("pool internal state corrupted: %v", err) } // Terminate the old pool, bump the local nonce, create a new pool and 
ensure relevant transaction survive - pool.Close() + pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain = newTestBlockChain(1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + pool = NewTxPool(config, params.TestChainConfig, blockchain) pending, queued = pool.Stats() if queued != 0 { @@ -2431,12 +2395,11 @@ func testJournaling(t *testing.T, nolocals bool) { statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) <-pool.requestReset(nil, nil) time.Sleep(2 * config.Rejournal) - pool.Close() + pool.Stop() statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) - blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) - pool.Init(new(big.Int).SetUint64(config.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) + blockchain = newTestBlockChain(1000000, statedb, new(event.Feed)) + pool = NewTxPool(config, params.TestChainConfig, blockchain) pending, queued = pool.Stats() if pending != 0 { @@ -2454,7 +2417,7 @@ func testJournaling(t *testing.T, nolocals bool) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - pool.Close() + pool.Stop() } // TestStatusCheck tests that the pool can correctly retrieve the @@ -2464,11 +2427,10 @@ func TestStatusCheck(t *testing.T) { // Create the pool to test the status retrievals with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + blockchain := newTestBlockChain(1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) - pool.Init(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() + pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain) + defer pool.Stop() // Create the test accounts to check various transaction statuses with keys := make([]*ecdsa.PrivateKey, 3) @@ -2485,7 +2447,7 @@ func TestStatusCheck(t *testing.T) { txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only // Import the transaction and ensure they are correctly added - pool.addRemotesSync(txs) + pool.AddRemotesSync(txs) pending, queued := pool.Stats() if pending != 2 { @@ -2503,11 +2465,13 @@ func TestStatusCheck(t *testing.T) { hashes[i] = tx.Hash() } hashes = append(hashes, common.Hash{}) - expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown} - for i := 0; i < len(hashes); i++ { - if status := pool.Status(hashes[i]); status != expect[i] { - t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i]) + statuses := pool.Status(hashes) + expect := []TxStatus{TxStatusPending, TxStatusPending, TxStatusQueued, TxStatusQueued, TxStatusUnknown} + + for i := 0; i < len(statuses); i++ { + if statuses[i] != expect[i] { + t.Errorf("transaction %d: status mismatch: have %v, want %v", i, statuses[i], expect[i]) } } } @@ -2539,7 +2503,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1 func benchmarkPendingDemotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one 
pool, key := setupPool() - defer pool.Close() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -2564,7 +2528,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1 func benchmarkFuturePromotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one pool, key := setupPool() - defer pool.Close() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000)) @@ -2592,7 +2556,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000 func benchmarkBatchInsert(b *testing.B, size int, local bool) { // Generate a batch of transactions to enqueue into the pool pool, key := setupPool() - defer pool.Close() + defer pool.Stop() account := crypto.PubkeyToAddress(key.PublicKey) testAddBalance(pool, account, big.NewInt(1000000000000000000)) @@ -2608,9 +2572,9 @@ func benchmarkBatchInsert(b *testing.B, size int, local bool) { b.ResetTimer() for _, batch := range batches { if local { - pool.addLocals(batch) + pool.AddLocals(batch) } else { - pool.addRemotes(batch) + pool.AddRemotes(batch) } } } @@ -2638,15 +2602,15 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { pool, _ := setupPool() testAddBalance(pool, account, big.NewInt(100000000)) for _, local := range locals { - pool.addLocal(local) + pool.AddLocal(local) } b.StartTimer() // Assign a high enough balance for testing testAddBalance(pool, remoteAddr, big.NewInt(100000000)) for i := 0; i < len(remotes); i++ { - pool.addRemotes([]*types.Transaction{remotes[i]}) + pool.AddRemotes([]*types.Transaction{remotes[i]}) } - pool.Close() + pool.Stop() } } @@ -2654,7 +2618,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool pool, _ := setupPool() - defer pool.Close() + defer pool.Stop() b.ReportAllocs() batches := make(types.Transactions, b.N) for i := 0; i < b.N; i++ { @@ -2667,6 +2631,6 @@ func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Benchmark importing the transactions into the queue b.ResetTimer() for _, tx := range batches { - pool.addRemotesSync([]*types.Transaction{tx}) + pool.AddRemotesSync([]*types.Transaction{tx}) } } diff --git a/core/txpool/validation.go b/core/txpool/validation.go deleted file mode 100644 index 7b355bcd7c..0000000000 --- a/core/txpool/validation.go +++ /dev/null @@ -1,272 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// - // You should have received a copy of the GNU Lesser General Public License - // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -package txpool - -import ( - "crypto/sha256" - "fmt" - "math/big" - - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/params" - "github.com/ava-labs/subnet-evm/precompile/contracts/txallowlist" - "github.com/ava-labs/subnet-evm/vmerrs" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/log" -) - -// ValidationOptions define certain differences between transaction validation -// across the different pools without having to duplicate those checks. -type ValidationOptions struct { - Config *params.ChainConfig // Chain configuration to selectively validate based on current fork rules - - Accept uint8 // Bitmap of transaction types that should be accepted for the calling pool - MaxSize uint64 // Maximum size of a transaction that the caller can meaningfully handle - MinTip *big.Int // Minimum gas tip needed to allow a transaction into the caller pool -} - -// ValidateTransaction is a helper method to check whether a transaction is valid -// according to the consensus rules, but does not check state-dependent validation -// (balance, nonce, etc). -// -// This check is public to allow different transaction pools to check the basic -// rules without duplicating code and running the risk of missed updates. -func ValidateTransaction(tx *types.Transaction, blobs []kzg4844.Blob, commits []kzg4844.Commitment, proofs []kzg4844.Proof, head *types.Header, signer types.Signer, opts *ValidationOptions) error { - // Ensure transactions not implemented by the calling pool are rejected - if opts.Accept&(1<<tx.Type()) == 0 { - return fmt.Errorf("%w: tx type %v not supported by this pool", core.ErrTxTypeNotSupported, tx.Type()) - } - // Before performing any expensive validations, sanity check that the tx is - // smaller than the maximum size the pool can meaningfully handle - if tx.Size() > opts.MaxSize { - return fmt.Errorf("%w: transaction size %v, limit %v", ErrOversizedData, tx.Size(), opts.MaxSize) - } - // Ensure only transactions that have been enabled are accepted - if !opts.Config.IsSubnetEVM(head.Time) && tx.Type() != types.LegacyTxType { - return fmt.Errorf("%w: type %d rejected, pool not yet in Berlin", core.ErrTxTypeNotSupported, tx.Type()) - } - if !opts.Config.IsSubnetEVM(head.Time) && tx.Type() == types.DynamicFeeTxType { - return fmt.Errorf("%w: type %d rejected, pool not yet in London", core.ErrTxTypeNotSupported, tx.Type()) - } - if !opts.Config.IsCancun(head.Number, head.Time) && tx.Type() == types.BlobTxType { - return fmt.Errorf("%w: type %d rejected, pool not yet in Cancun", core.ErrTxTypeNotSupported, tx.Type()) - } - // Check whether the init code size has been exceeded - if opts.Config.IsDurango(head.Time) && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { - return fmt.Errorf("%w: code size %v, limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) - } - // Transactions can't be negative. This may never happen using RLP decoded - // transactions but may occur for transactions created using the RPC.
- if tx.Value().Sign() < 0 { - return ErrNegativeValue - } - // Ensure the transaction doesn't exceed the current block limit gas - if txGas := tx.Gas(); head.GasLimit < txGas { - return fmt.Errorf( - "%w: tx gas (%d) > current max gas (%d)", - ErrGasLimit, - txGas, - head.GasLimit, - ) - } - // Sanity check for extremely large numbers (supported by RLP or RPC) - if tx.GasFeeCap().BitLen() > 256 { - return core.ErrFeeCapVeryHigh - } - if tx.GasTipCap().BitLen() > 256 { - return core.ErrTipVeryHigh - } - // Ensure gasFeeCap is greater than or equal to gasTipCap - if tx.GasFeeCapIntCmp(tx.GasTipCap()) < 0 { - return core.ErrTipAboveFeeCap - } - // Make sure the transaction is signed properly - from, err := types.Sender(signer, tx) - if err != nil { - return ErrInvalidSender - } - // Ensure the transaction has more gas than the bare minimum needed to cover - // the transaction metadata - intrGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, opts.Config.Rules(head.Number, head.Time)) - if err != nil { - return err - } - if txGas := tx.Gas(); txGas < intrGas { - return fmt.Errorf("%w: address %v tx gas (%v) < intrinsic gas (%v)", core.ErrIntrinsicGas, from.Hex(), tx.Gas(), intrGas) - } - // Ensure the gasprice is high enough to cover the requirement of the calling - // pool and/or block producer - if tx.GasTipCapIntCmp(opts.MinTip) < 0 { - return fmt.Errorf("%w: tip needed %v, tip permitted %v", ErrUnderpriced, opts.MinTip, tx.GasTipCap()) - } - // Ensure blob transactions have valid commitments - if tx.Type() == types.BlobTxType { - // Ensure the number of items in the blob transaction and vairous side - // data match up before doing any expensive validations - hashes := tx.BlobHashes() - if len(hashes) == 0 { - return fmt.Errorf("blobless blob transaction") - } - if len(hashes) > params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob { - return fmt.Errorf("too many blobs in transaction: have %d, permitted %d", len(hashes), params.BlobTxMaxBlobGasPerBlock/params.BlobTxBlobGasPerBlob) - } - if len(blobs) != len(hashes) { - return fmt.Errorf("invalid number of %d blobs compared to %d blob hashes", len(blobs), len(hashes)) - } - if len(commits) != len(hashes) { - return fmt.Errorf("invalid number of %d blob commitments compared to %d blob hashes", len(commits), len(hashes)) - } - if len(proofs) != len(hashes) { - return fmt.Errorf("invalid number of %d blob proofs compared to %d blob hashes", len(proofs), len(hashes)) - } - // Blob quantities match up, validate that the provers match with the - // transaction hash before getting to the cryptography - hasher := sha256.New() - for i, want := range hashes { - hasher.Write(commits[i][:]) - hash := hasher.Sum(nil) - hasher.Reset() - - var vhash common.Hash - vhash[0] = params.BlobTxHashVersion - copy(vhash[1:], hash[1:]) - - if vhash != want { - return fmt.Errorf("blob %d: computed hash %#x mismatches transaction one %#x", i, vhash, want) - } - } - // Blob commitments match with the hashes in the transaction, verify the - // blobs themselves via KZG - for i := range blobs { - if err := kzg4844.VerifyBlobProof(blobs[i], commits[i], proofs[i]); err != nil { - return fmt.Errorf("invalid blob %d: %v", i, err) - } - } - } - return nil -} - -// ValidationOptionsWithState define certain differences between stateful transaction -// validation across the different pools without having to duplicate those checks. 
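The loop above derives EIP-4844 versioned hashes from KZG commitments before any expensive cryptography runs. A self-contained sketch of that rule under the same assumptions (SHA-256 of the 48-byte commitment with its first byte overwritten by the version tag; 0x01 stands in for params.BlobTxHashVersion, and the zero commitment is purely illustrative):

package main

import (
	"crypto/sha256"
	"fmt"
)

// versionedHash hashes a KZG commitment and stamps the version byte, matching
// the construction in the loop above.
func versionedHash(commitment [48]byte) [32]byte {
	h := sha256.Sum256(commitment[:])
	h[0] = 0x01 // version tag (params.BlobTxHashVersion in the deleted code)
	return h
}

func main() {
	var commitment [48]byte // all-zero commitment, for illustration only
	fmt.Printf("versioned hash: %x\n", versionedHash(commitment))
}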
-type ValidationOptionsWithState struct { - State *state.StateDB // State database to check nonces and balances against - - // FirstNonceGap is an optional callback to retrieve the first nonce gap in - // the list of pooled transactions of a specific account. If this method is - // set, nonce gaps will be checked and forbidden. If this method is not set, - // nonce gaps will be ignored and permitted. - FirstNonceGap func(addr common.Address) uint64 - - // UsedAndLeftSlots is a mandatory callback to retrieve the number of tx slots - // used and the number still permitted for an account. New transactions will - // be rejected once the number of remaining slots reaches zero. - UsedAndLeftSlots func(addr common.Address) (int, int) - - // ExistingExpenditure is a mandatory callback to retrieve the cummulative - // cost of the already pooled transactions to check for overdrafts. - ExistingExpenditure func(addr common.Address) *big.Int - - // ExistingCost is a mandatory callback to retrieve an already pooled - // transaction's cost with the given nonce to check for overdrafts. - ExistingCost func(addr common.Address, nonce uint64) *big.Int - - Rules params.Rules - MinimumFee *big.Int -} - -// ValidateTransactionWithState is a helper method to check whether a transaction -// is valid according to the pool's internal state checks (balance, nonce, gaps). -// -// This check is public to allow different transaction pools to check the stateful -// rules without duplicating code and running the risk of missed updates. -func ValidateTransactionWithState(tx *types.Transaction, signer types.Signer, opts *ValidationOptionsWithState) error { - // Ensure the transaction adheres to nonce ordering - from, err := signer.Sender(tx) // already validated (and cached), but cleaner to check - if err != nil { - log.Error("Transaction sender recovery failed", "err", err) - return err - } - - // Drop the transaction if the gas fee cap is below the pool's minimum fee - if opts.MinimumFee != nil && tx.GasFeeCapIntCmp(opts.MinimumFee) < 0 { - return fmt.Errorf("%w: address %s have gas fee cap (%d) < pool minimum fee cap (%d)", ErrUnderpriced, from.Hex(), tx.GasFeeCap(), opts.MinimumFee) - } - - next := opts.State.GetNonce(from) - if next > tx.Nonce() { - return fmt.Errorf("%w: next nonce %v, tx nonce %v", core.ErrNonceTooLow, next, tx.Nonce()) - } - // Ensure the transaction doesn't produce a nonce gap in pools that do not - // support arbitrary orderings - if opts.FirstNonceGap != nil { - if gap := opts.FirstNonceGap(from); gap < tx.Nonce() { - return fmt.Errorf("%w: tx nonce %v, gapped nonce %v", core.ErrNonceTooHigh, tx.Nonce(), gap) - } - } - // Ensure the transactor has enough funds to cover the transaction costs - var ( - balance = opts.State.GetBalance(from) - cost = tx.Cost() - ) - if balance.Cmp(cost) < 0 { - return fmt.Errorf("%w: balance %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, cost, new(big.Int).Sub(cost, balance)) - } - // Ensure the transactor has enough funds to cover for replacements or nonce - // expansions without overdrafts - spent := opts.ExistingExpenditure(from) - if prev := opts.ExistingCost(from, tx.Nonce()); prev != nil { - bump := new(big.Int).Sub(cost, prev) - need := new(big.Int).Add(spent, bump) - if balance.Cmp(need) < 0 { - return fmt.Errorf("%w: balance %v, queued cost %v, tx bumped %v, overshot %v", core.ErrInsufficientFunds, balance, spent, bump, new(big.Int).Sub(need, balance)) - } - } else { - need := new(big.Int).Add(spent, cost) - if balance.Cmp(need) < 0 { - 
return fmt.Errorf("%w: balance %v, queued cost %v, tx cost %v, overshot %v", core.ErrInsufficientFunds, balance, spent, cost, new(big.Int).Sub(need, balance)) - } - // Transaction takes a new nonce value out of the pool. Ensure it doesn't - // overflow the number of permitted transactions from a single accoun - // (i.e. max cancellable via out-of-bound transaction). - if used, left := opts.UsedAndLeftSlots(from); left <= 0 { - return fmt.Errorf("%w: pooled %d txs", ErrAccountLimitExceeded, used) - } - } - - // If the tx allow list is enabled, return an error if the from address is not allow listed. - if opts.Rules.IsPrecompileEnabled(txallowlist.ContractAddress) { - txAllowListRole := txallowlist.GetTxAllowListStatus(opts.State, from) - if !txAllowListRole.IsEnabled() { - return fmt.Errorf("%w: %s", vmerrs.ErrSenderAddressNotAllowListed, from) - } - } - - return nil -} diff --git a/core/types/block.go b/core/types/block.go index a17d4c7422..06e7d6c1c0 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -94,26 +94,21 @@ type Header struct { // headers. BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"` - // BlobGasUsed was added by EIP-4844 and is ignored in legacy headers. - BlobGasUsed *uint64 `json:"blobGasUsed" rlp:"optional"` - - // ExcessBlobGas was added by EIP-4844 and is ignored in legacy headers. - ExcessBlobGas *uint64 `json:"excessBlobGas" rlp:"optional"` + // ExcessDataGas was added by EIP-4844 and is ignored in legacy headers. + ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` } // field type overrides for gencodec type headerMarshaling struct { - Difficulty *hexutil.Big - Number *hexutil.Big - GasLimit hexutil.Uint64 - GasUsed hexutil.Uint64 - Time hexutil.Uint64 - Extra hexutil.Bytes - BaseFee *hexutil.Big - BlockGasCost *hexutil.Big - Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON - BlobGasUsed *hexutil.Uint64 - ExcessBlobGas *hexutil.Uint64 + Difficulty *hexutil.Big + Number *hexutil.Big + GasLimit hexutil.Uint64 + GasUsed hexutil.Uint64 + Time hexutil.Uint64 + Extra hexutil.Bytes + BaseFee *hexutil.Big + BlockGasCost *hexutil.Big + Hash common.Hash `json:"hash"` // adds call to Hash() in MarshalJSON } // Hash returns the block hash of the header, which is simply the keccak256 hash of its @@ -152,23 +147,7 @@ type Body struct { Uncles []*Header } -// Block represents an Ethereum block. -// -// Note the Block type tries to be 'immutable', and contains certain caches that rely -// on that. The rules around block immutability are as follows: -// -// - We copy all data when the block is constructed. This makes references held inside -// the block independent of whatever value was passed in. -// -// - We copy all header data on access. This is because any change to the header would mess -// up the cached hash and size values in the block. Calling code is expected to take -// advantage of this to avoid over-allocating! -// -// - When new body data is attached to the block, a shallow copy of the block is returned. -// This ensures block modifications are race-free. -// -// - We do not copy body data on access because it does not affect the caches, and also -// because it would be too expensive. +// Block represents an entire block in the Ethereum blockchain. type Block struct { header *Header uncles []*Header @@ -186,8 +165,9 @@ type extblock struct { Uncles []*Header } -// NewBlock creates a new block. The input data is copied, changes to header and to the -// field values will not affect the block. 
+// NewBlock creates a new block. The input data is copied, +// changes to header and to the field values will not affect the +// block. // // The values of TxHash, UncleHash, ReceiptHash and Bloom in header // are ignored and set to values derived from the given txs, uncles @@ -226,7 +206,15 @@ func NewBlock( return b } -// CopyHeader creates a deep copy of a block header. +// NewBlockWithHeader creates a block with the given header data. The +// header data is copied, changes to header and to the field values +// will not affect the block. +func NewBlockWithHeader(header *Header) *Block { + return &Block{header: CopyHeader(header)} +} + +// CopyHeader creates a deep copy of a block header to prevent side effects from +// modifying a header variable. func CopyHeader(h *Header) *Header { cpy := *h if cpy.Difficulty = new(big.Int); h.Difficulty != nil { @@ -245,18 +233,10 @@ func CopyHeader(h *Header) *Header { cpy.Extra = make([]byte, len(h.Extra)) copy(cpy.Extra, h.Extra) } - if h.ExcessBlobGas != nil { - cpy.ExcessBlobGas = new(uint64) - *cpy.ExcessBlobGas = *h.ExcessBlobGas - } - if h.BlobGasUsed != nil { - cpy.BlobGasUsed = new(uint64) - *cpy.BlobGasUsed = *h.BlobGasUsed - } return &cpy } -// DecodeRLP decodes a block from RLP. +// DecodeRLP decodes the Ethereum func (b *Block) DecodeRLP(s *rlp.Stream) error { var eb extblock _, size, _ := s.Kind() @@ -268,23 +248,16 @@ func (b *Block) DecodeRLP(s *rlp.Stream) error { return nil } -// EncodeRLP serializes a block as RLP. +// EncodeRLP serializes b into the Ethereum RLP block format. func (b *Block) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &extblock{ + return rlp.Encode(w, extblock{ Header: b.header, Txs: b.transactions, Uncles: b.uncles, }) } -// Body returns the non-header content of the block. -// Note the returned data is not an independent copy. -func (b *Block) Body() *Body { - return &Body{b.transactions, b.uncles} -} - -// Accessors for body data. These do not return a copy because the content -// of the body slices does not affect the cached hash/size in block. +// TODO: copies func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Transactions() Transactions { return b.transactions } @@ -298,13 +271,6 @@ func (b *Block) Transaction(hash common.Hash) *Transaction { return nil } -// Header returns the block header (as a copy). -func (b *Block) Header() *Header { - return CopyHeader(b.header) -} - -// Header value accessors. These do copy! - func (b *Block) Number() *big.Int { return new(big.Int).Set(b.header.Number) } func (b *Block) GasLimit() uint64 { return b.header.GasLimit } func (b *Block) GasUsed() uint64 { return b.header.GasUsed } @@ -338,23 +304,10 @@ func (b *Block) BlockGasCost() *big.Int { return new(big.Int).Set(b.header.BlockGasCost) } -func (b *Block) ExcessBlobGas() *uint64 { - var excessBlobGas *uint64 - if b.header.ExcessBlobGas != nil { - excessBlobGas = new(uint64) - *excessBlobGas = *b.header.ExcessBlobGas - } - return excessBlobGas -} +func (b *Block) Header() *Header { return CopyHeader(b.header) } -func (b *Block) BlobGasUsed() *uint64 { - var blobGasUsed *uint64 - if b.header.BlobGasUsed != nil { - blobGasUsed = new(uint64) - *blobGasUsed = *b.header.BlobGasUsed - } - return blobGasUsed -} +// Body returns the non-header content of the block. +func (b *Block) Body() *Body { return &Body{b.transactions, b.uncles} } // Size returns the true RLP encoded storage size of the block, either by encoding // and returning it, or returning a previously cached value. 
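CopyHeader above deep-copies every mutable field (the big.Int pointers and the Extra byte slice) so that callers mutating the copy cannot corrupt the cached hash and size of a block still holding the original. A self-contained sketch of why a plain struct copy is not enough, using a toy header type rather than the one in this package:

package main

import (
	"fmt"
	"math/big"
)

// header is a toy stand-in for types.Header with one pointer field and one slice.
type header struct {
	Number *big.Int
	Extra  []byte
}

// copyHeader mirrors the pattern above: the struct copy still shares the
// *big.Int and the Extra backing array, so both are re-allocated explicitly.
func copyHeader(h *header) *header {
	cpy := *h
	if h.Number != nil {
		cpy.Number = new(big.Int).Set(h.Number)
	}
	if len(h.Extra) > 0 {
		cpy.Extra = append([]byte(nil), h.Extra...)
	}
	return &cpy
}

func main() {
	orig := &header{Number: big.NewInt(1), Extra: []byte{0xaa}}
	cpy := copyHeader(orig)
	cpy.Number.SetUint64(99)
	cpy.Extra[0] = 0xbb
	fmt.Println(orig.Number, orig.Extra) // 1 [170]: the original is untouched
}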
@@ -382,27 +335,22 @@ func CalcUncleHash(uncles []*Header) common.Hash { return rlpHash(uncles) } -// NewBlockWithHeader creates a block with the given header data. The -// header data is copied, changes to header and to the field values -// will not affect the block. -func NewBlockWithHeader(header *Header) *Block { - return &Block{header: CopyHeader(header)} -} - // WithSeal returns a new block with the data from b but the header replaced with // the sealed one. func (b *Block) WithSeal(header *Header) *Block { + cpy := *header + return &Block{ - header: CopyHeader(header), + header: &cpy, transactions: b.transactions, uncles: b.uncles, } } -// WithBody returns a copy of the block with the given transaction and uncle contents. +// WithBody returns a new block with the given transaction and uncle contents. func (b *Block) WithBody(transactions []*Transaction, uncles []*Header) *Block { block := &Block{ - header: b.header, + header: CopyHeader(b.header), transactions: make([]*Transaction, len(transactions)), uncles: make([]*Header, len(uncles)), } diff --git a/core/types/block_test.go b/core/types/block_test.go index c484dd268d..fb4ad2194a 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -28,16 +28,17 @@ package types import ( "bytes" + "hash" "math/big" "reflect" "testing" - "github.com/ava-labs/subnet-evm/internal/blocktest" "github.com/ava-labs/subnet-evm/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" ) func TestBlockEncoding(t *testing.T) { @@ -237,6 +238,31 @@ func BenchmarkEncodeBlock(b *testing.B) { } } +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. +type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) error { + h.hasher.Write(key) + h.hasher.Write(val) + return nil +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + func makeBenchBlock() *Block { var ( key, _ = crypto.GenerateKey() @@ -275,7 +301,7 @@ func makeBenchBlock() *Block { Extra: []byte("benchmark uncle"), } } - return NewBlock(header, txs, uncles, receipts, blocktest.NewHasher()) + return NewBlock(header, txs, uncles, receipts, newHasher()) } func TestSubnetEVMBlockEncoding(t *testing.T) { diff --git a/core/types/gen_header_json.go b/core/types/gen_header_json.go index 224c2c3759..b0aa11fdae 100644 --- a/core/types/gen_header_json.go +++ b/core/types/gen_header_json.go @@ -16,26 +16,25 @@ var _ = (*headerMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (h Header) MarshalJSON() ([]byte, error) { type Header struct { - ParentHash common.Hash `json:"parentHash" gencodec:"required"` - UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` - Coinbase common.Address `json:"miner" gencodec:"required"` - Root common.Hash `json:"stateRoot" gencodec:"required"` - TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` - ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` - Bloom Bloom `json:"logsBloom" gencodec:"required"` - Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` - Number *hexutil.Big `json:"number" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` - GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` - Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` - Extra hexutil.Bytes `json:"extraData" gencodec:"required"` - MixDigest common.Hash `json:"mixHash"` - Nonce BlockNonce `json:"nonce"` - BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` - BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` - Hash common.Hash `json:"hash"` + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + UncleHash common.Hash `json:"sha3Uncles" gencodec:"required"` + Coinbase common.Address `json:"miner" gencodec:"required"` + Root common.Hash `json:"stateRoot" gencodec:"required"` + TxHash common.Hash `json:"transactionsRoot" gencodec:"required"` + ReceiptHash common.Hash `json:"receiptsRoot" gencodec:"required"` + Bloom Bloom `json:"logsBloom" gencodec:"required"` + Difficulty *hexutil.Big `json:"difficulty" gencodec:"required"` + Number *hexutil.Big `json:"number" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Time hexutil.Uint64 `json:"timestamp" gencodec:"required"` + Extra hexutil.Bytes `json:"extraData" gencodec:"required"` + MixDigest common.Hash `json:"mixHash"` + Nonce BlockNonce `json:"nonce"` + BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` + BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` + ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` + Hash common.Hash `json:"hash"` } var enc Header enc.ParentHash = h.ParentHash @@ -55,8 +54,7 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.Nonce = h.Nonce enc.BaseFee = (*hexutil.Big)(h.BaseFee) enc.BlockGasCost = (*hexutil.Big)(h.BlockGasCost) - enc.BlobGasUsed = (*hexutil.Uint64)(h.BlobGasUsed) - enc.ExcessBlobGas = (*hexutil.Uint64)(h.ExcessBlobGas) + enc.ExcessDataGas = h.ExcessDataGas enc.Hash = h.Hash() return json.Marshal(&enc) } @@ -81,8 +79,7 @@ func (h *Header) UnmarshalJSON(input []byte) error { Nonce *BlockNonce `json:"nonce"` BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed" rlp:"optional"` - ExcessBlobGas *hexutil.Uint64 `json:"excessBlobGas" rlp:"optional"` + ExcessDataGas *big.Int `json:"excessDataGas" rlp:"optional"` } var dec Header if err := json.Unmarshal(input, &dec); err != nil { @@ -152,11 +149,8 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.BlockGasCost != nil { h.BlockGasCost = (*big.Int)(dec.BlockGasCost) } - if dec.BlobGasUsed != nil { - h.BlobGasUsed = (*uint64)(dec.BlobGasUsed) - } - if dec.ExcessBlobGas != nil { - h.ExcessBlobGas = 
(*uint64)(dec.ExcessBlobGas) + if dec.ExcessDataGas != nil { + h.ExcessDataGas = dec.ExcessDataGas } return nil } diff --git a/core/types/gen_header_rlp.go b/core/types/gen_header_rlp.go index 1735881553..d869348c83 100644 --- a/core/types/gen_header_rlp.go +++ b/core/types/gen_header_rlp.go @@ -42,9 +42,8 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBytes(obj.Nonce[:]) _tmp1 := obj.BaseFee != nil _tmp2 := obj.BlockGasCost != nil - _tmp3 := obj.BlobGasUsed != nil - _tmp4 := obj.ExcessBlobGas != nil - if _tmp1 || _tmp2 || _tmp3 || _tmp4 { + _tmp3 := obj.ExcessDataGas != nil + if _tmp1 || _tmp2 || _tmp3 { if obj.BaseFee == nil { w.Write(rlp.EmptyString) } else { @@ -54,7 +53,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BaseFee) } } - if _tmp2 || _tmp3 || _tmp4 { + if _tmp2 || _tmp3 { if obj.BlockGasCost == nil { w.Write(rlp.EmptyString) } else { @@ -64,18 +63,14 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BlockGasCost) } } - if _tmp3 || _tmp4 { - if obj.BlobGasUsed == nil { - w.Write([]byte{0x80}) - } else { - w.WriteUint64((*obj.BlobGasUsed)) - } - } - if _tmp4 { - if obj.ExcessBlobGas == nil { - w.Write([]byte{0x80}) + if _tmp3 { + if obj.ExcessDataGas == nil { + w.Write(rlp.EmptyString) } else { - w.WriteUint64((*obj.ExcessBlobGas)) + if obj.ExcessDataGas.Sign() == -1 { + return rlp.ErrNegativeBigInt + } + w.WriteBigInt(obj.ExcessDataGas) } } w.ListEnd(_tmp0) diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go index 4c641a9727..d83be14477 100644 --- a/core/types/gen_receipt_json.go +++ b/core/types/gen_receipt_json.go @@ -26,8 +26,6 @@ func (r Receipt) MarshalJSON() ([]byte, error) { ContractAddress common.Address `json:"contractAddress"` GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` - BlobGasUsed hexutil.Uint64 `json:"blobGasUsed,omitempty"` - BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` BlockHash common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex hexutil.Uint `json:"transactionIndex"` @@ -43,8 +41,6 @@ func (r Receipt) MarshalJSON() ([]byte, error) { enc.ContractAddress = r.ContractAddress enc.GasUsed = hexutil.Uint64(r.GasUsed) enc.EffectiveGasPrice = (*hexutil.Big)(r.EffectiveGasPrice) - enc.BlobGasUsed = hexutil.Uint64(r.BlobGasUsed) - enc.BlobGasPrice = (*hexutil.Big)(r.BlobGasPrice) enc.BlockHash = r.BlockHash enc.BlockNumber = (*hexutil.Big)(r.BlockNumber) enc.TransactionIndex = hexutil.Uint(r.TransactionIndex) @@ -64,8 +60,6 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { ContractAddress *common.Address `json:"contractAddress"` GasUsed *hexutil.Uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *hexutil.Big `json:"effectiveGasPrice"` - BlobGasUsed *hexutil.Uint64 `json:"blobGasUsed,omitempty"` - BlobGasPrice *hexutil.Big `json:"blobGasPrice,omitempty"` BlockHash *common.Hash `json:"blockHash,omitempty"` BlockNumber *hexutil.Big `json:"blockNumber,omitempty"` TransactionIndex *hexutil.Uint `json:"transactionIndex"` @@ -109,12 +103,6 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { if dec.EffectiveGasPrice != nil { r.EffectiveGasPrice = (*big.Int)(dec.EffectiveGasPrice) } - if dec.BlobGasUsed != nil { - r.BlobGasUsed = uint64(*dec.BlobGasUsed) - } - if dec.BlobGasPrice != nil { - r.BlobGasPrice = (*big.Int)(dec.BlobGasPrice) - } if dec.BlockHash != nil { r.BlockHash = *dec.BlockHash } diff --git 
a/core/types/hashes.go b/core/types/hashes.go index 2c29ce2b71..69a034b6d8 100644 --- a/core/types/hashes.go +++ b/core/types/hashes.go @@ -29,7 +29,6 @@ package types import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" ) var ( @@ -48,13 +47,3 @@ var ( // EmptyReceiptsHash is the known hash of the empty receipt set. EmptyReceiptsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") ) - -// TrieRootHash returns the hash itself if it's non-empty or the predefined -// emptyHash one instead. -func TrieRootHash(hash common.Hash) common.Hash { - if hash == (common.Hash{}) { - log.Error("Zero trie root hash!") - return EmptyRootHash - } - return hash -} diff --git a/core/types/receipt.go b/core/types/receipt.go index 2518d5e49a..6ed57778d5 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -73,8 +73,6 @@ type Receipt struct { ContractAddress common.Address `json:"contractAddress"` GasUsed uint64 `json:"gasUsed" gencodec:"required"` EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // required, but tag omitted for backwards compatibility - BlobGasUsed uint64 `json:"blobGasUsed,omitempty"` - BlobGasPrice *big.Int `json:"blobGasPrice,omitempty"` // Inclusion information: These fields provide information about the inclusion of the // transaction corresponding to this receipt. @@ -90,8 +88,6 @@ type receiptMarshaling struct { CumulativeGasUsed hexutil.Uint64 GasUsed hexutil.Uint64 EffectiveGasPrice *hexutil.Big - BlobGasUsed hexutil.Uint64 - BlobGasPrice *hexutil.Big BlockNumber *hexutil.Big TransactionIndex hexutil.Uint } @@ -208,7 +204,7 @@ func (r *Receipt) decodeTyped(b []byte) error { return errShortTypedReceipt } switch b[0] { - case DynamicFeeTxType, AccessListTxType, BlobTxType: + case DynamicFeeTxType, AccessListTxType: var data receiptRLP err := rlp.DecodeBytes(b[1:], &data) if err != nil { @@ -310,13 +306,14 @@ func (rs Receipts) Len() int { return len(rs) } func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { r := rs[i] data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} - if r.Type == LegacyTxType { - rlp.Encode(w, data) - return - } - w.WriteByte(r.Type) switch r.Type { - case AccessListTxType, DynamicFeeTxType, BlobTxType: + case LegacyTxType: + rlp.Encode(w, data) + case AccessListTxType: + w.WriteByte(AccessListTxType) + rlp.Encode(w, data) + case DynamicFeeTxType: + w.WriteByte(DynamicFeeTxType) rlp.Encode(w, data) default: // For unsupported types, write nothing. Since this is for @@ -327,7 +324,7 @@ func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { // DeriveFields fills the receipts with their computed fields based on consensus // data and contextual infos like containing block and transactions. 
-func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, blobGasPrice *big.Int, txs []*Transaction) error { +func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, number uint64, time uint64, baseFee *big.Int, txs []*Transaction) error { signer := MakeSigner(config, new(big.Int).SetUint64(number), time) logIndex := uint(0) @@ -338,13 +335,8 @@ func (rs Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, nu // The transaction type and hash can be retrieved from the transaction itself rs[i].Type = txs[i].Type() rs[i].TxHash = txs[i].Hash() - rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee) - // EIP-4844 blob transaction fields - if txs[i].Type() == BlobTxType { - rs[i].BlobGasUsed = txs[i].BlobGas() - rs[i].BlobGasPrice = blobGasPrice - } + rs[i].EffectiveGasPrice = txs[i].inner.effectiveGasPrice(new(big.Int), baseFee) // block location fields rs[i].BlockHash = hash diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index c0661fd20c..f310dc061f 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -140,24 +140,22 @@ var ( }), // EIP-4844 transactions. NewTx(&BlobTx{ - To: to6, + To: &to6, Nonce: 6, Value: uint256.NewInt(6), Gas: 6, GasTipCap: uint256.NewInt(66), GasFeeCap: uint256.NewInt(1066), BlobFeeCap: uint256.NewInt(100066), - BlobHashes: []common.Hash{{}}, }), NewTx(&BlobTx{ - To: to7, + To: &to7, Nonce: 7, Value: uint256.NewInt(7), Gas: 7, GasTipCap: uint256.NewInt(77), GasFeeCap: uint256.NewInt(1077), BlobFeeCap: uint256.NewInt(100077), - BlobHashes: []common.Hash{{}, {}, {}}, }), } @@ -282,8 +280,6 @@ var ( TxHash: txs[5].Hash(), GasUsed: 6, EffectiveGasPrice: big.NewInt(1066), - BlobGasUsed: params.BlobTxBlobGasPerBlob, - BlobGasPrice: big.NewInt(920), BlockHash: blockHash, BlockNumber: blockNumber, TransactionIndex: 5, @@ -297,8 +293,6 @@ var ( TxHash: txs[6].Hash(), GasUsed: 7, EffectiveGasPrice: big.NewInt(1077), - BlobGasUsed: 3 * params.BlobTxBlobGasPerBlob, - BlobGasPrice: big.NewInt(920), BlockHash: blockHash, BlockNumber: blockNumber, TransactionIndex: 6, @@ -319,9 +313,8 @@ func TestDecodeEmptyTypedReceipt(t *testing.T) { func TestDeriveFields(t *testing.T) { // Re-derive receipts. basefee := big.NewInt(1000) - blobGasPrice := big.NewInt(920) derivedReceipts := clearComputedFieldsOnReceipts(receipts) - err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, blobGasPrice, txs) + err := Receipts(derivedReceipts).DeriveFields(params.TestChainConfig, blockHash, blockNumber.Uint64(), blockTime, basefee, txs) if err != nil { t.Fatalf("DeriveFields(...) 
= %v, want ", err) } @@ -518,9 +511,6 @@ func clearComputedFieldsOnReceipt(receipt *Receipt) *Receipt { cpy.ContractAddress = common.Address{0xff, 0xff, 0x33} cpy.GasUsed = 0xffffffff cpy.Logs = clearComputedFieldsOnLogs(receipt.Logs) - cpy.EffectiveGasPrice = big.NewInt(0) - cpy.BlobGasUsed = 0 - cpy.BlobGasPrice = nil return &cpy } diff --git a/core/types/state_account.go b/core/types/state_account.go index 1c1ef026f4..72066a12d1 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -27,11 +27,9 @@ package types import ( - "bytes" "math/big" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" ) //go:generate go run github.com/ethereum/go-ethereum/rlp/rlpgen -type StateAccount -out gen_account_rlp.go @@ -44,88 +42,3 @@ type StateAccount struct { Root common.Hash // merkle root of the storage trie CodeHash []byte } - -// NewEmptyStateAccount constructs an empty state account. -func NewEmptyStateAccount() *StateAccount { - return &StateAccount{ - Balance: new(big.Int), - Root: EmptyRootHash, - CodeHash: EmptyCodeHash.Bytes(), - } -} - -// Copy returns a deep-copied state account object. -func (acct *StateAccount) Copy() *StateAccount { - var balance *big.Int - if acct.Balance != nil { - balance = new(big.Int).Set(acct.Balance) - } - return &StateAccount{ - Nonce: acct.Nonce, - Balance: balance, - Root: acct.Root, - CodeHash: common.CopyBytes(acct.CodeHash), - } -} - -// SlimAccount is a modified version of an Account, where the root is replaced -// with a byte slice. This format can be used to represent full-consensus format -// or slim format which replaces the empty root and code hash as nil byte slice. -type SlimAccount struct { - Nonce uint64 - Balance *big.Int - Root []byte // Nil if root equals to types.EmptyRootHash - CodeHash []byte // Nil if hash equals to types.EmptyCodeHash -} - -// SlimAccountRLP encodes the state account in 'slim RLP' format. -func SlimAccountRLP(account StateAccount) []byte { - slim := SlimAccount{ - Nonce: account.Nonce, - Balance: account.Balance, - } - if account.Root != EmptyRootHash { - slim.Root = account.Root[:] - } - if !bytes.Equal(account.CodeHash, EmptyCodeHash[:]) { - slim.CodeHash = account.CodeHash - } - data, err := rlp.EncodeToBytes(slim) - if err != nil { - panic(err) - } - return data -} - -// FullAccount decodes the data on the 'slim RLP' format and return -// the consensus format account. -func FullAccount(data []byte) (*StateAccount, error) { - var slim SlimAccount - if err := rlp.DecodeBytes(data, &slim); err != nil { - return nil, err - } - var account StateAccount - account.Nonce, account.Balance = slim.Nonce, slim.Balance - - // Interpret the storage root and code hash in slim format. - if len(slim.Root) == 0 { - account.Root = EmptyRootHash - } else { - account.Root = common.BytesToHash(slim.Root) - } - if len(slim.CodeHash) == 0 { - account.CodeHash = EmptyCodeHash[:] - } else { - account.CodeHash = slim.CodeHash - } - return &account, nil -} - -// FullAccountRLP converts data on the 'slim RLP' format into the full RLP-format. 
-func FullAccountRLP(data []byte) ([]byte, error) { - account, err := FullAccount(data) - if err != nil { - return nil, err - } - return rlp.EncodeToBytes(account) -} diff --git a/core/types/transaction.go b/core/types/transaction.go index 0dcbf20c00..34c185a7b7 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -28,6 +28,7 @@ package types import ( "bytes" + "container/heap" "errors" "io" "math/big" @@ -298,10 +299,10 @@ func (tx *Transaction) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.g // GasFeeCap returns the fee cap per gas of the transaction. func (tx *Transaction) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.gasFeeCap()) } -// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise. +// BlobGas returns the data gas limit of the transaction for blob transactions, 0 otherwise. func (tx *Transaction) BlobGas() uint64 { return tx.inner.blobGas() } -// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise. +// BlobGasFeeCap returns the data gas fee cap per data gas of the transaction for blob transactions, nil otherwise. func (tx *Transaction) BlobGasFeeCap() *big.Int { return tx.inner.blobGasFeeCap() } // BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise. @@ -403,19 +404,6 @@ func (tx *Transaction) BlobGasFeeCapIntCmp(other *big.Int) int { return tx.inner.blobGasFeeCap().Cmp(other) } -// SetTime sets the decoding time of a transaction. This is used by tests to set -// arbitrary times and by persistent transaction pools when loading old txs from -// disk. -func (tx *Transaction) SetTime(t time.Time) { - tx.time = t -} - -// Time returns the time when the transaction was first seen on the network. It -// is a heuristic to prefer mining older txs vs new all other things equal. -func (tx *Transaction) Time() time.Time { - return tx.time -} - // Hash returns the transaction hash. func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { @@ -461,6 +449,16 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e return &Transaction{inner: cpy, time: tx.time}, nil } +// FirstSeen is the time a transaction is first seen. +func (tx *Transaction) FirstSeen() time.Time { + return tx.time +} + +// SetFirstSeen overwrites the time a transaction is first seen. +func (tx *Transaction) SetFirstSeen(t time.Time) { + tx.time = t +} + // Transactions implements DerivableList for transactions. type Transactions []*Transaction @@ -524,6 +522,123 @@ func (s TxByNonce) Len() int { return len(s) } func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() } func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// TxWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap +type TxWithMinerFee struct { + Tx *Transaction + minerFee *big.Int +} + +// NewTxWithMinerFee creates a wrapped transaction, calculating the effective +// miner gasTipCap if a base fee is provided. +// Returns error in case of a negative effective miner gasTipCap.
+func NewTxWithMinerFee(tx *Transaction, baseFee *big.Int) (*TxWithMinerFee, error) { + minerFee, err := tx.EffectiveGasTip(baseFee) + if err != nil { + return nil, err + } + return &TxWithMinerFee{ + Tx: tx, + minerFee: minerFee, + }, nil +} + +// TxByPriceAndTime implements both the sort and the heap interface, making it useful +// for all at once sorting as well as individually adding and removing elements. +type TxByPriceAndTime []*TxWithMinerFee + +func (s TxByPriceAndTime) Len() int { return len(s) } +func (s TxByPriceAndTime) Less(i, j int) bool { + // If the prices are equal, use the time the transaction was first seen for + // deterministic sorting + cmp := s[i].minerFee.Cmp(s[j].minerFee) + if cmp == 0 { + return s[i].Tx.time.Before(s[j].Tx.time) + } + return cmp > 0 +} +func (s TxByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +func (s *TxByPriceAndTime) Push(x interface{}) { + *s = append(*s, x.(*TxWithMinerFee)) +} + +func (s *TxByPriceAndTime) Pop() interface{} { + old := *s + n := len(old) + x := old[n-1] + old[n-1] = nil + *s = old[0 : n-1] + return x +} + +// TransactionsByPriceAndNonce represents a set of transactions that can return +// transactions in a profit-maximizing sorted order, while supporting removing +// entire batches of transactions for non-executable accounts. +type TransactionsByPriceAndNonce struct { + txs map[common.Address]Transactions // Per account nonce-sorted list of transactions + heads TxByPriceAndTime // Next transaction for each unique account (price heap) + signer Signer // Signer for the set of transactions + baseFee *big.Int // Current base fee +} + +// NewTransactionsByPriceAndNonce creates a transaction set that can retrieve +// price sorted transactions in a nonce-honouring way. +// +// Note, the input map is reowned so the caller should not interact any more with +// it after providing it to the constructor. +func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transactions, baseFee *big.Int) *TransactionsByPriceAndNonce { + // Initialize a price and received time based heap with the head transactions + heads := make(TxByPriceAndTime, 0, len(txs)) + for from, accTxs := range txs { + acc, _ := Sender(signer, accTxs[0]) + wrapped, err := NewTxWithMinerFee(accTxs[0], baseFee) + // Remove transaction if sender doesn't match from, or if wrapping fails. + if acc != from || err != nil { + delete(txs, from) + continue + } + heads = append(heads, wrapped) + txs[from] = accTxs[1:] + } + heap.Init(&heads) + + // Assemble and return the transaction set + return &TransactionsByPriceAndNonce{ + txs: txs, + heads: heads, + signer: signer, + baseFee: baseFee, + } +} + +// Peek returns the next transaction by price. +func (t *TransactionsByPriceAndNonce) Peek() *Transaction { + if len(t.heads) == 0 { + return nil + } + return t.heads[0].Tx +} + +// Shift replaces the current best head with the next one from the same account. +func (t *TransactionsByPriceAndNonce) Shift() { + acc, _ := Sender(t.signer, t.heads[0].Tx) + if txs, ok := t.txs[acc]; ok && len(txs) > 0 { + if wrapped, err := NewTxWithMinerFee(txs[0], t.baseFee); err == nil { + t.heads[0], t.txs[acc] = wrapped, txs[1:] + heap.Fix(&t.heads, 0) + return + } + } + heap.Pop(&t.heads) +} + +// Pop removes the best transaction, *not* replacing it with the next one from +// the same account. This should be used when a transaction cannot be executed +// and hence all subsequent ones should be discarded from the same account.
+func (t *TransactionsByPriceAndNonce) Pop() { + heap.Pop(&t.heads) +} + // copyAddressPtr copies an address. func copyAddressPtr(a *common.Address) *common.Address { if a == nil { diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go index 2437a5b2f3..4d0dd2331f 100644 --- a/core/types/transaction_marshalling.go +++ b/core/types/transaction_marshalling.go @@ -47,7 +47,7 @@ type txJSON struct { GasPrice *hexutil.Big `json:"gasPrice"` MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` - MaxFeePerBlobGas *hexutil.Big `json:"maxFeePerBlobGas,omitempty"` + MaxFeePerDataGas *hexutil.Big `json:"maxFeePerDataGas,omitempty"` Value *hexutil.Big `json:"value"` Input *hexutil.Bytes `json:"input"` AccessList *AccessList `json:"accessList,omitempty"` @@ -55,32 +55,11 @@ type txJSON struct { V *hexutil.Big `json:"v"` R *hexutil.Big `json:"r"` S *hexutil.Big `json:"s"` - YParity *hexutil.Uint64 `json:"yParity,omitempty"` // Only used for encoding: Hash common.Hash `json:"hash"` } -// yParityValue returns the YParity value from JSON. For backwards-compatibility reasons, -// this can be given in the 'v' field or the 'yParity' field. If both exist, they must match. -func (tx *txJSON) yParityValue() (*big.Int, error) { - if tx.YParity != nil { - val := uint64(*tx.YParity) - if val != 0 && val != 1 { - return nil, errors.New("'yParity' field must be 0 or 1") - } - bigval := new(big.Int).SetUint64(val) - if tx.V != nil && tx.V.ToInt().Cmp(bigval) != 0 { - return nil, errors.New("'v' and 'yParity' fields do not match") - } - return bigval, nil - } - if tx.V != nil { - return tx.V.ToInt(), nil - } - return nil, errors.New("missing 'yParity' or 'v' field in transaction") -} - // MarshalJSON marshals as JSON with a hash. 
func (tx *Transaction) MarshalJSON() ([]byte, error) { var enc txJSON @@ -100,9 +79,6 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V) enc.R = (*hexutil.Big)(itx.R) enc.S = (*hexutil.Big)(itx.S) - if tx.Protected() { - enc.ChainID = (*hexutil.Big)(tx.ChainId()) - } case *AccessListTx: enc.ChainID = (*hexutil.Big)(itx.ChainID) @@ -116,8 +92,6 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V) enc.R = (*hexutil.Big)(itx.R) enc.S = (*hexutil.Big)(itx.S) - yparity := itx.V.Uint64() - enc.YParity = (*hexutil.Uint64)(&yparity) case *DynamicFeeTx: enc.ChainID = (*hexutil.Big)(itx.ChainID) @@ -132,8 +106,6 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V) enc.R = (*hexutil.Big)(itx.R) enc.S = (*hexutil.Big)(itx.S) - yparity := itx.V.Uint64() - enc.YParity = (*hexutil.Uint64)(&yparity) case *BlobTx: enc.ChainID = (*hexutil.Big)(itx.ChainID.ToBig()) @@ -141,7 +113,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.Gas = (*hexutil.Uint64)(&itx.Gas) enc.MaxFeePerGas = (*hexutil.Big)(itx.GasFeeCap.ToBig()) enc.MaxPriorityFeePerGas = (*hexutil.Big)(itx.GasTipCap.ToBig()) - enc.MaxFeePerBlobGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig()) + enc.MaxFeePerDataGas = (*hexutil.Big)(itx.BlobFeeCap.ToBig()) enc.Value = (*hexutil.Big)(itx.Value.ToBig()) enc.Input = (*hexutil.Bytes)(&itx.Data) enc.AccessList = &itx.AccessList @@ -150,8 +122,6 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { enc.V = (*hexutil.Big)(itx.V.ToBig()) enc.R = (*hexutil.Big)(itx.R.ToBig()) enc.S = (*hexutil.Big)(itx.S.ToBig()) - yparity := itx.V.Uint64() - enc.YParity = (*hexutil.Uint64)(&yparity) } return json.Marshal(&enc) } @@ -159,8 +129,7 @@ func (tx *Transaction) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. 
func (tx *Transaction) UnmarshalJSON(input []byte) error { var dec txJSON - err := json.Unmarshal(input, &dec) - if err != nil { + if err := json.Unmarshal(input, &dec); err != nil { return err } @@ -193,23 +162,20 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'input' in transaction") } itx.Data = *dec.Input - - // signature R + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } + itx.V = (*big.Int)(dec.V) if dec.R == nil { return errors.New("missing required field 'r' in transaction") } itx.R = (*big.Int)(dec.R) - // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } itx.S = (*big.Int)(dec.S) - // signature V - if dec.V == nil { - return errors.New("missing required field 'v' in transaction") - } - itx.V = (*big.Int)(dec.V) - if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { if err := sanityCheckSignature(itx.V, itx.R, itx.S, true); err != nil { return err } @@ -245,26 +211,23 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'input' in transaction") } itx.Data = *dec.Input + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } if dec.AccessList != nil { itx.AccessList = *dec.AccessList } - - // signature R + itx.V = (*big.Int)(dec.V) if dec.R == nil { return errors.New("missing required field 'r' in transaction") } itx.R = (*big.Int)(dec.R) - // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } itx.S = (*big.Int)(dec.S) - // signature V - itx.V, err = dec.yParityValue() - if err != nil { - return err - } - if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil { return err } @@ -310,23 +273,17 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { if dec.AccessList != nil { itx.AccessList = *dec.AccessList } - - // signature R + itx.V = (*big.Int)(dec.V) if dec.R == nil { return errors.New("missing required field 'r' in transaction") } itx.R = (*big.Int)(dec.R) - // signature S if dec.S == nil { return errors.New("missing required field 's' in transaction") } itx.S = (*big.Int)(dec.S) - // signature V - itx.V, err = dec.yParityValue() - if err != nil { - return err - } - if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil { return err } @@ -343,10 +300,9 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'nonce' in transaction") } itx.Nonce = uint64(*dec.Nonce) - if dec.To == nil { - return errors.New("missing required field 'to' in transaction") + if dec.To != nil { + itx.To = dec.To } - itx.To = *dec.To if dec.Gas == nil { return errors.New("missing required field 'gas' for txdata") } @@ -359,10 +315,10 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'maxFeePerGas' for txdata") } itx.GasFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerGas)) - if dec.MaxFeePerBlobGas == nil { - return errors.New("missing required field 
'maxFeePerBlobGas' for txdata") + if dec.MaxFeePerDataGas == nil { + return errors.New("missing required field 'maxFeePerDataGas' for txdata") } - itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerBlobGas)) + itx.BlobFeeCap = uint256.MustFromBig((*big.Int)(dec.MaxFeePerDataGas)) if dec.Value == nil { return errors.New("missing required field 'value' in transaction") } @@ -381,35 +337,18 @@ func (tx *Transaction) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'blobVersionedHashes' in transaction") } itx.BlobHashes = dec.BlobVersionedHashes - - // signature R - var ok bool + itx.V = uint256.MustFromBig((*big.Int)(dec.V)) if dec.R == nil { return errors.New("missing required field 'r' in transaction") } - itx.R, ok = uint256.FromBig((*big.Int)(dec.R)) - if !ok { - return errors.New("'r' value overflows uint256") - } - // signature S + itx.R = uint256.MustFromBig((*big.Int)(dec.R)) if dec.S == nil { return errors.New("missing required field 's' in transaction") } - itx.S, ok = uint256.FromBig((*big.Int)(dec.S)) - if !ok { - return errors.New("'s' value overflows uint256") - } - // signature V - vbig, err := dec.yParityValue() - if err != nil { - return err - } - itx.V, ok = uint256.FromBig(vbig) - if !ok { - return errors.New("'v' value overflows uint256") - } - if itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 { - if err := sanityCheckSignature(vbig, itx.R.ToBig(), itx.S.ToBig(), false); err != nil { + itx.S = uint256.MustFromBig((*big.Int)(dec.S)) + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { + if err := sanityCheckSignature(itx.V.ToBig(), itx.R.ToBig(), itx.S.ToBig(), false); err != nil { return err } } diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 25d727a186..1a69404f06 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -49,7 +49,7 @@ type sigCache struct { // MakeSigner returns a Signer based on the given chain config and block number or time. func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint64) Signer { switch { - case config.IsCancun(blockNumber, blockTime): + case config.IsCancun(blockTime): return NewCancunSigner(config.ChainID) case config.IsSubnetEVM(blockTime): return NewLondonSigner(config.ChainID) @@ -63,9 +63,9 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int, blockTime uint } // LatestSigner returns the 'most permissive' Signer available for the given chain -// configuration. Specifically, this enables support of all types of transacrions -// when their respective forks are scheduled to occur at any block number (or time) -// in the chain config. +// configuration. Specifically, this enables support of EIP-155 replay protection and +// EIP-2930 access list transactions when their respective forks are scheduled to occur at +// any block number in the chain config. // // Use this in transaction-handling code where the current block number is unknown. If you // have the current block number available, use MakeSigner instead. 
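Note on the signer hunks above: after the revert, MakeSigner keys the Cancun check off the block timestamp alone, and LatestSigner stays the permissive fallback for code that does not know the current block. A minimal sketch of the caller side, assuming subnet-evm's core/types package and go-ethereum's crypto helpers; the chain ID here is illustrative, not one the patch defines:

    package main

    import (
        "fmt"
        "math/big"

        "github.com/ava-labs/subnet-evm/core/types"
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/crypto"
    )

    func main() {
        key, err := crypto.GenerateKey()
        if err != nil {
            panic(err)
        }
        chainID := big.NewInt(99999) // illustrative chain ID
        // LatestSignerForChainID is the permissive choice when the current
        // block is unknown; prefer MakeSigner when it is known.
        signer := types.LatestSignerForChainID(chainID)
        tx := types.NewTx(&types.DynamicFeeTx{
            ChainID:   chainID,
            Nonce:     0,
            To:        &common.Address{},
            Gas:       21000,
            GasFeeCap: big.NewInt(100),
            GasTipCap: big.NewInt(1),
            Value:     big.NewInt(0),
        })
        signed, err := types.SignTx(tx, signer, key)
        if err != nil {
            panic(err)
        }
        from, _ := types.Sender(signer, signed)
        fmt.Println("recovered sender:", from.Hex())
    }

Run against the reverted code, the recovered sender matches the generated key's address regardless of which fork rules are active.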
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 1e583b90f4..995afbb9a0 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -33,8 +33,10 @@ import ( "errors" "fmt" "math/big" + "math/rand" "reflect" "testing" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -267,6 +269,152 @@ func TestRecipientNormal(t *testing.T) { } } +func TestTransactionPriceNonceSortLegacy(t *testing.T) { + testTransactionPriceNonceSort(t, nil) +} + +func TestTransactionPriceNonceSort1559(t *testing.T) { + testTransactionPriceNonceSort(t, big.NewInt(0)) + testTransactionPriceNonceSort(t, big.NewInt(5)) + testTransactionPriceNonceSort(t, big.NewInt(50)) +} + +// Tests that transactions can be correctly sorted according to their price in +// decreasing order, but at the same time with increasing nonces when issued by +// the same account. +func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { + // Generate a batch of accounts to start with + keys := make([]*ecdsa.PrivateKey, 25) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + } + signer := LatestSignerForChainID(common.Big1) + + // Generate a batch of transactions with overlapping values, but shifted nonces + groups := map[common.Address]Transactions{} + expectedCount := 0 + for start, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + count := 25 + for i := 0; i < 25; i++ { + var tx *Transaction + gasFeeCap := rand.Intn(50) + if baseFee == nil { + tx = NewTx(&LegacyTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasPrice: big.NewInt(int64(gasFeeCap)), + Data: nil, + }) + } else { + tx = NewTx(&DynamicFeeTx{ + Nonce: uint64(start + i), + To: &common.Address{}, + Value: big.NewInt(100), + Gas: 100, + GasFeeCap: big.NewInt(int64(gasFeeCap)), + GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), + Data: nil, + }) + if count == 25 && int64(gasFeeCap) < baseFee.Int64() { + count = i + } + } + tx, err := SignTx(tx, signer, key) + if err != nil { + t.Fatalf("failed to sign tx: %s", err) + } + groups[addr] = append(groups[addr], tx) + } + expectedCount += count + } + // Sort the transactions and cross check the nonce ordering + txset := NewTransactionsByPriceAndNonce(signer, groups, baseFee) + + txs := Transactions{} + for tx := txset.Peek(); tx != nil; tx = txset.Peek() { + txs = append(txs, tx) + txset.Shift() + } + if len(txs) != expectedCount { + t.Errorf("expected %d transactions, found %d", expectedCount, len(txs)) + } + for i, txi := range txs { + fromi, _ := Sender(signer, txi) + + // Make sure the nonce order is valid + for j, txj := range txs[i+1:] { + fromj, _ := Sender(signer, txj) + if fromi == fromj && txi.Nonce() > txj.Nonce() { + t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) + } + } + // If the next tx has different from account, the price must be lower than the current one + if i+1 < len(txs) { + next := txs[i+1] + fromNext, _ := Sender(signer, next) + tip, err := txi.EffectiveGasTip(baseFee) + nextTip, nextErr := next.EffectiveGasTip(baseFee) + if err != nil || nextErr != nil { + t.Errorf("error calculating effective tip") + } + if fromi != fromNext && tip.Cmp(nextTip) < 0 { + t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) + } + } + } +} + +// Tests that if 
multiple transactions have the same price, the ones seen earlier +// are prioritized to avoid network spam attacks aiming for a specific ordering. +func TestTransactionTimeSort(t *testing.T) { + // Generate a batch of accounts to start with + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + } + signer := HomesteadSigner{} + + // Generate a batch of transactions with overlapping prices, but different creation times + groups := map[common.Address]Transactions{} + for start, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + + tx, _ := SignTx(NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key) + tx.time = time.Unix(0, int64(len(keys)-start)) + + groups[addr] = append(groups[addr], tx) + } + // Sort the transactions and cross check the nonce ordering + txset := NewTransactionsByPriceAndNonce(signer, groups, nil) + + txs := Transactions{} + for tx := txset.Peek(); tx != nil; tx = txset.Peek() { + txs = append(txs, tx) + txset.Shift() + } + if len(txs) != len(keys) { + t.Errorf("expected %d transactions, found %d", len(keys), len(txs)) + } + for i, txi := range txs { + fromi, _ := Sender(signer, txi) + if i+1 < len(txs) { + next := txs[i+1] + fromNext, _ := Sender(signer, next) + + if txi.GasPrice().Cmp(next.GasPrice()) < 0 { + t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) + } + // Make sure time order is ascending if the txs have the same gas price + if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.time.After(next.time) { + t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.time, i+1, fromNext[:4], next.time) + } + } + } +} + // TestTransactionCoding tests serializing/de-serializing to/from rlp and JSON. func TestTransactionCoding(t *testing.T) { key, err := crypto.GenerateKey() @@ -388,7 +536,7 @@ func assertEqual(orig *Transaction, cpy *Transaction) error { } if orig.AccessList() != nil { if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) { - return errors.New("access list wrong!") + return fmt.Errorf("access list wrong!") } } return nil diff --git a/core/types/tx_blob.go b/core/types/tx_blob.go index f97b1bf3c5..7ae956355f 100644 --- a/core/types/tx_blob.go +++ b/core/types/tx_blob.go @@ -31,11 +31,11 @@ type BlobTx struct { GasTipCap *uint256.Int // a.k.a. maxPriorityFeePerGas GasFeeCap *uint256.Int // a.k.a. maxFeePerGas Gas uint64 - To common.Address + To *common.Address `rlp:"nil"` // nil means contract creation Value *uint256.Int Data []byte AccessList AccessList - BlobFeeCap *uint256.Int // a.k.a. maxFeePerBlobGas + BlobFeeCap *uint256.Int // a.k.a. maxFeePerDataGas BlobHashes []common.Hash // Signature values @@ -48,7 +48,7 @@ type BlobTx struct { func (tx *BlobTx) copy() TxData { cpy := &BlobTx{ Nonce: tx.Nonce, - To: tx.To, + To: copyAddressPtr(tx.To), Data: common.CopyBytes(tx.Data), Gas: tx.Gas, // These are copied below. 
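The two tests above pin down the contract of the restored TransactionsByPriceAndNonce type: Peek returns the current best transaction, Shift advances within the same sender, and Pop abandons the sender's remaining batch. A hedged sketch of the intended consumer loop (drain and process are illustrative names, not part of the package):

    // drain sketches the consumption pattern for the heap-backed set restored
    // above; process stands in for miner/worker execution logic.
    func drain(set *types.TransactionsByPriceAndNonce, process func(*types.Transaction) error) {
        for tx := set.Peek(); tx != nil; tx = set.Peek() {
            if err := process(tx); err != nil {
                // The sender's current nonce failed, so every later nonce from
                // the same account is unexecutable: drop the whole batch.
                set.Pop()
                continue
            }
            // Success: advance to this sender's next nonce, re-fixing the heap.
            set.Shift()
        }
    }

The Shift/Pop split is the design point the tests exercise: price orders across accounts, nonce orders within an account, and first-seen time breaks price ties.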
@@ -104,8 +104,8 @@ func (tx *BlobTx) gasTipCap() *big.Int { return tx.GasTipCap.ToBig() } func (tx *BlobTx) gasPrice() *big.Int { return tx.GasFeeCap.ToBig() } func (tx *BlobTx) value() *big.Int { return tx.Value.ToBig() } func (tx *BlobTx) nonce() uint64 { return tx.Nonce } -func (tx *BlobTx) to() *common.Address { tmp := tx.To; return &tmp } -func (tx *BlobTx) blobGas() uint64 { return params.BlobTxBlobGasPerBlob * uint64(len(tx.BlobHashes)) } +func (tx *BlobTx) to() *common.Address { return tx.To } +func (tx *BlobTx) blobGas() uint64 { return params.BlobTxDataGasPerBlob * uint64(len(tx.BlobHashes)) } func (tx *BlobTx) blobGasFeeCap() *big.Int { return tx.BlobFeeCap.ToBig() } func (tx *BlobTx) blobHashes() []common.Hash { return tx.BlobHashes } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 879137a10a..aac80bf47e 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -43,7 +43,6 @@ import ( "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" - "github.com/ethereum/go-ethereum/crypto/kzg4844" "golang.org/x/crypto/ripemd160" ) @@ -105,21 +104,6 @@ var PrecompiledContractsBerlin = map[common.Address]contract.StatefulPrecompiled common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), } -// PrecompiledContractsCancun contains the default set of pre-compiled Ethereum -// contracts used in the Cancun release. -var PrecompiledContractsCancun = map[common.Address]contract.StatefulPrecompiledContract{ - common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), - common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), - common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), - common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}), - common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}), - common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}), - common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}), - common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}), - common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), - common.BytesToAddress([]byte{0x0a}): newWrappedPrecompiledContract(&kzgPointEvaluation{}), -} - // PrecompiledContractsBLS contains the set of pre-compiled Ethereum // contracts specified in EIP-2537. These are exported for testing purposes. var PrecompiledContractsBLS = map[common.Address]contract.StatefulPrecompiledContract{ @@ -135,7 +119,6 @@ var PrecompiledContractsBLS = map[common.Address]contract.StatefulPrecompiledCon } var ( - PrecompiledAddressesCancun []common.Address PrecompiledAddressesBerlin []common.Address PrecompiledAddressesIstanbul []common.Address PrecompiledAddressesByzantium []common.Address @@ -157,9 +140,6 @@ func init() { for k := range PrecompiledContractsBerlin { PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k) } - for k := range PrecompiledContractsCancun { - PrecompiledAddressesCancun = append(PrecompiledAddressesCancun, k) - } for k := range PrecompiledContractsBLS { PrecompiledAddressesBLS = append(PrecompiledAddressesBLS, k) } @@ -170,7 +150,6 @@ func init() { addrsList := append(PrecompiledAddressesHomestead, PrecompiledAddressesByzantium...) addrsList = append(addrsList, PrecompiledAddressesIstanbul...) 
addrsList = append(addrsList, PrecompiledAddressesBerlin...) - addrsList = append(addrsList, PrecompiledAddressesCancun...) addrsList = append(addrsList, PrecompiledAddressesBLS...) for _, k := range addrsList { PrecompileAllNativeAddresses[k] = struct{}{} @@ -189,8 +168,6 @@ func init() { // ActivePrecompiles returns the precompiles enabled with the current configuration. func ActivePrecompiles(rules params.Rules) []common.Address { switch { - case rules.IsCancun: - return PrecompiledAddressesCancun case rules.IsSubnetEVM: return PrecompiledAddressesBerlin case rules.IsIstanbul: @@ -1110,67 +1087,3 @@ func (c *bls12381MapG2) Run(input []byte) ([]byte, error) { // Encode the G2 point to 256 bytes return g.EncodePoint(r), nil } - -// kzgPointEvaluation implements the EIP-4844 point evaluation precompile. -type kzgPointEvaluation struct{} - -// RequiredGas estimates the gas required for running the point evaluation precompile. -func (b *kzgPointEvaluation) RequiredGas(input []byte) uint64 { - return params.BlobTxPointEvaluationPrecompileGas -} - -const ( - blobVerifyInputLength = 192 // Max input length for the point evaluation precompile. - blobCommitmentVersionKZG uint8 = 0x01 // Version byte for the point evaluation precompile. - blobPrecompileReturnValue = "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001" -) - -var ( - errBlobVerifyInvalidInputLength = errors.New("invalid input length") - errBlobVerifyMismatchedVersion = errors.New("mismatched versioned hash") - errBlobVerifyKZGProof = errors.New("error verifying kzg proof") -) - -// Run executes the point evaluation precompile. -func (b *kzgPointEvaluation) Run(input []byte) ([]byte, error) { - if len(input) != blobVerifyInputLength { - return nil, errBlobVerifyInvalidInputLength - } - // versioned hash: first 32 bytes - var versionedHash common.Hash - copy(versionedHash[:], input[:]) - - var ( - point kzg4844.Point - claim kzg4844.Claim - ) - // Evaluation point: next 32 bytes - copy(point[:], input[32:]) - // Expected output: next 32 bytes - copy(claim[:], input[64:]) - - // input kzg point: next 48 bytes - var commitment kzg4844.Commitment - copy(commitment[:], input[96:]) - if kZGToVersionedHash(commitment) != versionedHash { - return nil, errBlobVerifyMismatchedVersion - } - - // Proof: next 48 bytes - var proof kzg4844.Proof - copy(proof[:], input[144:]) - - if err := kzg4844.VerifyProof(commitment, point, claim, proof); err != nil { - return nil, fmt.Errorf("%w: %v", errBlobVerifyKZGProof, err) - } - - return common.Hex2Bytes(blobPrecompileReturnValue), nil -} - -// kZGToVersionedHash implements kzg_to_versioned_hash from EIP-4844 -func kZGToVersionedHash(kzg kzg4844.Commitment) common.Hash { - h := sha256.Sum256(kzg[:]) - h[0] = blobCommitmentVersionKZG - - return h -} diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index 8608fdf36d..e4114a046e 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -66,17 +66,15 @@ var allPrecompiles = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, common.BytesToAddress([]byte{9}): &blake2F{}, - common.BytesToAddress([]byte{0x0a}): &kzgPointEvaluation{}, - - common.BytesToAddress([]byte{0x0f, 0x0a}): &bls12381G1Add{}, - common.BytesToAddress([]byte{0x0f, 0x0b}): &bls12381G1Mul{}, - common.BytesToAddress([]byte{0x0f, 0x0c}): &bls12381G1MultiExp{}, - 
common.BytesToAddress([]byte{0x0f, 0x0d}): &bls12381G2Add{}, - common.BytesToAddress([]byte{0x0f, 0x0e}): &bls12381G2Mul{}, - common.BytesToAddress([]byte{0x0f, 0x0f}): &bls12381G2MultiExp{}, - common.BytesToAddress([]byte{0x0f, 0x10}): &bls12381Pairing{}, - common.BytesToAddress([]byte{0x0f, 0x11}): &bls12381MapG1{}, - common.BytesToAddress([]byte{0x0f, 0x12}): &bls12381MapG2{}, + common.BytesToAddress([]byte{10}): &bls12381G1Add{}, + common.BytesToAddress([]byte{11}): &bls12381G1Mul{}, + common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{}, + common.BytesToAddress([]byte{13}): &bls12381G2Add{}, + common.BytesToAddress([]byte{14}): &bls12381G2Mul{}, + common.BytesToAddress([]byte{15}): &bls12381G2MultiExp{}, + common.BytesToAddress([]byte{16}): &bls12381Pairing{}, + common.BytesToAddress([]byte{17}): &bls12381MapG1{}, + common.BytesToAddress([]byte{18}): &bls12381MapG2{}, } // EIP-152 test vectors @@ -314,38 +312,36 @@ func benchJson(name, addr string, b *testing.B) { } } -func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "f0a", t) } -func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "f0b", t) } -func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "f0c", t) } -func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "f0d", t) } -func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "f0e", t) } -func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "f0f", t) } -func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "f10", t) } -func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "f11", t) } -func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "f12", t) } - -func TestPrecompiledPointEvaluation(t *testing.T) { testJson("pointEvaluation", "0a", t) } - -func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", "f0a", b) } -func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "f0b", b) } -func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "f0c", b) } -func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "f0d", b) } -func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "f0e", b) } -func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "f0f", b) } -func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "f10", b) } -func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "f11", b) } -func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "f12", b) } +func TestPrecompiledBLS12381G1Add(t *testing.T) { testJson("blsG1Add", "0a", t) } +func TestPrecompiledBLS12381G1Mul(t *testing.T) { testJson("blsG1Mul", "0b", t) } +func TestPrecompiledBLS12381G1MultiExp(t *testing.T) { testJson("blsG1MultiExp", "0c", t) } +func TestPrecompiledBLS12381G2Add(t *testing.T) { testJson("blsG2Add", "0d", t) } +func TestPrecompiledBLS12381G2Mul(t *testing.T) { testJson("blsG2Mul", "0e", t) } +func TestPrecompiledBLS12381G2MultiExp(t *testing.T) { testJson("blsG2MultiExp", "0f", t) } +func TestPrecompiledBLS12381Pairing(t *testing.T) { testJson("blsPairing", "10", t) } +func TestPrecompiledBLS12381MapG1(t *testing.T) { testJson("blsMapG1", "11", t) } +func TestPrecompiledBLS12381MapG2(t *testing.T) { testJson("blsMapG2", "12", t) } + +func BenchmarkPrecompiledBLS12381G1Add(b *testing.B) { benchJson("blsG1Add", 
"0a", b) } +func BenchmarkPrecompiledBLS12381G1Mul(b *testing.B) { benchJson("blsG1Mul", "0b", b) } +func BenchmarkPrecompiledBLS12381G1MultiExp(b *testing.B) { benchJson("blsG1MultiExp", "0c", b) } +func BenchmarkPrecompiledBLS12381G2Add(b *testing.B) { benchJson("blsG2Add", "0d", b) } +func BenchmarkPrecompiledBLS12381G2Mul(b *testing.B) { benchJson("blsG2Mul", "0e", b) } +func BenchmarkPrecompiledBLS12381G2MultiExp(b *testing.B) { benchJson("blsG2MultiExp", "0f", b) } +func BenchmarkPrecompiledBLS12381Pairing(b *testing.B) { benchJson("blsPairing", "10", b) } +func BenchmarkPrecompiledBLS12381MapG1(b *testing.B) { benchJson("blsMapG1", "11", b) } +func BenchmarkPrecompiledBLS12381MapG2(b *testing.B) { benchJson("blsMapG2", "12", b) } // Failure tests -func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "f0a", t) } -func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "f0b", t) } -func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "f0c", t) } -func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "f0d", t) } -func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "f0e", t) } -func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "f0f", t) } -func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "f10", t) } -func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "f11", t) } -func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "f12", t) } +func TestPrecompiledBLS12381G1AddFail(t *testing.T) { testJsonFail("blsG1Add", "0a", t) } +func TestPrecompiledBLS12381G1MulFail(t *testing.T) { testJsonFail("blsG1Mul", "0b", t) } +func TestPrecompiledBLS12381G1MultiExpFail(t *testing.T) { testJsonFail("blsG1MultiExp", "0c", t) } +func TestPrecompiledBLS12381G2AddFail(t *testing.T) { testJsonFail("blsG2Add", "0d", t) } +func TestPrecompiledBLS12381G2MulFail(t *testing.T) { testJsonFail("blsG2Mul", "0e", t) } +func TestPrecompiledBLS12381G2MultiExpFail(t *testing.T) { testJsonFail("blsG2MultiExp", "0f", t) } +func TestPrecompiledBLS12381PairingFail(t *testing.T) { testJsonFail("blsPairing", "10", t) } +func TestPrecompiledBLS12381MapG1Fail(t *testing.T) { testJsonFail("blsMapG1", "11", t) } +func TestPrecompiledBLS12381MapG2Fail(t *testing.T) { testJsonFail("blsMapG2", "12", t) } func loadJson(name string) ([]precompiledTest, error) { data, err := os.ReadFile(fmt.Sprintf("testdata/precompiles/%v.json", name)) diff --git a/core/vm/eips.go b/core/vm/eips.go index 3a96d275fb..254062d09a 100644 --- a/core/vm/eips.go +++ b/core/vm/eips.go @@ -37,8 +37,6 @@ import ( ) var activators = map[int]func(*JumpTable){ - 5656: enable5656, - 6780: enable6780, 3855: enable3855, 3860: enable3860, 3198: enable3198, @@ -238,69 +236,9 @@ func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by return nil, nil } -// enable3860 enables "EIP-3860: Limit and meter initcode" +// ebnable3860 enables "EIP-3860: Limit and meter initcode" // https://eips.ethereum.org/EIPS/eip-3860 func enable3860(jt *JumpTable) { jt[CREATE].dynamicGas = gasCreateEip3860 jt[CREATE2].dynamicGas = gasCreate2Eip3860 } - -// enable5656 enables EIP-5656 (MCOPY opcode) -// https://eips.ethereum.org/EIPS/eip-5656 -func enable5656(jt *JumpTable) { - jt[MCOPY] = &operation{ - execute: opMcopy, - constantGas: GasFastestStep, - dynamicGas: gasMcopy, - minStack: minStack(3, 0), - maxStack: 
maxStack(3, 0), - memorySize: memoryMcopy, - } -} - -// opMcopy implements the MCOPY opcode (https://eips.ethereum.org/EIPS/eip-5656) -func opMcopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - var ( - dst = scope.Stack.pop() - src = scope.Stack.pop() - length = scope.Stack.pop() - ) - // These values are checked for overflow during memory expansion calculation - // (the memorySize function on the opcode). - scope.Memory.Copy(dst.Uint64(), src.Uint64(), length.Uint64()) - return nil, nil -} - -// opBlobHash implements the BLOBHASH opcode -func opBlobHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - index := scope.Stack.peek() - if index.LtUint64(uint64(len(interpreter.evm.TxContext.BlobHashes))) { - blobHash := interpreter.evm.TxContext.BlobHashes[index.Uint64()] - index.SetBytes32(blobHash[:]) - } else { - index.Clear() - } - return nil, nil -} - -// enable4844 applies EIP-4844 (DATAHASH opcode) -func enable4844(jt *JumpTable) { - // New opcode - jt[BLOBHASH] = &operation{ - execute: opBlobHash, - constantGas: GasFastestStep, - minStack: minStack(1, 1), - maxStack: maxStack(1, 1), - } -} - -// enable6780 applies EIP-6780 (deactivate SELFDESTRUCT) -func enable6780(jt *JumpTable) { - jt[SELFDESTRUCT] = &operation{ - execute: opSelfdestruct6780, - dynamicGas: gasSelfdestructEIP3529, - constantGas: params.SelfdestructGasEIP150, - minStack: minStack(1, 0), - maxStack: maxStack(1, 0), - } -} diff --git a/core/vm/evm.go b/core/vm/evm.go index 6a44887f5e..45394be102 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -33,7 +33,6 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/subnet-evm/constants" - "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/precompile/contract" "github.com/ava-labs/subnet-evm/precompile/contracts/deployerallowlist" @@ -61,6 +60,10 @@ func IsProhibited(addr common.Address) bool { return modules.ReservedAddress(addr) } +// emptyCodeHash is used by create to ensure deployment is disallowed to already +// deployed contract addresses (relevant after the account abstraction). 
+var emptyCodeHash = crypto.Keccak256Hash(nil) + type ( // CanTransferFunc is the signature of a transfer guard function CanTransferFunc func(StateDB, common.Address, *big.Int) bool @@ -74,8 +77,6 @@ type ( func (evm *EVM) precompile(addr common.Address) (contract.StatefulPrecompiledContract, bool) { var precompiles map[common.Address]contract.StatefulPrecompiledContract switch { - case evm.chainRules.IsCancun: - precompiles = PrecompiledContractsCancun case evm.chainRules.IsSubnetEVM: precompiles = PrecompiledContractsBerlin case evm.chainRules.IsIstanbul: @@ -116,13 +117,12 @@ type BlockContext struct { PredicateResults *predicate.Results // Block information - Coinbase common.Address // Provides information for COINBASE - GasLimit uint64 // Provides information for GASLIMIT - BlockNumber *big.Int // Provides information for NUMBER - Time uint64 // Provides information for TIME - Difficulty *big.Int // Provides information for DIFFICULTY - BaseFee *big.Int // Provides information for BASEFEE - ExcessBlobGas *uint64 // ExcessBlobGas field in the header, needed to compute the data + Coinbase common.Address // Provides information for COINBASE + GasLimit uint64 // Provides information for GASLIMIT + BlockNumber *big.Int // Provides information for NUMBER + Time uint64 // Provides information for TIME + Difficulty *big.Int // Provides information for DIFFICULTY + BaseFee *big.Int // Provides information for BASEFEE } func (b *BlockContext) Number() *big.Int { @@ -144,9 +144,8 @@ func (b *BlockContext) GetPredicateResults(txHash common.Hash, address common.Ad // All fields can change between transactions. type TxContext struct { // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE - BlobHashes []common.Hash // Provides information for BLOBHASH + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE } // EVM is the Ethereum Virtual Machine base object and provides @@ -194,7 +193,7 @@ func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig StateDB: statedb, Config: config, chainConfig: chainConfig, - chainRules: chainConfig.Rules(blockCtx.BlockNumber, blockCtx.Time), + chainRules: chainConfig.AvalancheRules(blockCtx.BlockNumber, blockCtx.Time), } evm.interpreter = NewEVMInterpreter(evm) return evm @@ -242,8 +241,7 @@ func (evm *EVM) Interpreter() *EVMInterpreter { func (evm *EVM) SetBlockContext(blockCtx BlockContext) { evm.Context = blockCtx num := blockCtx.BlockNumber - timestamp := blockCtx.Time - evm.chainRules = evm.chainConfig.Rules(num, timestamp) + evm.chainRules = evm.chainConfig.AvalancheRules(num, blockCtx.Time) } // Call executes the contract associated with the addr with the given input as @@ -528,7 +526,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64, } // Ensure there's no existing contract already at the designated address contractHash := evm.StateDB.GetCodeHash(address) - if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != types.EmptyCodeHash) { + if evm.StateDB.GetNonce(address) != 0 || (contractHash != (common.Hash{}) && contractHash != emptyCodeHash) { return nil, common.Address{}, 0, vmerrs.ErrContractAddressCollision } // If the allow list is enabled, check that [evm.TxContext.Origin] has permission to deploy a contract. 
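The create path above swaps types.EmptyCodeHash back for the package-local emptyCodeHash, but the collision rule itself is unchanged: an address can only host a new contract if its nonce is zero and it carries either no code hash at all or the hash of empty code. Restated as a standalone predicate (an illustrative sketch; addressUsableForDeploy is not part of the package, and StateDB is the core/vm interface shown later in this patch):

    // addressUsableForDeploy restates the collision check in EVM.create:
    // deployment must fail if the target already has a nonce or non-empty code.
    var emptyCodeHash = crypto.Keccak256Hash(nil) // keccak256 of zero-length code

    func addressUsableForDeploy(db vm.StateDB, addr common.Address) bool {
        contractHash := db.GetCodeHash(addr)
        return db.GetNonce(addr) == 0 &&
            (contractHash == (common.Hash{}) || contractHash == emptyCodeHash)
    }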
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index a8bf40a326..36f0251301 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -71,7 +71,6 @@ func memoryGasCost(mem *Memory, newMemSize uint64) (uint64, error) { // as argument: // CALLDATACOPY (stack position 2) // CODECOPY (stack position 2) -// MCOPY (stack position 2) // EXTCODECOPY (stack position 3) // RETURNDATACOPY (stack position 2) func memoryCopierGas(stackpos int) gasFunc { @@ -101,7 +100,6 @@ func memoryCopierGas(stackpos int) gasFunc { var ( gasCallDataCopy = memoryCopierGas(2) gasCodeCopy = memoryCopierGas(2) - gasMcopy = memoryCopierGas(2) gasExtCodeCopy = memoryCopierGas(3) gasReturnDataCopy = memoryCopierGas(2) ) @@ -483,7 +481,7 @@ func gasSelfdestruct(evm *EVM, contract *Contract, stack *Stack, mem *Memory, me } } - if !evm.StateDB.HasSelfDestructed(contract.Address()) { + if !evm.StateDB.HasSuicided(contract.Address()) { evm.StateDB.AddRefund(params.SelfdestructRefundGas) } return gas, nil diff --git a/core/vm/instructions.go b/core/vm/instructions.go index d36de0f053..e02712b887 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -418,7 +418,7 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // emptyCodeHash. If the precompile account is not transferred any amount on a private or // customized chain, the return value will be zero. // -// 5. Caller tries to get the code hash for an account which is marked as self-destructed +// 5. Caller tries to get the code hash for an account which is marked as suicided // in the current transaction, the code hash of this account should be returned. // // 6. Caller tries to get the code hash for an account which is marked as deleted, this @@ -824,23 +824,7 @@ func opSelfdestruct(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext beneficiary := scope.Stack.pop() balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) - interpreter.evm.StateDB.SelfDestruct(scope.Contract.Address()) - if tracer := interpreter.evm.Config.Tracer; tracer != nil { - tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) - tracer.CaptureExit([]byte{}, 0, nil) - } - return nil, errStopToken -} - -func opSelfdestruct6780(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { - if interpreter.readOnly { - return nil, vmerrs.ErrWriteProtection - } - beneficiary := scope.Stack.pop() - balance := interpreter.evm.StateDB.GetBalance(scope.Contract.Address()) - interpreter.evm.StateDB.SubBalance(scope.Contract.Address(), balance) - interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) - interpreter.evm.StateDB.Selfdestruct6780(scope.Contract.Address()) + interpreter.evm.StateDB.Suicide(scope.Contract.Address()) if tracer := interpreter.evm.Config.Tracer; tracer != nil { tracer.CaptureEnter(SELFDESTRUCT, scope.Contract.Address(), beneficiary.Bytes20(), []byte{}, 0, balance) tracer.CaptureExit([]byte{}, 0, nil) diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index ce36b18bc8..ca8f46e146 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -32,16 +32,13 @@ import ( "fmt" "math/big" "os" - "strings" "testing" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/state" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/params" - "github.com/ava-labs/subnet-evm/vmerrs" 
"github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" "github.com/holiman/uint256" ) @@ -739,7 +736,7 @@ func TestRandom(t *testing.T) { for _, tt := range []testcase{ {name: "empty hash", random: common.Hash{}}, {name: "1", random: common.Hash{0}}, - {name: "emptyCodeHash", random: types.EmptyCodeHash}, + {name: "emptyCodeHash", random: emptyCodeHash}, {name: "hash(0x010203)", random: crypto.Keccak256Hash([]byte{0x01, 0x02, 0x03})}, } { var ( @@ -763,183 +760,3 @@ func TestRandom(t *testing.T) { } } } - -func TestBlobHash(t *testing.T) { - type testcase struct { - name string - idx uint64 - expect common.Hash - hashes []common.Hash - } - var ( - zero = common.Hash{0} - one = common.Hash{1} - two = common.Hash{2} - three = common.Hash{3} - ) - for _, tt := range []testcase{ - {name: "[{1}]", idx: 0, expect: one, hashes: []common.Hash{one}}, - {name: "[1,{2},3]", idx: 2, expect: three, hashes: []common.Hash{one, two, three}}, - {name: "out-of-bounds (empty)", idx: 10, expect: zero, hashes: []common.Hash{}}, - {name: "out-of-bounds", idx: 25, expect: zero, hashes: []common.Hash{one, two, three}}, - {name: "out-of-bounds (nil)", idx: 25, expect: zero, hashes: nil}, - } { - var ( - env = NewEVM(BlockContext{}, TxContext{BlobHashes: tt.hashes}, nil, params.TestChainConfig, Config{}) - stack = newstack() - pc = uint64(0) - evmInterpreter = env.interpreter - ) - stack.push(uint256.NewInt(tt.idx)) - opBlobHash(&pc, evmInterpreter, &ScopeContext{nil, stack, nil}) - if len(stack.data) != 1 { - t.Errorf("Expected one item on stack after %v, got %d: ", tt.name, len(stack.data)) - } - actual := stack.pop() - expected, overflow := uint256.FromBig(new(big.Int).SetBytes(tt.expect.Bytes())) - if overflow { - t.Errorf("Testcase %v: invalid overflow", tt.name) - } - if actual.Cmp(expected) != 0 { - t.Errorf("Testcase %v: expected %x, got %x", tt.name, expected, actual) - } - } -} - -func TestOpMCopy(t *testing.T) { - // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases - for i, tc := range []struct { - dst, src, len string - pre string - want string - wantGas uint64 - }{ - { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0. - dst: "0x0", src: "0x20", len: "0x20", - pre: "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", - want: "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", - wantGas: 6, - }, - - { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0. - dst: "0x0", src: "0x0", len: "0x20", - pre: "0101010101010101010101010101010101010101010101010101010101010101", - want: "0101010101010101010101010101010101010101010101010101010101010101", - wantGas: 6, - }, - { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping). - dst: "0x0", src: "0x1", len: "0x8", - pre: "000102030405060708 000000000000000000000000000000000000000000000000", - want: "010203040506070808 000000000000000000000000000000000000000000000000", - wantGas: 6, - }, - { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping). 
- dst: "0x1", src: "0x0", len: "0x8", - pre: "000102030405060708 000000000000000000000000000000000000000000000000", - want: "000001020304050607 000000000000000000000000000000000000000000000000", - wantGas: 6, - }, - // Tests below are not in the EIP, but maybe should be added - { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping). - dst: "0xFFFFFFFFFFFF", src: "0xFFFFFFFFFFFF", len: "0x0", - pre: "11", - want: "11", - wantGas: 3, - }, - { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds. - dst: "0xFFFFFFFFFFFF", src: "0x0", len: "0x0", - pre: "11", - want: "11", - wantGas: 3, - }, - { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem - dst: "0x0", src: "0xFFFFFFFFFFFF", len: "0x0", - pre: "11", - want: "11", - wantGas: 3, - }, - { // MCOPY - copy 1 from space outside of uint64 space - dst: "0x0", src: "0x10000000000000000", len: "0x1", - pre: "0", - }, - { // MCOPY - copy 1 from 0 to space outside of uint64 - dst: "0x10000000000000000", src: "0x0", len: "0x1", - pre: "0", - }, - { // MCOPY - copy nothing from 0 to space outside of uint64 - dst: "0x10000000000000000", src: "0x0", len: "0x0", - pre: "", - want: "", - wantGas: 3, - }, - { // MCOPY - copy 1 from 0x20 to 0x10, with no prior allocated mem - dst: "0x10", src: "0x20", len: "0x1", - pre: "", - // 64 bytes - want: "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - wantGas: 12, - }, - { // MCOPY - copy 1 from 0x19 to 0x10, with no prior allocated mem - dst: "0x10", src: "0x19", len: "0x1", - pre: "", - // 32 bytes - want: "0x0000000000000000000000000000000000000000000000000000000000000000", - wantGas: 9, - }, - } { - var ( - env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) - stack = newstack() - pc = uint64(0) - evmInterpreter = env.interpreter - ) - data := common.FromHex(strings.ReplaceAll(tc.pre, " ", "")) - // Set pre - mem := NewMemory() - mem.Resize(uint64(len(data))) - mem.Set(0, uint64(len(data)), data) - // Push stack args - len, _ := uint256.FromHex(tc.len) - src, _ := uint256.FromHex(tc.src) - dst, _ := uint256.FromHex(tc.dst) - - stack.push(len) - stack.push(src) - stack.push(dst) - wantErr := (tc.wantGas == 0) - // Calc mem expansion - var memorySize uint64 - if memSize, overflow := memoryMcopy(stack); overflow { - if wantErr { - continue - } - t.Errorf("overflow") - } else { - var overflow bool - if memorySize, overflow = math.SafeMul(toWordSize(memSize), 32); overflow { - t.Error(vmerrs.ErrGasUintOverflow) - } - } - // and the dynamic cost - var haveGas uint64 - if dynamicCost, err := gasMcopy(env, nil, stack, mem, memorySize); err != nil { - t.Error(err) - } else { - haveGas = GasFastestStep + dynamicCost - } - // Expand mem - if memorySize > 0 { - mem.Resize(memorySize) - } - // Do the copy - opMcopy(&pc, evmInterpreter, &ScopeContext{mem, stack, nil}) - want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) - if have := mem.store; !bytes.Equal(want, have) { - t.Errorf("case %d: \nwant: %#x\nhave: %#x\n", i, want, have) - } - wantGas := tc.wantGas - if haveGas != wantGas { - t.Errorf("case %d: gas wrong, want %d have %d\n", i, wantGas, haveGas) - } - } -} diff --git a/core/vm/interface.go b/core/vm/interface.go index 34b3e714da..5b91dfe379 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -61,13 +61,11 @@ type StateDB interface { GetTransientState(addr common.Address, key 
common.Hash) common.Hash SetTransientState(addr common.Address, key, value common.Hash) - SelfDestruct(common.Address) - HasSelfDestructed(common.Address) bool - - Selfdestruct6780(common.Address) + Suicide(common.Address) bool + HasSuicided(common.Address) bool // Exist reports whether the given account exists in state. - // Notably this should also return true for self-destructed accounts. + // Notably this should also return true for suicided accounts. Exist(common.Address) bool // Empty returns whether the given account is empty. Empty // is defined according to EIP161 (balance = nonce = code = 0). diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index ecbe6bee73..ef119ba26a 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -67,8 +67,6 @@ func NewEVMInterpreter(evm *EVM) *EVMInterpreter { // If jump table was not initialised we set the default one. var table *JumpTable switch { - case evm.chainRules.IsCancun: - table = &cancunInstructionSet case evm.chainRules.IsDurango: table = &durangoInstructionSet case evm.chainRules.IsSubnetEVM: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 458654145f..a8c2089c4b 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -64,7 +64,6 @@ var ( istanbulInstructionSet = newIstanbulInstructionSet() subnetEVMInstructionSet = newSubnetEVMInstructionSet() durangoInstructionSet = newDurangoInstructionSet() - cancunInstructionSet = newCancunInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. @@ -88,22 +87,12 @@ func validate(jt JumpTable) JumpTable { return jt } -func newCancunInstructionSet() JumpTable { - instructionSet := newDurangoInstructionSet() - enable4844(&instructionSet) // EIP-4844 (DATAHASH opcode) - enable1153(&instructionSet) // EIP-1153 "Transient Storage" - enable5656(&instructionSet) // EIP-5656 (MCOPY opcode) - enable6780(&instructionSet) // EIP-6780 SELFDESTRUCT only in same transaction - return validate(instructionSet) -} - // newDurangoInstructionSet returns the frontier, homestead, byzantium, // constantinople, istanbul, petersburg, subnet-evm, durango instructions. func newDurangoInstructionSet() JumpTable { instructionSet := newSubnetEVMInstructionSet() enable3855(&instructionSet) // PUSH0 instruction enable3860(&instructionSet) // Limit and meter initcode - return validate(instructionSet) } diff --git a/core/vm/jump_table_export.go b/core/vm/jump_table_export.go index 7fdef835d2..da095ce605 100644 --- a/core/vm/jump_table_export.go +++ b/core/vm/jump_table_export.go @@ -24,8 +24,6 @@ import ( // the rules. func LookupInstructionSet(rules params.Rules) (JumpTable, error) { switch { - case rules.IsCancun: - return newCancunInstructionSet(), nil case rules.IsDurango: return newDurangoInstructionSet(), nil case rules.IsSubnetEVM: diff --git a/core/vm/memory.go b/core/vm/memory.go index 259b7bf463..eb6bc89078 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -113,14 +113,3 @@ func (m *Memory) Len() int { func (m *Memory) Data() []byte { return m.store } - -// Copy copies data from the src position slice into the dst position. -// The source and destination may overlap. -// OBS: This operation assumes that any necessary memory expansion has already been performed, -// and this method may panic otherwise. 
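Note on the deletion that follows: the removed Memory.Copy is a thin wrapper over Go's
built-in copy, which is specified to handle overlapping source and destination ranges
(memmove semantics); that property is what made the one-line implementation a valid
backing primitive for EIP-5656 MCOPY. A minimal standalone sketch of the overlap
behavior, with buffer contents borrowed from the "MCOPY 0 1 8" case in the test file
deleted above (the sizes and values here are illustrative, not part of the patch):

package main

import "fmt"

func main() {
	// Stand-in for EVM memory; contents mirror the "MCOPY 0 1 8" test case.
	mem := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}

	// Copy 8 bytes from offset 1 to offset 0. The regions overlap, and Go's
	// copy is defined to behave correctly (like memmove) in this situation.
	copy(mem[0:8], mem[1:9])

	fmt.Printf("%x\n", mem) // prints 010203040506070808
}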
-func (m *Memory) Copy(dst, src, len uint64) { - if len == 0 { - return - } - copy(m.store[dst:], m.store[src:src+len]) -} diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go index 0a2fbe1f9e..2e30f7c5d0 100644 --- a/core/vm/memory_table.go +++ b/core/vm/memory_table.go @@ -58,14 +58,6 @@ func memoryMStore(stack *Stack) (uint64, bool) { return calcMemSize64WithUint(stack.Back(0), 32) } -func memoryMcopy(stack *Stack) (uint64, bool) { - mStart := stack.Back(0) // stack[0]: dest - if stack.Back(1).Gt(mStart) { - mStart = stack.Back(1) // stack[1]: source - } - return calcMemSize64(mStart, stack.Back(2)) // stack[2]: length -} - func memoryCreate(stack *Stack) (uint64, bool) { return calcMemSize64(stack.Back(1), stack.Back(2)) } @@ -88,6 +80,7 @@ func memoryCall(stack *Stack) (uint64, bool) { } return y, false } + func memoryDelegateCall(stack *Stack) (uint64, bool) { x, overflow := calcMemSize64(stack.Back(4), stack.Back(5)) if overflow { diff --git a/core/vm/memory_test.go b/core/vm/memory_test.go deleted file mode 100644 index ba36f8023c..0000000000 --- a/core/vm/memory_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package vm - -import ( - "bytes" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/common" -) - -func TestMemoryCopy(t *testing.T) { - // Test cases from https://eips.ethereum.org/EIPS/eip-5656#test-cases - for i, tc := range []struct { - dst, src, len uint64 - pre string - want string - }{ - { // MCOPY 0 32 32 - copy 32 bytes from offset 32 to offset 0. - 0, 32, 32, - "0000000000000000000000000000000000000000000000000000000000000000 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", - "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f 000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", - }, - - { // MCOPY 0 0 32 - copy 32 bytes from offset 0 to offset 0. - 0, 0, 32, - "0101010101010101010101010101010101010101010101010101010101010101", - "0101010101010101010101010101010101010101010101010101010101010101", - }, - { // MCOPY 0 1 8 - copy 8 bytes from offset 1 to offset 0 (overlapping). - 0, 1, 8, - "000102030405060708 000000000000000000000000000000000000000000000000", - "010203040506070808 000000000000000000000000000000000000000000000000", - }, - { // MCOPY 1 0 8 - copy 8 bytes from offset 0 to offset 1 (overlapping). - 1, 0, 8, - "000102030405060708 000000000000000000000000000000000000000000000000", - "000001020304050607 000000000000000000000000000000000000000000000000", - }, - // Tests below are not in the EIP, but maybe should be added - { // MCOPY 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds index(overlapping). - 0xFFFFFFFFFFFF, 0xFFFFFFFFFFFF, 0, - "11", - "11", - }, - { // MCOPY 0xFFFFFFFFFFFF 0 0 - copy zero bytes from start of mem to out-of-bounds. 
- 0xFFFFFFFFFFFF, 0, 0, - "11", - "11", - }, - { // MCOPY 0 0xFFFFFFFFFFFF 0 - copy zero bytes from out-of-bounds to start of mem - 0, 0xFFFFFFFFFFFF, 0, - "11", - "11", - }, - } { - m := NewMemory() - // Clean spaces - data := common.FromHex(strings.ReplaceAll(tc.pre, " ", "")) - // Set pre - m.Resize(uint64(len(data))) - m.Set(0, uint64(len(data)), data) - // Do the copy - m.Copy(tc.dst, tc.src, tc.len) - want := common.FromHex(strings.ReplaceAll(tc.want, " ", "")) - if have := m.store; !bytes.Equal(want, have) { - t.Errorf("case %d: want: %#x\nhave: %#x\n", i, want, have) - } - } -} diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go index bc638a8570..8cf30b9abf 100644 --- a/core/vm/opcodes.go +++ b/core/vm/opcodes.go @@ -108,7 +108,6 @@ const ( CHAINID OpCode = 0x46 SELFBALANCE OpCode = 0x47 BASEFEE OpCode = 0x48 - BLOBHASH OpCode = 0x49 ) // 0x50 range - 'storage' and execution. @@ -125,9 +124,6 @@ const ( MSIZE OpCode = 0x59 GAS OpCode = 0x5a JUMPDEST OpCode = 0x5b - TLOAD OpCode = 0x5c - TSTORE OpCode = 0x5d - MCOPY OpCode = 0x5e PUSH0 OpCode = 0x5f ) @@ -216,6 +212,12 @@ const ( LOG4 ) +// 0xb0 range. +const ( + TLOAD OpCode = 0xb3 + TSTORE OpCode = 0xb4 +) + // 0xf0 range - closures. const ( CREATE OpCode = 0xf0 @@ -294,7 +296,6 @@ var opCodeToString = map[OpCode]string{ CHAINID: "CHAINID", SELFBALANCE: "SELFBALANCE", BASEFEE: "BASEFEE", - BLOBHASH: "BLOBHASH", // 0x50 range - 'storage' and execution. POP: "POP", @@ -309,9 +310,6 @@ var opCodeToString = map[OpCode]string{ MSIZE: "MSIZE", GAS: "GAS", JUMPDEST: "JUMPDEST", - TLOAD: "TLOAD", - TSTORE: "TSTORE", - MCOPY: "MCOPY", PUSH0: "PUSH0", // 0x60 range - pushes. @@ -391,6 +389,10 @@ var opCodeToString = map[OpCode]string{ LOG3: "LOG3", LOG4: "LOG4", + // 0xb0 range. + TLOAD: "TLOAD", + TSTORE: "TSTORE", + // 0xf0 range - closures. 
CREATE: "CREATE", CALL: "CALL", @@ -451,7 +453,6 @@ var stringToOp = map[string]OpCode{ "CALLDATACOPY": CALLDATACOPY, "CHAINID": CHAINID, "BASEFEE": BASEFEE, - "BLOBHASH": BLOBHASH, "DELEGATECALL": DELEGATECALL, "STATICCALL": STATICCALL, "CODESIZE": CODESIZE, @@ -481,9 +482,6 @@ var stringToOp = map[string]OpCode{ "MSIZE": MSIZE, "GAS": GAS, "JUMPDEST": JUMPDEST, - "TLOAD": TLOAD, - "TSTORE": TSTORE, - "MCOPY": MCOPY, "PUSH0": PUSH0, "PUSH1": PUSH1, "PUSH2": PUSH2, @@ -554,6 +552,8 @@ var stringToOp = map[string]OpCode{ "LOG2": LOG2, "LOG3": LOG3, "LOG4": LOG4, + "TLOAD": TLOAD, + "TSTORE": TSTORE, "CREATE": CREATE, "CREATE2": CREATE2, "CALL": CALL, diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 7d8aec3741..cd68df29a8 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -35,52 +35,62 @@ import ( "github.com/ethereum/go-ethereum/common/math" ) -func makeGasSStoreFunc() gasFunc { - return func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - // If we fail the minimum gas availability invariant, fail (0) - if contract.Gas <= params.SstoreSentryGasEIP2200 { - return 0, errors.New("not enough gas for reentrancy sentry") - } - // Gas sentry honoured, do the actual gas calculation based on the stored value - var ( - y, x = stack.Back(1), stack.peek() - slot = common.Hash(x.Bytes32()) - current = evm.StateDB.GetState(contract.Address(), slot) - cost = uint64(0) - ) - // Check slot presence in the access list - if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { - cost = params.ColdSloadCostEIP2929 - // If the caller cannot afford the cost, this change will be rolled back - evm.StateDB.AddSlotToAccessList(contract.Address(), slot) - if !addrPresent { - // Once we're done with YOLOv2 and schedule this for mainnet, might - // be good to remove this panic here, which is just really a - // canary to have during testing - panic("impossible case: address was not present in access list during sstore op") - } +// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929 +// +// When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys. +// If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys. +// Additionally, modify the parameters defined in EIP 2200 as follows: +// +// Parameter Old value New value +// SLOAD_GAS 800 = WARM_STORAGE_READ_COST +// SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST +// +// The other parameters defined in EIP 2200 are unchanged. +// see gasSStoreEIP2200(...) 
in core/vm/gas_table.go for more info about how EIP 2200 is specified +func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + // If we fail the minimum gas availability invariant, fail (0) + if contract.Gas <= params.SstoreSentryGasEIP2200 { + return 0, errors.New("not enough gas for reentrancy sentry") + } + // Gas sentry honoured, do the actual gas calculation based on the stored value + var ( + y, x = stack.Back(1), stack.peek() + slot = common.Hash(x.Bytes32()) + current = evm.StateDB.GetState(contract.Address(), slot) + cost = uint64(0) + ) + // Check slot presence in the access list + if addrPresent, slotPresent := evm.StateDB.SlotInAccessList(contract.Address(), slot); !slotPresent { + cost = params.ColdSloadCostEIP2929 + // If the caller cannot afford the cost, this change will be rolled back + evm.StateDB.AddSlotToAccessList(contract.Address(), slot) + if !addrPresent { + // Once we're done with YOLOv2 and schedule this for mainnet, might + // be good to remove this panic here, which is just really a + // canary to have during testing + panic("impossible case: address was not present in access list during sstore op") } - value := common.Hash(y.Bytes32()) + } + value := common.Hash(y.Bytes32()) - if current == value { // noop (1) - // EIP 2200 original clause: - // return params.SloadGasEIP2200, nil - return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS - } - original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) - if original == current { - if original == (common.Hash{}) { // create slot (2.1.1) - return cost + params.SstoreSetGasEIP2200, nil - } - // EIP-2200 original clause: - // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2) - return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2) + if current == value { // noop (1) + // EIP 2200 original clause: + // return params.SloadGasEIP2200, nil + return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS + } + original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) + if original == current { + if original == (common.Hash{}) { // create slot (2.1.1) + return cost + params.SstoreSetGasEIP2200, nil } - // EIP-2200 original clause: - //return params.SloadGasEIP2200, nil // dirty update (2.2) - return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) + // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2) + return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2) } + + // EIP-2200 original clause: + //return params.SloadGasEIP2200, nil // dirty update (2.2) + return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2) } // gasSLoadEIP2929 calculates dynamic gas for SLOAD according to EIP-2929 @@ -184,44 +194,22 @@ var ( gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall) gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall) gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode) - gasSelfdestructEIP2929 = makeSelfdestructGasFn(false) // Note: refunds were never enabled on Avalanche - // gasSelfdestructEIP3529 implements the changes in EIP-2539 (no refunds) - gasSelfdestructEIP3529 = makeSelfdestructGasFn(false) - // gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929 - // - // When calling SSTORE, check if the (address, storage_key) pair is in accessed_storage_keys. 
- // If it is not, charge an additional COLD_SLOAD_COST gas, and add the pair to accessed_storage_keys. - // Additionally, modify the parameters defined in EIP 2200 as follows: - // - // Parameter Old value New value - // SLOAD_GAS 800 = WARM_STORAGE_READ_COST - // SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST - // - //The other parameters defined in EIP 2200 are unchanged. - // see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified - gasSStoreEIP2929 = makeGasSStoreFunc() ) -// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-2539 -func makeSelfdestructGasFn(refundsEnabled bool) gasFunc { - gasFunc := func(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { - var ( - gas uint64 - address = common.Address(stack.peek().Bytes20()) - ) - if !evm.StateDB.AddressInAccessList(address) { - // If the caller cannot afford the cost, this change will be rolled back - evm.StateDB.AddAddressToAccessList(address) - gas = params.ColdAccountAccessCostEIP2929 - } - // if empty and transfers value - if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { - gas += params.CreateBySelfdestructGas - } - if refundsEnabled && !evm.StateDB.HasSelfDestructed(contract.Address()) { - evm.StateDB.AddRefund(params.SelfdestructRefundGas) - } - return gas, nil +func gasSelfdestructEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + var ( + gas uint64 + address = common.Address(stack.peek().Bytes20()) + ) + if !evm.StateDB.AddressInAccessList(address) { + // If the caller cannot afford the cost, this change will be rolled back + evm.StateDB.AddAddressToAccessList(address) + gas = params.ColdAccountAccessCostEIP2929 } - return gasFunc + // if empty and transfers value + if evm.StateDB.Empty(address) && evm.StateDB.GetBalance(contract.Address()).Sign() != 0 { + gas += params.CreateBySelfdestructGas + } + + return gas, nil } diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index a3cd09570c..07a1dd7923 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -33,9 +33,8 @@ import ( func NewEnv(cfg *Config) *vm.EVM { txContext := vm.TxContext{ - Origin: cfg.Origin, - GasPrice: cfg.GasPrice, - BlobHashes: cfg.BlobHashes, + Origin: cfg.Origin, + GasPrice: cfg.GasPrice, } blockContext := vm.BlockContext{ CanTransfer: core.CanTransfer, diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index fbe5b0c783..b1ed7667b7 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -54,7 +54,6 @@ type Config struct { Debug bool EVMConfig vm.Config BaseFee *big.Int - BlobHashes []common.Hash State *state.StateDB GetHashFn func(n uint64) common.Hash @@ -123,7 +122,7 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { address = common.BytesToAddress([]byte("contract")) vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) - rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time) + rules = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time) ) // Execute the preparatory steps for state transition which includes: // - prepare accessList(post-berlin) @@ -157,7 +156,7 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { var ( vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) - rules = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time) + rules = 
cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time)
 	)
 	// Execute the preparatory steps for state transition which includes:
 	// - prepare accessList(post-berlin)
@@ -186,7 +185,7 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er
 		vmenv   = NewEnv(cfg)
 		sender  = cfg.State.GetOrNewStateObject(cfg.Origin)
 		statedb = cfg.State
-		rules   = cfg.ChainConfig.Rules(vmenv.Context.BlockNumber, vmenv.Context.Time)
+		rules   = cfg.ChainConfig.AvalancheRules(vmenv.Context.BlockNumber, vmenv.Context.Time)
 	)
 	// Execute the preparatory steps for state transition which includes:
 	// - prepare accessList(post-berlin)
diff --git a/core/vm/testdata/precompiles/pointEvaluation.json b/core/vm/testdata/precompiles/pointEvaluation.json
deleted file mode 100644
index 93fc66d836..0000000000
--- a/core/vm/testdata/precompiles/pointEvaluation.json
+++ /dev/null
@@ -1,9 +0,0 @@
-[
-  {
-    "Input": "01d18459b334ffe8e2226eef1db874fda6db2bdd9357268b39220af2d59464fb564c0a11a0f704f4fc3e8acfe0f8245f0ad1347b378fbf96e206da11a5d3630624d25032e67a7e6a4910df5834b8fe70e6bcfeeac0352434196bdf4b2485d5a1978a0d595c823c05947b1156175e72634a377808384256e9921ebf72181890be2d6b58d4a73a880541d1656875654806942307f266e636553e94006d11423f2688945ff3bdf515859eba1005c1a7708d620a94d91a1c0c285f9584e75ec2f82a",
-    "Expected": "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
-    "Name": "pointEvaluation1",
-    "Gas": 50000,
-    "NoBenchmark": false
-  }
-]
diff --git a/eth/api.go b/eth/api.go
index 5842cb5625..993f8faf4e 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -27,7 +27,26 @@
 package eth
 
 import (
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/ava-labs/subnet-evm/core"
+	"github.com/ava-labs/subnet-evm/core/rawdb"
+	"github.com/ava-labs/subnet-evm/core/state"
+	"github.com/ava-labs/subnet-evm/core/types"
+	"github.com/ava-labs/subnet-evm/internal/ethapi"
+	"github.com/ava-labs/subnet-evm/rpc"
+	"github.com/ava-labs/subnet-evm/trie"
 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
 )
 
 // EthereumAPI provides an API to access Ethereum full node-related information.
@@ -40,12 +59,427 @@ func NewEthereumAPI(e *Ethereum) *EthereumAPI {
 	return &EthereumAPI{e}
 }
 
 // Etherbase is the address that mining rewards will be sent to.
 func (api *EthereumAPI) Etherbase() (common.Address, error) {
 	return api.e.Etherbase()
 }
 
 // Coinbase is the address that mining rewards will be sent to (alias for Etherbase).
 func (api *EthereumAPI) Coinbase() (common.Address, error) {
 	return api.Etherbase()
 }
+
+// AdminAPI is the collection of Ethereum full node related APIs for node
+// administration.
+type AdminAPI struct {
+	eth *Ethereum
+}
+
+// NewAdminAPI creates a new instance of AdminAPI.
+func NewAdminAPI(eth *Ethereum) *AdminAPI {
+	return &AdminAPI{eth: eth}
+}
+
+// ExportChain exports the current blockchain into a local file,
+// or a range of blocks if first and last are non-nil.
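ExportChain, restored in full just below, decides on gzip compression purely from the
output file's suffix: writes go through a gzip.Writer only when the target ends in
".gz". A self-contained sketch of that suffix-driven writer selection; the file name
and payload are placeholders rather than anything this patch prescribes:

package main

import (
	"compress/gzip"
	"io"
	"os"
	"strings"
)

// writeMaybeGzipped mirrors the writer-selection pattern used by ExportChain:
// data written through w is gzip-compressed iff the file name ends in ".gz".
func writeMaybeGzipped(file string, payload []byte) error {
	out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer out.Close()

	var w io.Writer = out
	if strings.HasSuffix(file, ".gz") {
		gz := gzip.NewWriter(w)
		defer gz.Close() // runs before out.Close, flushing the gzip stream
		w = gz
	}
	_, err = w.Write(payload)
	return err
}

func main() {
	_ = writeMaybeGzipped("chain-export.rlp.gz", []byte("example payload")) // placeholder usage
}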
+func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
+	if first == nil && last != nil {
+		return false, errors.New("last cannot be specified without first")
+	}
+	if first != nil && last == nil {
+		head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
+		last = &head
+	}
+	if _, err := os.Stat(file); err == nil {
+		// File already exists. Allowing overwrite could be a DoS vector,
+		// since the 'file' may point to arbitrary paths on the drive.
+		return false, errors.New("location would overwrite an existing file")
+	}
+	// Make sure we can create the file to export into
+	out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+	if err != nil {
+		return false, err
+	}
+	defer out.Close()
+
+	var writer io.Writer = out
+	if strings.HasSuffix(file, ".gz") {
+		writer = gzip.NewWriter(writer)
+		defer writer.(*gzip.Writer).Close()
+	}
+
+	// Export the blockchain
+	if first != nil {
+		if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
+			return false, err
+		}
+	} else if err := api.eth.BlockChain().Export(writer); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool {
+	for _, b := range bs {
+		if !chain.HasBlock(b.Hash(), b.NumberU64()) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ImportChain imports a blockchain from a local file.
+func (api *AdminAPI) ImportChain(file string) (bool, error) {
+	// Make sure we can access the file to import
+	in, err := os.Open(file)
+	if err != nil {
+		return false, err
+	}
+	defer in.Close()
+
+	var reader io.Reader = in
+	if strings.HasSuffix(file, ".gz") {
+		if reader, err = gzip.NewReader(reader); err != nil {
+			return false, err
+		}
+	}
+
+	// Run the actual import in pre-configured batches
+	stream := rlp.NewStream(reader, 0)
+
+	blocks, index := make([]*types.Block, 0, 2500), 0
+	for batch := 0; ; batch++ {
+		// Load a batch of blocks from the input file
+		for len(blocks) < cap(blocks) {
+			block := new(types.Block)
+			if err := stream.Decode(block); err == io.EOF {
+				break
+			} else if err != nil {
+				return false, fmt.Errorf("block %d: failed to parse: %v", index, err)
+			}
+			blocks = append(blocks, block)
+			index++
+		}
+		if len(blocks) == 0 {
+			break
+		}
+
+		if hasAllBlocks(api.eth.BlockChain(), blocks) {
+			blocks = blocks[:0]
+			continue
+		}
+		// Import the batch and reset the buffer
+		if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil {
+			return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err)
+		}
+		blocks = blocks[:0]
+	}
+	return true, nil
+}
+
+// DebugAPI is the collection of Ethereum full node APIs for debugging the
+// protocol.
+type DebugAPI struct {
+	eth *Ethereum
+}
+
+// NewDebugAPI creates a new DebugAPI instance.
+func NewDebugAPI(eth *Ethereum) *DebugAPI {
+	return &DebugAPI{eth: eth}
+}
+
+// DumpBlock retrieves the entire state of the database at a given block.
+func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) { + opts := &state.DumpConfig{ + OnlyWithAddresses: true, + Max: AccountRangeMaxResults, // Sanity limit over RPC + } + var header *types.Header + if blockNr.IsAccepted() { + if api.eth.APIBackend.isLatestAndAllowed(blockNr) { + header = api.eth.blockchain.CurrentHeader() + } else { + header = api.eth.LastAcceptedBlock().Header() + } + } else { + block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) + if block == nil { + return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) + } + header = block.Header() + } + if header == nil { + return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) + } + stateDb, err := api.eth.BlockChain().StateAt(header.Root) + if err != nil { + return state.Dump{}, err + } + return stateDb.RawDump(opts), nil +} + +// Preimage is a debug API function that returns the preimage for a sha3 hash, if known. +func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { + if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil { + return preimage, nil + } + return nil, errors.New("unknown preimage") +} + +// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network +// and returns them as a JSON list of block hashes. +func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*ethapi.BadBlockArgs, error) { + internalAPI := ethapi.NewBlockChainAPI(api.eth.APIBackend) + return internalAPI.GetBadBlocks(ctx) +} + +// AccountRangeMaxResults is the maximum number of results to be returned per call +const AccountRangeMaxResults = 256 + +// AccountRange enumerates all accounts in the given block and start point in paging request +func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) { + var stateDb *state.StateDB + var err error + + if number, ok := blockNrOrHash.Number(); ok { + var header *types.Header + if number.IsAccepted() { + if api.eth.APIBackend.isLatestAndAllowed(number) { + header = api.eth.blockchain.CurrentHeader() + } else { + header = api.eth.LastAcceptedBlock().Header() + } + } else { + block := api.eth.blockchain.GetBlockByNumber(uint64(number)) + if block == nil { + return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) + } + header = block.Header() + } + if header == nil { + return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) + } + stateDb, err = api.eth.BlockChain().StateAt(header.Root) + if err != nil { + return state.IteratorDump{}, err + } + } else if hash, ok := blockNrOrHash.Hash(); ok { + block := api.eth.blockchain.GetBlockByHash(hash) + if block == nil { + return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex()) + } + stateDb, err = api.eth.BlockChain().StateAt(block.Root()) + if err != nil { + return state.IteratorDump{}, err + } + } else { + return state.IteratorDump{}, errors.New("either block number or block hash must be specified") + } + + opts := &state.DumpConfig{ + SkipCode: nocode, + SkipStorage: nostorage, + OnlyWithAddresses: !incompletes, + Start: start, + Max: uint64(maxResults), + } + if maxResults > AccountRangeMaxResults || maxResults <= 0 { + opts.Max = AccountRangeMaxResults + } + return stateDb.IteratorDump(opts), nil +} + +// StorageRangeResult is the result of a debug_storageRangeAt API call. 
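Results from this API are paged: the type defined next carries a NextKey that is
non-nil whenever more of the storage trie remains. A hedged client-side sketch of
draining a contract's storage through debug_storageRangeAt; the endpoint, block hash,
and contract address are placeholders, and it assumes the node exposes the debug
namespace over HTTP:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

// storageRange mirrors just the fields of StorageRangeResult needed for paging.
type storageRange struct {
	Storage map[common.Hash]struct {
		Key   *common.Hash `json:"key"`
		Value common.Hash  `json:"value"`
	} `json:"storage"`
	NextKey *common.Hash `json:"nextKey"`
}

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var (
		blockHash = common.Hash{}    // placeholder: hash of the block to inspect
		contract  = common.Address{} // placeholder: contract whose storage is walked
		start     = hexutil.Bytes{}  // empty key starts at the first slot
		total     = 0
	)
	for {
		var page storageRange
		// Argument order mirrors StorageRangeAt: blockHash, txIndex, contract, keyStart, maxResult.
		if err := client.Call(&page, "debug_storageRangeAt", blockHash, 0, contract, start, 256); err != nil {
			panic(err)
		}
		total += len(page.Storage)
		if page.NextKey == nil {
			break // nil NextKey: the last key in the trie was included
		}
		start = page.NextKey.Bytes()
	}
	fmt.Println("total slots:", total)
}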
+type StorageRangeResult struct { + Storage storageMap `json:"storage"` + NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie. +} + +type storageMap map[common.Hash]storageEntry + +type storageEntry struct { + Key *common.Hash `json:"key"` + Value common.Hash `json:"value"` +} + +// StorageRangeAt returns the storage at the given block height and transaction index. +func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { + // Retrieve the block + block := api.eth.blockchain.GetBlockByHash(blockHash) + if block == nil { + return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) + } + _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) + if err != nil { + return StorageRangeResult{}, err + } + defer release() + + st, err := statedb.StorageTrie(contractAddress) + if err != nil { + return StorageRangeResult{}, err + } + if st == nil { + return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) + } + return storageRangeAt(st, keyStart, maxResult) +} + +func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) { + it := trie.NewIterator(st.NodeIterator(start)) + result := StorageRangeResult{Storage: storageMap{}} + for i := 0; i < maxResult && it.Next(); i++ { + _, content, _, err := rlp.Split(it.Value) + if err != nil { + return StorageRangeResult{}, err + } + e := storageEntry{Value: common.BytesToHash(content)} + if preimage := st.GetKey(it.Key); preimage != nil { + preimage := common.BytesToHash(preimage) + e.Key = &preimage + } + result.Storage[common.BytesToHash(it.Key)] = e + } + // Add the 'next key' so clients can continue downloading. + if it.Next() { + next := common.BytesToHash(it.Key) + result.NextKey = &next + } + return result, nil +} + +// GetModifiedAccountsByNumber returns all accounts that have changed between the +// two blocks specified. A change is defined as a difference in nonce, balance, +// code hash, or storage hash. +// +// With one parameter, returns the list of accounts modified in the specified block. +func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) { + var startBlock, endBlock *types.Block + + startBlock = api.eth.blockchain.GetBlockByNumber(startNum) + if startBlock == nil { + return nil, fmt.Errorf("start block %x not found", startNum) + } + + if endNum == nil { + endBlock = startBlock + startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) + if startBlock == nil { + return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) + } + } else { + endBlock = api.eth.blockchain.GetBlockByNumber(*endNum) + if endBlock == nil { + return nil, fmt.Errorf("end block %d not found", *endNum) + } + } + return api.getModifiedAccounts(startBlock, endBlock) +} + +// GetModifiedAccountsByHash returns all accounts that have changed between the +// two blocks specified. A change is defined as a difference in nonce, balance, +// code hash, or storage hash. +// +// With one parameter, returns the list of accounts modified in the specified block. 
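Both variants share the parent-diff convention spelled out above: given a single
argument, the end block defaults to the named block and the start becomes its parent,
so the result is the set of accounts that block alone touched. A hedged usage sketch
over JSON-RPC; the endpoint and block number are placeholders, and the debug
namespace is assumed to be enabled:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:8545") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// One argument: accounts whose nonce, balance, code hash, or storage
	// hash changed in block 1000 relative to its parent, block 999.
	var dirty []common.Address
	if err := client.Call(&dirty, "debug_getModifiedAccountsByNumber", 1000); err != nil {
		panic(err)
	}
	fmt.Println("accounts modified in block 1000:", len(dirty))
}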
+func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { + var startBlock, endBlock *types.Block + startBlock = api.eth.blockchain.GetBlockByHash(startHash) + if startBlock == nil { + return nil, fmt.Errorf("start block %x not found", startHash) + } + + if endHash == nil { + endBlock = startBlock + startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) + if startBlock == nil { + return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) + } + } else { + endBlock = api.eth.blockchain.GetBlockByHash(*endHash) + if endBlock == nil { + return nil, fmt.Errorf("end block %x not found", *endHash) + } + } + return api.getModifiedAccounts(startBlock, endBlock) +} + +func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) { + if startBlock.Number().Uint64() >= endBlock.Number().Uint64() { + return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64()) + } + triedb := api.eth.BlockChain().StateCache().TrieDB() + + oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb) + if err != nil { + return nil, err + } + newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb) + if err != nil { + return nil, err + } + diff, _ := trie.NewDifferenceIterator(oldTrie.NodeIterator([]byte{}), newTrie.NodeIterator([]byte{})) + iter := trie.NewIterator(diff) + + var dirty []common.Address + for iter.Next() { + key := newTrie.GetKey(iter.Key) + if key == nil { + return nil, fmt.Errorf("no preimage found for hash %x", iter.Key) + } + dirty = append(dirty, common.BytesToAddress(key)) + } + return dirty, nil +} + +// GetAccessibleState returns the first number where the node has accessible +// state on disk. Note this being the post-state of that block and the pre-state +// of the next block. +// The (from, to) parameters are the sequence of blocks to search, which can go +// either forwards or backwards +func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) { + var resolveNum = func(num rpc.BlockNumber) (uint64, error) { + // We don't have state for pending (-2), so treat it as latest + if num.Int64() < 0 { + block := api.eth.blockchain.CurrentBlock() + if block == nil { + return 0, errors.New("current block missing") + } + return block.Number.Uint64(), nil + } + return uint64(num.Int64()), nil + } + var ( + start uint64 + end uint64 + delta = int64(1) + lastLog time.Time + err error + ) + if start, err = resolveNum(from); err != nil { + return 0, err + } + if end, err = resolveNum(to); err != nil { + return 0, err + } + if start == end { + return 0, errors.New("from and to needs to be different") + } + if start > end { + delta = -1 + } + for i := int64(start); i != int64(end); i += delta { + if time.Since(lastLog) > 8*time.Second { + log.Info("Finding roots", "from", start, "to", end, "at", i) + lastLog = time.Now() + } + h := api.eth.BlockChain().GetHeaderByNumber(uint64(i)) + if h == nil { + return 0, fmt.Errorf("missing header %d", i) + } + if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok { + return uint64(i), nil + } + } + return 0, errors.New("no state found") +} diff --git a/eth/api_admin.go b/eth/api_admin.go deleted file mode 100644 index e1fe683c01..0000000000 --- a/eth/api_admin.go +++ /dev/null @@ -1,149 +0,0 @@ -// (c) 2024, Ava Labs, Inc. 
-// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package eth - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - "strings" - - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ethereum/go-ethereum/rlp" -) - -// AdminAPI is the collection of Ethereum full node related APIs for node -// administration. -type AdminAPI struct { - eth *Ethereum -} - -// NewAdminAPI creates a new instance of AdminAPI. -func NewAdminAPI(eth *Ethereum) *AdminAPI { - return &AdminAPI{eth: eth} -} - -// ExportChain exports the current blockchain into a local file, -// or a range of blocks if first and last are non-nil. -func (api *AdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) { - if first == nil && last != nil { - return false, errors.New("last cannot be specified without first") - } - if first != nil && last == nil { - head := api.eth.BlockChain().CurrentHeader().Number.Uint64() - last = &head - } - if _, err := os.Stat(file); err == nil { - // File already exists. Allowing overwrite could be a DoS vector, - // since the 'file' may point to arbitrary paths on the drive. - return false, errors.New("location would overwrite an existing file") - } - // Make sure we can create the file to export into - out, err := os.OpenFile(file, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - return false, err - } - defer out.Close() - - var writer io.Writer = out - if strings.HasSuffix(file, ".gz") { - writer = gzip.NewWriter(writer) - defer writer.(*gzip.Writer).Close() - } - - // Export the blockchain - if first != nil { - if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil { - return false, err - } - } else if err := api.eth.BlockChain().Export(writer); err != nil { - return false, err - } - return true, nil -} - -func hasAllBlocks(chain *core.BlockChain, bs []*types.Block) bool { - for _, b := range bs { - if !chain.HasBlock(b.Hash(), b.NumberU64()) { - return false - } - } - - return true -} - -// ImportChain imports a blockchain from a local file. 
-func (api *AdminAPI) ImportChain(file string) (bool, error) { - // Make sure the can access the file to import - in, err := os.Open(file) - if err != nil { - return false, err - } - defer in.Close() - - var reader io.Reader = in - if strings.HasSuffix(file, ".gz") { - if reader, err = gzip.NewReader(reader); err != nil { - return false, err - } - } - - // Run actual the import in pre-configured batches - stream := rlp.NewStream(reader, 0) - - blocks, index := make([]*types.Block, 0, 2500), 0 - for batch := 0; ; batch++ { - // Load a batch of blocks from the input file - for len(blocks) < cap(blocks) { - block := new(types.Block) - if err := stream.Decode(block); err == io.EOF { - break - } else if err != nil { - return false, fmt.Errorf("block %d: failed to parse: %v", index, err) - } - blocks = append(blocks, block) - index++ - } - if len(blocks) == 0 { - break - } - - if hasAllBlocks(api.eth.BlockChain(), blocks) { - blocks = blocks[:0] - continue - } - // Import the batch and reset the buffer - if _, err := api.eth.BlockChain().InsertChain(blocks); err != nil { - return false, fmt.Errorf("batch %d: failed to insert: %v", batch, err) - } - blocks = blocks[:0] - } - return true, nil -} diff --git a/eth/api_backend.go b/eth/api_backend.go index edf78810fd..5e660bebad 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -40,7 +40,6 @@ import ( "github.com/ava-labs/subnet-evm/core/bloombits" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" "github.com/ava-labs/subnet-evm/eth/gasprice" @@ -54,7 +53,7 @@ import ( var ErrUnfinalizedData = errors.New("cannot query unfinalized data") -// EthAPIBackend implements ethapi.Backend and tracers.Backend for full nodes +// EthAPIBackend implements ethapi.Backend for full nodes type EthAPIBackend struct { extRPCEnabled bool allowUnprotectedTxs bool @@ -337,7 +336,7 @@ func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) if err := ctx.Err(); err != nil { return err } - if err := b.eth.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, false)[0]; err != nil { + if err := b.eth.txPool.AddLocal(signedTx); err != nil { return err } @@ -351,20 +350,13 @@ func (b *EthAPIBackend) GetPoolTransactions() (types.Transactions, error) { pending := b.eth.txPool.Pending(false) var txs types.Transactions for _, batch := range pending { - for _, lazy := range batch { - if tx := lazy.Resolve(); tx != nil { - txs = append(txs, tx.Tx) - } - } + txs = append(txs, batch...) 
} return txs, nil } func (b *EthAPIBackend) GetPoolTransaction(hash common.Hash) *types.Transaction { - if tx := b.eth.txPool.Get(hash); tx != nil { - return tx.Tx - } - return nil + return b.eth.txPool.Get(hash) } func (b *EthAPIBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { @@ -390,15 +382,15 @@ func (b *EthAPIBackend) GetPoolNonce(ctx context.Context, addr common.Address) ( return b.eth.txPool.Nonce(addr), nil } -func (b *EthAPIBackend) Stats() (runnable int, blocked int) { +func (b *EthAPIBackend) Stats() (pending int, queued int) { return b.eth.txPool.Stats() } -func (b *EthAPIBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { +func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { return b.eth.txPool.Content() } -func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { +func (b *EthAPIBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { return b.eth.txPool.ContentFrom(addr) } diff --git a/eth/api_debug.go b/eth/api_debug.go deleted file mode 100644 index 50d7b140ec..0000000000 --- a/eth/api_debug.go +++ /dev/null @@ -1,363 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package eth - -import ( - "context" - "errors" - "fmt" - "time" - - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/internal/ethapi" - "github.com/ava-labs/subnet-evm/rpc" - "github.com/ava-labs/subnet-evm/trie" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" -) - -// DebugAPI is the collection of Ethereum full node APIs for debugging the -// protocol. -type DebugAPI struct { - eth *Ethereum -} - -// NewDebugAPI creates a new DebugAPI instance. -func NewDebugAPI(eth *Ethereum) *DebugAPI { - return &DebugAPI{eth: eth} -} - -// DumpBlock retrieves the entire state of the database at a given block. 
-func (api *DebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error) { - opts := &state.DumpConfig{ - OnlyWithAddresses: true, - Max: AccountRangeMaxResults, // Sanity limit over RPC - } - var header *types.Header - if blockNr.IsAccepted() { - if api.eth.APIBackend.isLatestAndAllowed(blockNr) { - header = api.eth.blockchain.CurrentHeader() - } else { - header = api.eth.LastAcceptedBlock().Header() - } - } else { - block := api.eth.blockchain.GetBlockByNumber(uint64(blockNr)) - if block == nil { - return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) - } - header = block.Header() - } - if header == nil { - return state.Dump{}, fmt.Errorf("block #%d not found", blockNr) - } - stateDb, err := api.eth.BlockChain().StateAt(header.Root) - if err != nil { - return state.Dump{}, err - } - return stateDb.RawDump(opts), nil -} - -// Preimage is a debug API function that returns the preimage for a sha3 hash, if known. -func (api *DebugAPI) Preimage(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { - if preimage := rawdb.ReadPreimage(api.eth.ChainDb(), hash); preimage != nil { - return preimage, nil - } - return nil, errors.New("unknown preimage") -} - -// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network -// and returns them as a JSON list of block hashes. -func (api *DebugAPI) GetBadBlocks(ctx context.Context) ([]*ethapi.BadBlockArgs, error) { - internalAPI := ethapi.NewBlockChainAPI(api.eth.APIBackend) - return internalAPI.GetBadBlocks(ctx) -} - -// AccountRangeMaxResults is the maximum number of results to be returned per call -const AccountRangeMaxResults = 256 - -// AccountRange enumerates all accounts in the given block and start point in paging request -func (api *DebugAPI) AccountRange(blockNrOrHash rpc.BlockNumberOrHash, start hexutil.Bytes, maxResults int, nocode, nostorage, incompletes bool) (state.IteratorDump, error) { - var stateDb *state.StateDB - var err error - - if number, ok := blockNrOrHash.Number(); ok { - var header *types.Header - if number.IsAccepted() { - if api.eth.APIBackend.isLatestAndAllowed(number) { - header = api.eth.blockchain.CurrentHeader() - } else { - header = api.eth.LastAcceptedBlock().Header() - } - } else { - block := api.eth.blockchain.GetBlockByNumber(uint64(number)) - if block == nil { - return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) - } - header = block.Header() - } - if header == nil { - return state.IteratorDump{}, fmt.Errorf("block #%d not found", number) - } - stateDb, err = api.eth.BlockChain().StateAt(header.Root) - if err != nil { - return state.IteratorDump{}, err - } - } else if hash, ok := blockNrOrHash.Hash(); ok { - block := api.eth.blockchain.GetBlockByHash(hash) - if block == nil { - return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex()) - } - stateDb, err = api.eth.BlockChain().StateAt(block.Root()) - if err != nil { - return state.IteratorDump{}, err - } - } else { - return state.IteratorDump{}, errors.New("either block number or block hash must be specified") - } - - opts := &state.DumpConfig{ - SkipCode: nocode, - SkipStorage: nostorage, - OnlyWithAddresses: !incompletes, - Start: start, - Max: uint64(maxResults), - } - if maxResults > AccountRangeMaxResults || maxResults <= 0 { - opts.Max = AccountRangeMaxResults - } - return stateDb.IteratorDump(opts), nil -} - -// StorageRangeResult is the result of a debug_storageRangeAt API call. 
-type StorageRangeResult struct { - Storage storageMap `json:"storage"` - NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie. -} - -type storageMap map[common.Hash]storageEntry - -type storageEntry struct { - Key *common.Hash `json:"key"` - Value common.Hash `json:"value"` -} - -// StorageRangeAt returns the storage at the given block height and transaction index. -func (api *DebugAPI) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex int, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { - // Retrieve the block - block := api.eth.blockchain.GetBlockByHash(blockHash) - if block == nil { - return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) - } - _, _, statedb, release, err := api.eth.stateAtTransaction(ctx, block, txIndex, 0) - if err != nil { - return StorageRangeResult{}, err - } - defer release() - - st, err := statedb.StorageTrie(contractAddress) - if err != nil { - return StorageRangeResult{}, err - } - if st == nil { - return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) - } - return storageRangeAt(st, keyStart, maxResult) -} - -func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeResult, error) { - trieIt, err := st.NodeIterator(start) - if err != nil { - return StorageRangeResult{}, err - } - it := trie.NewIterator(trieIt) - result := StorageRangeResult{Storage: storageMap{}} - for i := 0; i < maxResult && it.Next(); i++ { - _, content, _, err := rlp.Split(it.Value) - if err != nil { - return StorageRangeResult{}, err - } - e := storageEntry{Value: common.BytesToHash(content)} - if preimage := st.GetKey(it.Key); preimage != nil { - preimage := common.BytesToHash(preimage) - e.Key = &preimage - } - result.Storage[common.BytesToHash(it.Key)] = e - } - // Add the 'next key' so clients can continue downloading. - if it.Next() { - next := common.BytesToHash(it.Key) - result.NextKey = &next - } - return result, nil -} - -// GetModifiedAccountsByNumber returns all accounts that have changed between the -// two blocks specified. A change is defined as a difference in nonce, balance, -// code hash, or storage hash. -// -// With one parameter, returns the list of accounts modified in the specified block. -func (api *DebugAPI) GetModifiedAccountsByNumber(startNum uint64, endNum *uint64) ([]common.Address, error) { - var startBlock, endBlock *types.Block - - startBlock = api.eth.blockchain.GetBlockByNumber(startNum) - if startBlock == nil { - return nil, fmt.Errorf("start block %x not found", startNum) - } - - if endNum == nil { - endBlock = startBlock - startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) - if startBlock == nil { - return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) - } - } else { - endBlock = api.eth.blockchain.GetBlockByNumber(*endNum) - if endBlock == nil { - return nil, fmt.Errorf("end block %d not found", *endNum) - } - } - return api.getModifiedAccounts(startBlock, endBlock) -} - -// GetModifiedAccountsByHash returns all accounts that have changed between the -// two blocks specified. A change is defined as a difference in nonce, balance, -// code hash, or storage hash. -// -// With one parameter, returns the list of accounts modified in the specified block. 
-func (api *DebugAPI) GetModifiedAccountsByHash(startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { - var startBlock, endBlock *types.Block - startBlock = api.eth.blockchain.GetBlockByHash(startHash) - if startBlock == nil { - return nil, fmt.Errorf("start block %x not found", startHash) - } - - if endHash == nil { - endBlock = startBlock - startBlock = api.eth.blockchain.GetBlockByHash(startBlock.ParentHash()) - if startBlock == nil { - return nil, fmt.Errorf("block %x has no parent", endBlock.Number()) - } - } else { - endBlock = api.eth.blockchain.GetBlockByHash(*endHash) - if endBlock == nil { - return nil, fmt.Errorf("end block %x not found", *endHash) - } - } - return api.getModifiedAccounts(startBlock, endBlock) -} - -func (api *DebugAPI) getModifiedAccounts(startBlock, endBlock *types.Block) ([]common.Address, error) { - if startBlock.Number().Uint64() >= endBlock.Number().Uint64() { - return nil, fmt.Errorf("start block height (%d) must be less than end block height (%d)", startBlock.Number().Uint64(), endBlock.Number().Uint64()) - } - triedb := api.eth.BlockChain().StateCache().TrieDB() - - oldTrie, err := trie.NewStateTrie(trie.StateTrieID(startBlock.Root()), triedb) - if err != nil { - return nil, err - } - newTrie, err := trie.NewStateTrie(trie.StateTrieID(endBlock.Root()), triedb) - if err != nil { - return nil, err - } - oldIt, err := oldTrie.NodeIterator([]byte{}) - if err != nil { - return nil, err - } - newIt, err := newTrie.NodeIterator([]byte{}) - if err != nil { - return nil, err - } - diff, _ := trie.NewDifferenceIterator(oldIt, newIt) - iter := trie.NewIterator(diff) - - var dirty []common.Address - for iter.Next() { - key := newTrie.GetKey(iter.Key) - if key == nil { - return nil, fmt.Errorf("no preimage found for hash %x", iter.Key) - } - dirty = append(dirty, common.BytesToAddress(key)) - } - return dirty, nil -} - -// GetAccessibleState returns the first number where the node has accessible -// state on disk. Note this being the post-state of that block and the pre-state -// of the next block. 
-// The (from, to) parameters are the sequence of blocks to search, which can go -// either forwards or backwards -func (api *DebugAPI) GetAccessibleState(from, to rpc.BlockNumber) (uint64, error) { - var resolveNum = func(num rpc.BlockNumber) (uint64, error) { - // We don't have state for pending (-2), so treat it as latest - if num.Int64() < 0 { - block := api.eth.blockchain.CurrentBlock() - if block == nil { - return 0, errors.New("current block missing") - } - return block.Number.Uint64(), nil - } - return uint64(num.Int64()), nil - } - var ( - start uint64 - end uint64 - delta = int64(1) - lastLog time.Time - err error - ) - if start, err = resolveNum(from); err != nil { - return 0, err - } - if end, err = resolveNum(to); err != nil { - return 0, err - } - if start == end { - return 0, errors.New("from and to needs to be different") - } - if start > end { - delta = -1 - } - for i := int64(start); i != int64(end); i += delta { - if time.Since(lastLog) > 8*time.Second { - log.Info("Finding roots", "from", start, "to", end, "at", i) - lastLog = time.Now() - } - h := api.eth.BlockChain().GetHeaderByNumber(uint64(i)) - if h == nil { - return 0, fmt.Errorf("missing header %d", i) - } - if ok, _ := api.eth.ChainDb().Has(h.Root[:]); ok { - return uint64(i), nil - } - } - return 0, errors.New("no state found") -} diff --git a/eth/backend.go b/eth/backend.go index 9871559a09..a8de24fad9 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -30,7 +30,6 @@ package eth import ( "errors" "fmt" - "math/big" "sync" "time" @@ -43,8 +42,6 @@ import ( "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/state/pruner" "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/txpool/blobpool" - "github.com/ava-labs/subnet-evm/core/txpool/legacypool" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" "github.com/ava-labs/subnet-evm/eth/ethconfig" @@ -84,8 +81,7 @@ type Ethereum struct { config *Config // Handlers - txPool *txpool.TxPool - + txPool *txpool.TxPool blockchain *core.BlockChain gossiper PushGossiper @@ -156,7 +152,7 @@ func New( // Since RecoverPruning will only continue a pruning run that already began, we do not need to ensure that // reprocessState has already been called and completed successfully. To ensure this, we must maintain // that Prune is only run after reprocessState has finished successfully. 
- if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb); err != nil { + if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb, config.TrieCleanJournal); err != nil { log.Error("Failed to recover state", "error", err) } @@ -197,6 +193,8 @@ func New( } cacheConfig = &core.CacheConfig{ TrieCleanLimit: config.TrieCleanCache, + TrieCleanJournal: config.TrieCleanJournal, + TrieCleanRejournal: config.TrieCleanRejournal, TrieDirtyLimit: config.TrieDirtyCache, TrieDirtyCommitTarget: config.TrieDirtyCommitTarget, TriePrefetcherParallelism: config.TriePrefetcherParallelism, @@ -221,6 +219,7 @@ func New( if err := eth.precheckPopulateMissingTries(); err != nil { return nil, err } + var err error eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, eth.engine, vmConfig, lastAcceptedHash, config.SkipUpgradeCheck) if err != nil { @@ -238,15 +237,7 @@ func New( eth.bloomIndexer.Start(eth.blockchain) - config.BlobPool.Datadir = "" - blobPool := blobpool.New(config.BlobPool, &chainWithFinalBlock{eth.blockchain}) - - legacyPool := legacypool.New(config.TxPool, eth.blockchain) - - eth.txPool, err = txpool.New(new(big.Int).SetUint64(config.TxPool.PriceLimit), eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) - if err != nil { - return nil, err - } + eth.txPool = txpool.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain) eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, clock) @@ -373,7 +364,7 @@ func (s *Ethereum) Start() { func (s *Ethereum) Stop() error { s.bloomIndexer.Close() close(s.closeBloomHandler) - s.txPool.Close() + s.txPool.Stop() s.blockchain.Stop() s.engine.Close() @@ -456,6 +447,7 @@ func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, gspec *co log.Info("Starting offline pruning", "dataDir", s.config.OfflinePruningDataDirectory, "bloomFilterSize", s.config.OfflinePruningBloomFilterSize) prunerConfig := pruner.Config{ BloomSize: s.config.OfflinePruningBloomFilterSize, + Cachedir: s.config.TrieCleanJournal, Datadir: s.config.OfflinePruningDataDirectory, } diff --git a/eth/chain_with_final_block.go b/eth/chain_with_final_block.go deleted file mode 100644 index df4ccf70dd..0000000000 --- a/eth/chain_with_final_block.go +++ /dev/null @@ -1,23 +0,0 @@ -package eth - -import ( - "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/types" -) - -const blocksToKeep = 604_800 // Approx. 2 weeks worth of blocks assuming 2s block time - -type chainWithFinalBlock struct { - *core.BlockChain -} - -// CurrentFinalBlock returns the current block below which blobs should not -// be maintained anymore for reorg purposes. 
-func (c *chainWithFinalBlock) CurrentFinalBlock() *types.Header { - lastAccepted := c.LastAcceptedBlock().Header().Number.Uint64() - if lastAccepted <= blocksToKeep { - return nil - } - - return c.GetHeaderByNumber(lastAccepted - blocksToKeep) -} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 8115198e4d..8a64e67683 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -30,8 +30,7 @@ import ( "time" "github.com/ava-labs/subnet-evm/core" - "github.com/ava-labs/subnet-evm/core/txpool/blobpool" - "github.com/ava-labs/subnet-evm/core/txpool/legacypool" + "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/eth/gasprice" "github.com/ava-labs/subnet-evm/miner" "github.com/ethereum/go-ethereum/common" @@ -62,8 +61,7 @@ func NewDefaultConfig() Config { SnapshotCache: 256, AcceptedCacheSize: 32, Miner: miner.Config{}, - TxPool: legacypool.DefaultConfig, - BlobPool: blobpool.DefaultConfig, + TxPool: txpool.DefaultConfig, RPCGasCap: 25000000, RPCEVMTimeout: 5 * time.Second, GPO: DefaultFullGPOConfig, @@ -98,6 +96,8 @@ type Config struct { // TrieDB and snapshot options TrieCleanCache int + TrieCleanJournal string + TrieCleanRejournal time.Duration TrieDirtyCache int TrieDirtyCommitTarget int TriePrefetcherParallelism int @@ -112,8 +112,7 @@ type Config struct { Miner miner.Config // Transaction pool options - TxPool legacypool.Config - BlobPool blobpool.Config + TxPool txpool.Config // Gas Price Oracle options GPO gasprice.Config diff --git a/eth/filters/api.go b/eth/filters/api.go index 4adea84f7a..5bdd4e9344 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -386,12 +386,12 @@ func (api *FilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { if api.sys.backend.IsAllowUnfinalizedQueries() { logsSub, err = api.events.SubscribeLogs(interfaces.FilterQuery(crit), logs) if err != nil { - return "", err + return rpc.ID(""), err } } else { logsSub, err = api.events.SubscribeAcceptedLogs(interfaces.FilterQuery(crit), logs) if err != nil { - return "", err + return rpc.ID(""), err } } diff --git a/eth/filters/filter.go b/eth/filters/filter.go index 15a8bc803b..e4bb6bf087 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -132,32 +132,37 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } } - var ( - beginPending = f.begin == rpc.PendingBlockNumber.Int64() - endPending = f.end == rpc.PendingBlockNumber.Int64() - endSet = f.end >= 0 - ) - - // special case for pending logs - if beginPending && !endPending { - return nil, errors.New("invalid block range") - } - // Short-cut if all we care about is pending logs - if beginPending && endPending { + if f.begin == rpc.PendingBlockNumber.Int64() { + if f.end != rpc.PendingBlockNumber.Int64() { + return nil, errors.New("invalid block range") + } + // There is no pending block, if the request specifies only the pending block, then return nil. return nil, nil } + // Figure out the limits of the filter range + // LatestBlockNumber is transformed into the last accepted block in HeaderByNumber + // so it is left in place here. 
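
The resolution step that follows collapses both the latest and pending sentinels onto the captured head height. A self-contained sketch of that mapping, using a hypothetical BlockNumber type that mirrors the rpc package's negative-sentinel convention (this is an illustration, not the rpc package itself):

package main

import (
	"errors"
	"fmt"
)

// BlockNumber mimics rpc.BlockNumber: negative values are symbolic
// (pending, latest), non-negative values are concrete heights.
type BlockNumber int64

const (
	PendingBlockNumber BlockNumber = -2
	LatestBlockNumber  BlockNumber = -1
)

// resolve maps a possibly-symbolic block number onto a concrete height,
// treating both latest and pending as the previously captured head, as
// the patched filter code does.
func resolve(num BlockNumber, head int64) (int64, error) {
	switch num {
	case LatestBlockNumber, PendingBlockNumber:
		return head, nil
	default:
		if num < 0 {
			return 0, errors.New("unsupported special block number")
		}
		return int64(num), nil
	}
}

func main() {
	h, _ := resolve(PendingBlockNumber, 998)
	fmt.Println(h) // 998: pending resolves to the captured head
}
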
+ header, err := f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + if err != nil { + return nil, err + } + if header == nil { + return nil, nil + } + var ( + head = header.Number.Int64() + ) resolveSpecial := func(number int64) (int64, error) { var hdr *types.Header switch number { - case rpc.LatestBlockNumber.Int64(), rpc.PendingBlockNumber.Int64(): + case rpc.LatestBlockNumber.Int64(): + return head, nil + case rpc.PendingBlockNumber.Int64(): // we should return head here since we've already captured // that we need to get the pending logs in the pending boolean above - hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) - if hdr == nil { - return 0, errors.New("latest header not found") - } + return head, nil case rpc.FinalizedBlockNumber.Int64(): hdr, _ = f.sys.backend.HeaderByNumber(ctx, rpc.FinalizedBlockNumber) if hdr == nil { @@ -173,9 +178,6 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { } return hdr.Number.Int64(), nil } - - var err error - // range query need to resolve the special begin/end block number if f.begin, err = resolveSpecial(f.begin); err != nil { return nil, err } @@ -189,7 +191,7 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // We error in this case to prevent a bad UX where the caller thinks there // are no logs from the specified beginning to end (when in reality there may // be some). - if endSet && f.end < f.begin { + if f.end < f.begin { return nil, fmt.Errorf("begin block %d is greater than end block %d", f.begin, f.end) } @@ -199,77 +201,43 @@ func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { return nil, fmt.Errorf("requested too many blocks from %d to %d, maximum is set to %d", f.begin, f.end, maxBlocks) } // Gather all indexed logs, and finish with non indexed ones - logChan, errChan := f.rangeLogsAsync(ctx) - var logs []*types.Log - for { - select { - case log := <-logChan: - logs = append(logs, log) - case err := <-errChan: - if err != nil { - // if an error occurs during extraction, we do return the extracted data - return logs, err - } - return logs, nil - } - } -} - -// rangeLogsAsync retrieves block-range logs that match the filter criteria asynchronously, -// it creates and returns two channels: one for delivering log data, and one for reporting errors. -func (f *Filter) rangeLogsAsync(ctx context.Context) (chan *types.Log, chan error) { var ( - logChan = make(chan *types.Log) - errChan = make(chan error) + logs []*types.Log + end = uint64(f.end) + size, sections = f.sys.backend.BloomStatus() ) - - go func() { - defer func() { - close(errChan) - close(logChan) - }() - - // Gather all indexed logs, and finish with non indexed ones - var ( - end = uint64(f.end) - size, sections = f.sys.backend.BloomStatus() - err error - ) - if indexed := sections * size; indexed > uint64(f.begin) { - if indexed > end { - indexed = end + 1 - } - if err = f.indexedLogs(ctx, indexed-1, logChan); err != nil { - errChan <- err - return - } + if indexed := sections * size; indexed > uint64(f.begin) { + if indexed > end { + logs, err = f.indexedLogs(ctx, end) + } else { + logs, err = f.indexedLogs(ctx, indexed-1) } - - if err := f.unindexedLogs(ctx, end, logChan); err != nil { - errChan <- err - return + if err != nil { + return logs, err } - - errChan <- nil - }() - - return logChan, errChan + } + rest, err := f.unindexedLogs(ctx, end) + logs = append(logs, rest...) 
+ return logs, err } // indexedLogs returns the logs matching the filter criteria based on the bloom // bits indexed available locally or via the network. -func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error { +func (f *Filter) indexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { // Create a matcher session and request servicing from the backend matches := make(chan uint64, 64) session, err := f.matcher.Start(ctx, uint64(f.begin), end, matches) if err != nil { - return err + return nil, err } defer session.Close() f.sys.backend.ServiceFilter(ctx, session) + // Iterate over the matches until exhausted or context closed + var logs []*types.Log + for { select { case number, ok := <-matches: @@ -279,50 +247,47 @@ func (f *Filter) indexedLogs(ctx context.Context, end uint64, logChan chan *type if err == nil { f.begin = int64(end) + 1 } - return err + return logs, err } f.begin = int64(number) + 1 // Retrieve the suggested block and pull any truly matching logs header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) if header == nil || err != nil { - return err + return logs, err } found, err := f.checkMatches(ctx, header) if err != nil { - return err - } - for _, log := range found { - logChan <- log + return logs, err } + logs = append(logs, found...) case <-ctx.Done(): - return ctx.Err() + return logs, ctx.Err() } } } // unindexedLogs returns the logs matching the filter criteria based on raw block // iteration and bloom matching. -func (f *Filter) unindexedLogs(ctx context.Context, end uint64, logChan chan *types.Log) error { +func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, error) { + var logs []*types.Log + for ; f.begin <= int64(end); f.begin++ { + if f.begin%10 == 0 && ctx.Err() != nil { + return logs, ctx.Err() + } header, err := f.sys.backend.HeaderByNumber(ctx, rpc.BlockNumber(f.begin)) if header == nil || err != nil { - return err + return logs, err } found, err := f.blockLogs(ctx, header) if err != nil { - return err - } - for _, log := range found { - select { - case logChan <- log: - case <-ctx.Done(): - return ctx.Err() - } + return logs, err } + logs = append(logs, found...) } - return nil + return logs, nil } // blockLogs returns the logs matching the filter criteria within a single block. @@ -343,25 +308,22 @@ func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*typ unfiltered := types.FlattenLogs(logsList) logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - if len(logs) == 0 { - return nil, nil - } - // Most backends will deliver un-derived logs, but check nevertheless. - if len(logs) > 0 && logs[0].TxHash != (common.Hash{}) { + if len(logs) > 0 { + // We have matching logs, check if we need to resolve full logs via the light client + if logs[0].TxHash == (common.Hash{}) { + receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash()) + if err != nil { + return nil, err + } + unfiltered = unfiltered[:0] + for _, receipt := range receipts { + unfiltered = append(unfiltered, receipt.Logs...) + } + logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) + } return logs, nil } - // We have matching logs, check if we need to resolve full logs via the light client - receipts, err := f.sys.backend.GetReceipts(ctx, header.Hash()) - if err != nil { - return nil, err - } - unfiltered = unfiltered[:0] - for _, receipt := range receipts { - unfiltered = append(unfiltered, receipt.Logs...) 
- } - logs = filterLogs(unfiltered, nil, nil, f.addresses, f.topics) - - return logs, nil + return nil, nil } func includes(addresses []common.Address, a common.Address) bool { diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index 6b4a2d582d..4ebd672d47 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -518,6 +518,15 @@ func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) { } } +func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) { + for _, f := range filters[LogsSubscription] { + matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics) + if len(matchedLogs) > 0 { + f.logs <- matchedLogs + } + } +} + func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent, accepted bool) { for _, f := range filters[PendingTransactionsSubscription] { f.txs <- ev.Txs @@ -569,7 +578,7 @@ func (es *EventSystem) eventLoop() { case ev := <-es.logsAcceptedCh: es.handleAcceptedLogs(index, ev) case ev := <-es.rmLogsCh: - es.handleLogs(index, ev.Logs) + es.handleRemovedLogs(index, ev) case ev := <-es.pendingLogsCh: es.handlePendingLogs(index, ev) case ev := <-es.chainCh: diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index c44cc6559b..d56b8c7540 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -28,21 +28,16 @@ package filters import ( "context" - "encoding/json" "math/big" - "strings" + "reflect" "testing" - "time" - "github.com/ava-labs/subnet-evm/accounts/abi" "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/core/vm" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/rpc" - "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" @@ -121,53 +116,10 @@ func BenchmarkFilters(b *testing.B) { func TestFilters(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - _, sys = newTestFilterSystem(t, db, Config{}) - // Sender account + db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false) + _, sys = newTestFilterSystem(t, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) - signer = types.NewLondonSigner(big.NewInt(1)) - // Logging contract - contract = common.Address{0xfe} - contract2 = common.Address{0xff} - abiStr = 
`[{"inputs":[],"name":"log0","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"}],"name":"log1","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"}],"name":"log2","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"}],"name":"log3","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"t1","type":"uint256"},{"internalType":"uint256","name":"t2","type":"uint256"},{"internalType":"uint256","name":"t3","type":"uint256"},{"internalType":"uint256","name":"t4","type":"uint256"}],"name":"log4","outputs":[],"stateMutability":"nonpayable","type":"function"}]` - /* - // SPDX-License-Identifier: GPL-3.0 - pragma solidity >=0.7.0 <0.9.0; - - contract Logger { - function log0() external { - assembly { - log0(0, 0) - } - } - - function log1(uint t1) external { - assembly { - log1(0, 0, t1) - } - } - - function log2(uint t1, uint t2) external { - assembly { - log2(0, 0, t1, t2) - } - } - - function log3(uint t1, uint t2, uint t3) external { - assembly { - log3(0, 0, t1, t2, t3) - } - } - - function log4(uint t1, uint t2, uint t3, uint t4) external { - assembly { - log4(0, 0, t1, t2, t3, t4) - } - } - } - */ - bytecode = common.FromHex("608060405234801561001057600080fd5b50600436106100575760003560e01c80630aa731851461005c5780632a4c08961461006657806378b9a1f314610082578063c670f8641461009e578063c683d6a3146100ba575b600080fd5b6100646100d6565b005b610080600480360381019061007b9190610143565b6100dc565b005b61009c60048036038101906100979190610196565b6100e8565b005b6100b860048036038101906100b391906101d6565b6100f2565b005b6100d460048036038101906100cf9190610203565b6100fa565b005b600080a0565b808284600080a3505050565b8082600080a25050565b80600080a150565b80828486600080a450505050565b600080fd5b6000819050919050565b6101208161010d565b811461012b57600080fd5b50565b60008135905061013d81610117565b92915050565b60008060006060848603121561015c5761015b610108565b5b600061016a8682870161012e565b935050602061017b8682870161012e565b925050604061018c8682870161012e565b9150509250925092565b600080604083850312156101ad576101ac610108565b5b60006101bb8582860161012e565b92505060206101cc8582860161012e565b9150509250929050565b6000602082840312156101ec576101eb610108565b5b60006101fa8482850161012e565b91505092915050565b6000806000806080858703121561021d5761021c610108565b5b600061022b8782880161012e565b945050602061023c8782880161012e565b935050604061024d8782880161012e565b925050606061025e8782880161012e565b9150509295919450925056fea264697066735822122073a4b156f487e59970dc1ef449cc0d51467268f676033a17188edafcee861f9864736f6c63430008110033") hash1 = common.BytesToHash([]byte("topic1")) hash2 = common.BytesToHash([]byte("topic2")) @@ -175,213 +127,134 @@ func TestFilters(t *testing.T) { hash4 = common.BytesToHash([]byte("topic4")) gspec = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: core.GenesisAlloc{ - addr: {Balance: big.NewInt(0).Mul(big.NewInt(100), big.NewInt(params.Ether))}, - contract: {Balance: big.NewInt(0), Code: bytecode}, - contract2: {Balance: big.NewInt(0), Code: bytecode}, - }, + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}}, BaseFee: big.NewInt(1), } ) + defer 
db.Close() - contractABI, err := abi.JSON(strings.NewReader(abiStr)) - if err != nil { - t.Fatal(err) - } - - // Hack: GenerateChainWithGenesis creates a new db. - // Commit the genesis manually and use GenerateChain. - _, err = gspec.Commit(db, trie.NewDatabase(db)) - if err != nil { - t.Fatal(err) - } - //_, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) { - chain, _, err := core.GenerateChain(gspec.Config, gspec.ToBlock(), dummy.NewFaker(), db, 1000, 10, func(i int, gen *core.BlockGen) { + _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) { switch i { case 1: - data, err := contractABI.Pack("log1", hash1.Big()) - if err != nil { - t.Fatal(err) + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash1}, + }, } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 0, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) - tx2, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 1, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract2, - Data: data, - }), signer, key1) - gen.AddTx(tx2) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, gen.BaseFee(), nil)) case 2: - data, err := contractABI.Pack("log2", hash2.Big(), hash1.Big()) - if err != nil { - t.Fatal(err) + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash2}, + }, } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 2, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, gen.BaseFee(), nil)) + case 998: - data, err := contractABI.Pack("log1", hash3.Big()) - if err != nil { - t.Fatal(err) + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash3}, + }, } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 3, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract2, - Data: data, - }), signer, key1) - gen.AddTx(tx) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, gen.BaseFee(), nil)) case 999: - data, err := contractABI.Pack("log1", hash4.Big()) - if err != nil { - t.Fatal(err) + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash4}, + }, } - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{ - Nonce: 4, - GasPrice: gen.BaseFee(), - Gas: 30000, - To: &contract, - Data: data, - }), signer, key1) - gen.AddTx(tx) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) } }) require.NoError(t, err) - bc, err := core.NewBlockChain(db, core.DefaultCacheConfig, gspec, dummy.NewCoinbaseFaker(), vm.Config{}, gspec.ToBlock().Hash(), false) - if err != nil { - t.Fatal(err) - } - _, err = bc.InsertChain(chain) - if err != nil { - t.Fatal(err) + // The test txs are not properly signed, can't simply create a chain + // and then import blocks. 
TODO(rjl493456442) try to get rid of the + // manual database writes. + gspec.MustCommit(db) + for i, block := range chain { + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) } // Set block 998 as Finalized (-3) - // bc.SetFinalized(chain[998].Header()) + // rawdb.WriteFinalizedBlockHash(db, chain[998].Hash()) err = rawdb.WriteAcceptorTip(db, chain[998].Hash()) require.NoError(t, err) + filter := sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) + logs, _ := filter.Logs(context.Background()) + if len(logs) != 4 { + t.Error("expected 4 log, got", len(logs)) + } + for i, tc := range []struct { - f *Filter - want string - err string + f *Filter + wantHashes []common.Hash }{ { - f: sys.NewBlockFilter(chain[2].Hash(), []common.Address{contract}, nil), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}), + []common.Hash{hash3}, }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{contract}, [][]common.Hash{{hash1, hash2, hash3, hash4}}), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{addr}, [][]common.Hash{{hash3}}), + []common.Hash{hash3}, }, { - f: sys.NewRangeFilter(900, 999, []common.Address{contract}, [][]common.Hash{{hash3}}), + sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}), + []common.Hash{hash1, hash2}, }, { - f: sys.NewRangeFilter(990, int64(rpc.LatestBlockNumber), []common.Address{contract2}, [][]common.Hash{{hash3}}), - want: 
`[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), + nil, }, { - f: sys.NewRangeFilter(1, 10, []common.Address{contract}, [][]common.Hash{{hash2}, {hash1}}), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), + nil, }, { - f: sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xa8028c655b6423204c8edfbc339f57b042d6bec2b6a61145d76b7c08b4cccd42","transactionIndex":"0x0","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x0","removed":false},{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x2","transactionHash":"0xdba3e2ea9a7d690b722d70ee605fd67ba4c00d1d3aecd5cf187a7b92ad8eb3df","transactionIndex":"0x1","blockHash":"0x24417bb49ce44cfad65da68f33b510bf2a129c0d89ccf06acb6958b8585ccf34","logIndex":"0x1","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696332","0x0000000000000000000000000000000000000000000000000000746f70696331"],"data":"0x","blockNumber":"0x3","transactionHash":"0xdefe471992a07a02acdfbe33edaae22fbb86d7d3cec3f1b8e4e77702fb3acc1d","transactionIndex":"0x0","blockHash":"0x7a7556792ca7d37882882e2b001fe14833eaf81c2c7f865c9c771ec37a024f6b","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), + nil, }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}}), + sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash4}, }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), []common.Address{common.BytesToAddress([]byte("failmenow"))}, nil), + sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), []common.Hash{hash3, hash4}, }, { - f: sys.NewRangeFilter(0, int64(rpc.LatestBlockNumber), nil, [][]common.Hash{{common.BytesToHash([]byte("fail"))}, {hash1}}), + sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), []common.Hash{hash3}, }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), - want: 
`[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), nil, }, { - f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), - want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false},{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), nil, }, { - f: sys.NewRangeFilter(int64(rpc.FinalizedBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), - want: `[{"address":"0xff00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696333"],"data":"0x","blockNumber":"0x3e7","transactionHash":"0x53e3675800c6908424b61b35a44e51ca4c73ca603e58a65b32c67968b4f42200","transactionIndex":"0x0","blockHash":"0x2e4620a2b426b0612ec6cad9603f466723edaed87f98c9137405dd4f7a2409ff","logIndex":"0x0","removed":false}]`, + sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), nil, }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.FinalizedBlockNumber), nil, nil), + sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), nil, }, { - f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), - err: "safe header not found", - }, { - f: sys.NewRangeFilter(int64(rpc.SafeBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), - err: "safe header not found", - }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.SafeBlockNumber), nil, nil), - err: "safe header not found", - }, { - f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), - }, { - f: sys.NewRangeFilter(int64(rpc.LatestBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), - want: `[{"address":"0xfe00000000000000000000000000000000000000","topics":["0x0000000000000000000000000000000000000000000000000000746f70696334"],"data":"0x","blockNumber":"0x3e8","transactionHash":"0x9a87842100a638dfa5da8842b4beda691d2fd77b0c84b57f24ecfa9fb208f747","transactionIndex":"0x0","blockHash":"0xb360bad5265261c075ece02d3bf0e39498a6a76310482cdfd90588748e6c5ee0","logIndex":"0x0","removed":false}]`, - }, { - f: sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.LatestBlockNumber), nil, nil), - err: "invalid block range", + sys.NewRangeFilter(int64(rpc.PendingBlockNumber), int64(rpc.PendingBlockNumber), nil, nil), nil, 
}, } { - logs, err := tc.f.Logs(context.Background()) - if err == nil && tc.err != "" { - t.Fatalf("test %d, expected error %q, got nil", i, tc.err) - } else if err != nil && err.Error() != tc.err { - t.Fatalf("test %d, expected error %q, got %q", i, tc.err, err.Error()) + logs, _ := tc.f.Logs(context.Background()) + var haveHashes []common.Hash + for _, l := range logs { + haveHashes = append(haveHashes, l.Topics[0]) } - if tc.want == "" && len(logs) == 0 { - continue + if have, want := len(haveHashes), len(tc.wantHashes); have != want { + t.Fatalf("test %d, have %d logs, want %d", i, have, want) } - tc.want = patchWant(t, tc.want, chain) - have, err := json.Marshal(logs) - if err != nil { - t.Fatal(err) - } - if string(have) != tc.want { - t.Fatalf("test %d, have:\n%s\nwant:\n%s", i, have, tc.want) - } - } - - t.Run("timeout", func(t *testing.T) { - f := sys.NewRangeFilter(0, -1, nil, nil) - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(-time.Hour)) - defer cancel() - _, err := f.Logs(ctx) - if err == nil { - t.Fatal("expected error") + if len(haveHashes) == 0 { + continue } - if err != context.DeadlineExceeded { - t.Fatalf("expected context.DeadlineExceeded, got %v", err) + if !reflect.DeepEqual(tc.wantHashes, haveHashes) { + t.Fatalf("test %d, have %v want %v", i, haveHashes, tc.wantHashes) } - }) -} - -func patchWant(t *testing.T, want string, blocks []*types.Block) string { - var logs []*types.Log - err := json.Unmarshal([]byte(want), &logs) - if err != nil { - t.Fatal(err) - } - - for _, log := range logs { - blockIndex := log.BlockNumber - 1 - log.BlockHash = blocks[blockIndex].Hash() - log.TxHash = blocks[blockIndex].Transactions()[log.TxIndex].Hash() - } - result, err := json.Marshal(logs) - if err != nil { - t.Fatal(err) } - return string(result) } diff --git a/eth/gasprice/feehistory.go b/eth/gasprice/feehistory.go index 143adac2d4..81fb379c7a 100644 --- a/eth/gasprice/feehistory.go +++ b/eth/gasprice/feehistory.go @@ -31,7 +31,7 @@ import ( "errors" "fmt" "math/big" - "slices" + "sort" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/rpc" @@ -46,16 +46,26 @@ var ( ) // txGasAndReward is sorted in ascending order based on reward -type txGasAndReward struct { - gasUsed uint64 - reward *big.Int -} +type ( + txGasAndReward struct { + gasUsed uint64 + reward *big.Int + } + sortGasAndReward []txGasAndReward + slimBlock struct { + GasUsed uint64 + GasLimit uint64 + BaseFee *big.Int + Txs []txGasAndReward + } +) -type slimBlock struct { - GasUsed uint64 - GasLimit uint64 - BaseFee *big.Int - Txs []txGasAndReward +func (s sortGasAndReward) Len() int { return len(s) } +func (s sortGasAndReward) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} +func (s sortGasAndReward) Less(i, j int) bool { + return s[i].reward.Cmp(s[j].reward) < 0 } // processBlock prepares a [slimBlock] from a retrieved block and list of @@ -67,14 +77,12 @@ func processBlock(block *types.Block, receipts types.Receipts) *slimBlock { } sb.GasUsed = block.GasUsed() sb.GasLimit = block.GasLimit() - sorter := make([]txGasAndReward, len(block.Transactions())) + sorter := make(sortGasAndReward, len(block.Transactions())) for i, tx := range block.Transactions() { reward, _ := tx.EffectiveGasTip(sb.BaseFee) sorter[i] = txGasAndReward{gasUsed: receipts[i].GasUsed, reward: reward} } - slices.SortStableFunc(sorter, func(a, b txGasAndReward) int { - return a.reward.Cmp(b.reward) - }) + sort.Stable(sorter) sb.Txs = sorter return &sb } diff --git a/eth/gasprice/gasprice.go 
b/eth/gasprice/gasprice.go index 899c2eba0e..79a33f807b 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -30,6 +30,7 @@ import ( "context" "fmt" "math/big" + "sort" "sync" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -45,7 +46,6 @@ import ( "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" - "golang.org/x/exp/slices" ) const ( @@ -396,12 +396,12 @@ func (oracle *Oracle) suggestDynamicFees(ctx context.Context) (*big.Int, *big.In price := lastPrice baseFee := lastBaseFee if len(tipResults) > 0 { - slices.SortFunc(tipResults, func(a, b *big.Int) int { return a.Cmp(b) }) + sort.Sort(bigIntArray(tipResults)) price = tipResults[(len(tipResults)-1)*oracle.percentile/100] } if len(baseFeeResults) > 0 { - slices.SortFunc(baseFeeResults, func(a, b *big.Int) int { return a.Cmp(b) }) + sort.Sort(bigIntArray(baseFeeResults)) baseFee = baseFeeResults[(len(baseFeeResults)-1)*oracle.percentile/100] } if price.Cmp(oracle.maxPrice) > 0 { @@ -434,3 +434,9 @@ func (oracle *Oracle) getFeeInfo(ctx context.Context, number uint64) (*feeInfo, } return oracle.feeInfoProvider.addHeader(ctx, header) } + +type bigIntArray []*big.Int + +func (s bigIntArray) Len() int { return len(s) } +func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 } +func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/eth/state_accessor.go b/eth/state_accessor.go index e435fdb34f..302a7e10de 100644 --- a/eth/state_accessor.go +++ b/eth/state_accessor.go @@ -176,7 +176,7 @@ func (eth *Ethereum) StateAtBlock(ctx context.Context, block *types.Block, reexe return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(current.NumberU64(), eth.blockchain.Config().IsEIP158(current.Number()), true) + root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()), true) if err != nil { return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", current.NumberU64(), current.Root().Hex(), err) diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go index 1b81756747..10a6ca4d1d 100644 --- a/eth/tracers/api_test.go +++ b/eth/tracers/api_test.go @@ -27,6 +27,7 @@ package tracers import ( + "bytes" "context" "crypto/ecdsa" "encoding/json" @@ -34,6 +35,7 @@ import ( "fmt" "math/big" "reflect" + "sort" "sync/atomic" "testing" @@ -52,7 +54,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" - "golang.org/x/exp/slices" ) var ( @@ -820,13 +821,19 @@ type Account struct { addr common.Address } -func newAccounts(n int) (accounts []Account) { +type Accounts []Account + +func (a Accounts) Len() int { return len(a) } +func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } + +func newAccounts(n int) (accounts Accounts) { for i := 0; i < n; i++ { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) accounts = append(accounts, Account{key: key, addr: addr}) } - slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) }) + sort.Sort(accounts) return accounts } diff --git a/eth/tracers/internal/tracetest/calltrace_test.go b/eth/tracers/internal/tracetest/calltrace_test.go index b89a50a364..7e259ea054 100644 --- 
a/eth/tracers/internal/tracetest/calltrace_test.go +++ b/eth/tracers/internal/tracetest/calltrace_test.go @@ -382,6 +382,7 @@ func TestInternals(t *testing.T) { Balance: big.NewInt(500000000000000), }, }, false) + evm := vm.NewEVM(context, txContext, statedb, params.TestPreSubnetEVMConfig, vm.Config{Tracer: tc.tracer}) msg := &core.Message{ To: &to, diff --git a/eth/tracers/internal/tracetest/prestate_test.go b/eth/tracers/internal/tracetest/prestate_test.go index 7c296f3e6a..4e8316cd58 100644 --- a/eth/tracers/internal/tracetest/prestate_test.go +++ b/eth/tracers/internal/tracetest/prestate_test.go @@ -102,9 +102,10 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) { } // Configure a blockchain with the given prestate var ( - signer = types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number)), uint64(test.Context.Time)) - origin, _ = signer.Sender(tx) - txContext = vm.TxContext{ + blockNumber = new(big.Int).SetUint64(uint64(test.Context.Number)) + signer = types.MakeSigner(test.Genesis.Config, blockNumber, uint64(test.Context.Time)) + origin, _ = signer.Sender(tx) + txContext = vm.TxContext{ Origin: origin, GasPrice: tx.GasPrice(), } @@ -112,7 +113,7 @@ func testPrestateDiffTracer(tracerName string, dirPath string, t *testing.T) { CanTransfer: core.CanTransfer, Transfer: core.Transfer, Coinbase: test.Context.Miner, - BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), + BlockNumber: blockNumber, Time: uint64(test.Context.Time), Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), diff --git a/eth/tracers/js/goja.go b/eth/tracers/js/goja.go index cb40e1bec8..9c2bdac9a4 100644 --- a/eth/tracers/js/goja.go +++ b/eth/tracers/js/goja.go @@ -96,7 +96,7 @@ func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString b if !obj.Get("constructor").SameAs(bufType) { break } - b := obj.Export().([]byte) + b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes() return b, nil } return nil, errors.New("invalid buffer type") @@ -255,7 +255,7 @@ func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Addr t.ctx["value"] = valueBig t.ctx["block"] = t.vm.ToValue(env.Context.BlockNumber.Uint64()) // Update list of precompiles based on current block - rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time) + rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time) t.activePrecompiles = vm.ActivePrecompiles(rules) } diff --git a/eth/tracers/logger/logger.go b/eth/tracers/logger/logger.go index 27d726ea97..83e3d631ab 100644 --- a/eth/tracers/logger/logger.go +++ b/eth/tracers/logger/logger.go @@ -157,7 +157,6 @@ func (l *StructLogger) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, s if l.cfg.Limit != 0 && l.cfg.Limit <= len(l.logs) { return } - memory := scope.Memory stack := scope.Stack contract := scope.Contract @@ -419,7 +418,6 @@ type StructLogRes struct { Depth int `json:"depth"` Error string `json:"error,omitempty"` Stack *[]string `json:"stack,omitempty"` - ReturnData string `json:"returnData,omitempty"` Memory *[]string `json:"memory,omitempty"` Storage *map[string]string `json:"storage,omitempty"` RefundCounter uint64 `json:"refund,omitempty"` @@ -445,9 +443,6 @@ func formatLogs(logs []StructLog) []StructLogRes { } formatted[index].Stack = &stack } - if trace.ReturnData != nil && len(trace.ReturnData) > 0 { - formatted[index].ReturnData = hexutil.Bytes(trace.ReturnData).String() - } if 
trace.Memory != nil { memory := make([]string, 0, (len(trace.Memory)+31)/32) for i := 0; i+32 <= len(trace.Memory); i += 32 { diff --git a/eth/tracers/native/4byte.go b/eth/tracers/native/4byte.go index 50a4b23c39..14a39356f0 100644 --- a/eth/tracers/native/4byte.go +++ b/eth/tracers/native/4byte.go @@ -91,7 +91,7 @@ func (t *fourByteTracer) store(id []byte, size int) { // CaptureStart implements the EVMLogger interface to initialize the tracing operation. func (t *fourByteTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { // Update list of precompiles based on current block - rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time) + rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time) t.activePrecompiles = vm.ActivePrecompiles(rules) // Save the outer calldata also diff --git a/eth/tracers/native/call_flat.go b/eth/tracers/native/call_flat.go index c83e24a25c..e67bed2040 100644 --- a/eth/tracers/native/call_flat.go +++ b/eth/tracers/native/call_flat.go @@ -158,7 +158,7 @@ func newFlatCallTracer(ctx *tracers.Context, cfg json.RawMessage) (tracers.Trace func (t *flatCallTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { t.tracer.CaptureStart(env, from, to, create, input, gas, value) // Update list of precompiles based on current block - rules := env.ChainConfig().Rules(env.Context.BlockNumber, env.Context.Time) + rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Timestamp()) t.activePrecompiles = vm.ActivePrecompiles(rules) } @@ -259,7 +259,7 @@ func flatFromNested(input *callFrame, traceAddress []int, convertErrs bool, ctx case vm.CREATE, vm.CREATE2: frame = newFlatCreate(input) case vm.SELFDESTRUCT: - frame = newFlatSelfdestruct(input) + frame = newFlatSuicide(input) case vm.CALL, vm.STATICCALL, vm.CALLCODE, vm.DELEGATECALL: frame = newFlatCall(input) default: @@ -341,7 +341,7 @@ func newFlatCall(input *callFrame) *flatCallFrame { } } -func newFlatSelfdestruct(input *callFrame) *flatCallFrame { +func newFlatSuicide(input *callFrame) *flatCallFrame { return &flatCallFrame{ Type: "suicide", Action: flatCallAction{ diff --git a/ethclient/subnetevmclient/subnet_evm_client.go b/ethclient/subnetevmclient/subnet_evm_client.go index 1ce9a50b3f..cf4ed51773 100644 --- a/ethclient/subnetevmclient/subnet_evm_client.go +++ b/ethclient/subnetevmclient/subnet_evm_client.go @@ -105,11 +105,6 @@ func (ec *Client) GetProof(ctx context.Context, account common.Address, keys []s StorageProof []storageResult `json:"storageProof"` } - // Avoid keys being 'null'. 
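
The guard deleted just below existed because encoding/json marshals a nil slice as null while an empty slice marshals as [], and an RPC server may reject a null storage-key list in eth_getProof. A quick standalone illustration of that distinction:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilKeys []string    // marshals as null
	emptyKeys := []string{} // marshals as []
	a, _ := json.Marshal(nilKeys)
	b, _ := json.Marshal(emptyKeys)
	fmt.Println(string(a), string(b)) // prints: null []
}
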
- if keys == nil { - keys = []string{} - } - var res accountResult err := ec.c.CallContext(ctx, &res, "eth_getProof", account, keys, ethclient.ToBlockNumArg(blockNumber)) // Turn hexutils back to normal datatypes diff --git a/go.mod b/go.mod index 158237581c..4e84054be0 100644 --- a/go.mod +++ b/go.mod @@ -10,20 +10,18 @@ require ( github.com/deckarep/golang-set/v2 v2.1.0 github.com/docker/docker v1.6.2 github.com/dop251/goja v0.0.0-20230605162241-28ee0ee714f3 - github.com/ethereum/go-ethereum v1.12.2 + github.com/ethereum/go-ethereum v1.12.0 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 github.com/go-cmd/cmd v1.4.1 - github.com/golang/protobuf v1.5.3 github.com/google/uuid v1.6.0 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d - github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 github.com/holiman/bloomfilter/v2 v2.0.3 - github.com/holiman/uint256 v1.2.3 + github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c github.com/kylelemons/godebug v1.1.0 github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty v0.0.16 @@ -39,15 +37,14 @@ require ( github.com/status-im/keycard-go v0.2.0 github.com/stretchr/testify v1.8.4 github.com/tyler-smith/go-bip39 v1.1.0 - github.com/urfave/cli/v2 v2.24.1 + github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa go.uber.org/goleak v1.3.0 go.uber.org/mock v0.4.0 golang.org/x/crypto v0.18.0 - golang.org/x/exp v0.0.0-20231127185646-65229373498e golang.org/x/sync v0.6.0 golang.org/x/sys v0.16.0 golang.org/x/text v0.14.0 - golang.org/x/time v0.3.0 + golang.org/x/time v0.1.0 google.golang.org/protobuf v1.32.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) @@ -56,9 +53,8 @@ require ( github.com/BurntSushi/toml v1.3.2 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/ava-labs/coreth v0.13.2-0.20240304213436-8afbf2d68461 // indirect + github.com/ava-labs/coreth v0.13.1-rc.5 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bits-and-blooms/bitset v1.7.0 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/btcsuite/btcd/btcutil v1.1.3 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect @@ -67,13 +63,9 @@ require ( github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 // indirect github.com/cockroachdb/redact v1.1.3 // indirect - github.com/consensys/bavard v0.1.13 // indirect - github.com/consensys/gnark-crypto v0.12.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect - github.com/ethereum/c-kzg-4844 v0.3.1 // indirect github.com/getsentry/sentry-go v0.18.0 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -82,6 +74,7 @@ require ( github.com/go-stack/stack v1.8.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.2 // indirect 
github.com/google/go-cmp v0.6.0 // indirect @@ -102,7 +95,6 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect github.com/pelletier/go-toml v1.9.5 // indirect @@ -134,6 +126,7 @@ require ( go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect + golang.org/x/exp v0.0.0-20231127185646-65229373498e // indirect golang.org/x/net v0.20.0 // indirect golang.org/x/term v0.16.0 // indirect golang.org/x/tools v0.16.0 // indirect @@ -144,5 +137,4 @@ require ( gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/go.sum b/go.sum index 624152a4e4..40cbf87166 100644 --- a/go.sum +++ b/go.sum @@ -58,13 +58,11 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/ava-labs/avalanchego v1.11.2 h1:8iodZ+RjqpRwHdiXPPtvaNt72qravge7voGzw3yPRzg= github.com/ava-labs/avalanchego v1.11.2/go.mod h1:oTVnF9idL57J4LM/6RByTmKhI4QvV6OCnF99ysyBljE= -github.com/ava-labs/coreth v0.13.2-0.20240304213436-8afbf2d68461 h1:SIwGF3eVEwmexLm7is/MvG7W5sbmpGXaUT6RfUPP3jw= -github.com/ava-labs/coreth v0.13.2-0.20240304213436-8afbf2d68461/go.mod h1:v24MTMbxFSvyM7YeQFyWiXjIzVo2+UVs7tgH7xrByew= +github.com/ava-labs/coreth v0.13.1-rc.5 h1:YcTs9nryZLkf4gPmMyFx1TREFpDTPdg/VCNGGHSF2TY= +github.com/ava-labs/coreth v0.13.1-rc.5/go.mod h1:4y1igTe/sFOIrpAtXoY+AdmfftNHrmrhBBRVfGCAPcw= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo= -github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= @@ -119,18 +117,12 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lg github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/consensys/bavard v0.1.13 h1:oLhMLOFGTLdlda/kma4VOJazblc7IM5y5QPd2A/YjhQ= -github.com/consensys/bavard v0.1.13/go.mod h1:9ItSMtA/dXMAiL7BG6bqW2m3NdSEObYWoH223nGHukI= -github.com/consensys/gnark-crypto v0.12.1 h1:lHH39WuuFgVHONRl3J0LRBtuYdQTumFSDtJF7HpyG8M= -github.com/consensys/gnark-crypto v0.12.1/go.mod h1:v2Gy7L/4ZRosZ7Ivs+9SfUDr0f5UlG+EM5t7MPHiLuY= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A= -github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -166,10 +158,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg= -github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= -github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y= -github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI= +github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0= +github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= @@ -299,7 +289,6 @@ github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8q github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -327,12 +316,10 @@ github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuW github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= -github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod 
h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= -github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o= -github.com/holiman/uint256 v1.2.3/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c h1:DZfsyhDK1hnSS5lH8l+JggqzEleHteTYfutAiVlSUM8= +github.com/holiman/uint256 v1.2.2-0.20230321075855-87b91420868c/go.mod h1:SC8Ryt4n+UBbPbIBKaG9zbbDlp4jOru9xFZmPzLUTxw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= @@ -419,9 +406,6 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -555,8 +539,8 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/cli/v2 v2.24.1 h1:/QYYr7g0EhwXEML8jO+8OYt5trPnLHS0p3mrgExJ5NU= -github.com/urfave/cli/v2 v2.24.1/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa h1:5SqCsI/2Qya2bCzK15ozrqo2sZxkh0FHynJZOTVoV6Q= +github.com/urfave/cli/v2 v2.17.2-0.20221006022127-8f469abc00aa/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= @@ -821,8 +805,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1029,5 +1013,3 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= diff --git a/internal/blocktest/test_hash.go b/internal/blocktest/test_hash.go deleted file mode 100644 index 014e9ff4b0..0000000000 --- a/internal/blocktest/test_hash.go +++ /dev/null @@ -1,69 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package utesting provides a standalone replacement for package testing. -// -// This package exists because package testing cannot easily be embedded into a -// standalone go program. It provides an API that mirrors the standard library -// testing API. - -package blocktest - -import ( - "hash" - - "github.com/ethereum/go-ethereum/common" - "golang.org/x/crypto/sha3" -) - -// testHasher is the helper tool for transaction/receipt list hashing. -// The original hasher is trie, in order to get rid of import cycle, -// use the testing hasher instead. -type testHasher struct { - hasher hash.Hash -} - -// NewHasher returns a new testHasher instance. -func NewHasher() *testHasher { - return &testHasher{hasher: sha3.NewLegacyKeccak256()} -} - -// Reset resets the hash state. -func (h *testHasher) Reset() { - h.hasher.Reset() -} - -// Update updates the hash state with the given key and value. -func (h *testHasher) Update(key, val []byte) error { - h.hasher.Write(key) - h.hasher.Write(val) - return nil -} - -// Hash returns the hash value. 
-func (h *testHasher) Hash() common.Hash { - return common.BytesToHash(h.hasher.Sum(nil)) -} diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go index 9856302647..442ed99197 100644 --- a/internal/cmdtest/test_cmd.go +++ b/internal/cmdtest/test_cmd.go @@ -65,13 +65,13 @@ type TestCmd struct { Err error } -var id atomic.Int32 +var id int32 // Run exec's the current binary using name as argv[0] which will trigger the // reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) func (tt *TestCmd) Run(name string, args ...string) { - id.Add(1) - tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id.Load())} + id := atomic.AddInt32(&id, 1) + tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} tt.cmd = &exec.Cmd{ Path: reexec.Self(), Args: append([]string{name}, args...), diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 1453b92049..0bd4675fa9 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -29,7 +29,6 @@ package debug import ( "fmt" "io" - "net" "net/http" _ "net/http/pprof" "os" @@ -317,7 +316,7 @@ func Setup(ctx *cli.Context) error { port := ctx.Int(pprofPortFlag.Name) - address := net.JoinHostPort(listenHost, fmt.Sprintf("%d", port)) + address := fmt.Sprintf("%s:%d", listenHost, port) StartPProf(address) } if len(logFile) > 0 || rotation { diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 37e2be6ae3..c0f3d7a69d 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -691,78 +691,46 @@ type StorageResult struct { Proof []string `json:"proof"` } -// proofList implements ethdb.KeyValueWriter and collects the proofs as -// hex-strings for delivery to rpc-caller. -type proofList []string - -func (n *proofList) Put(key []byte, value []byte) error { - *n = append(*n, hexutil.Encode(value)) - return nil -} - -func (n *proofList) Delete(key []byte) error { - panic("not supported") -} - // GetProof returns the Merkle-proof for a given account and optionally some storage keys. func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNrOrHash rpc.BlockNumberOrHash) (*AccountResult, error) { - var ( - keys = make([]common.Hash, len(storageKeys)) - keyLengths = make([]int, len(storageKeys)) - storageProof = make([]StorageResult, len(storageKeys)) - storageTrie state.Trie - storageHash = types.EmptyRootHash - codeHash = types.EmptyCodeHash - ) - // Deserialize all keys. This prevents state access on invalid input. - for i, hexKey := range storageKeys { - var err error - keys[i], keyLengths[i], err = decodeHash(hexKey) - if err != nil { - return nil, err - } - } - state, _, err := s.b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) if state == nil || err != nil { return nil, err } - if storageTrie, err = state.StorageTrie(address); err != nil { + storageTrie, err := state.StorageTrie(address) + if err != nil { return nil, err } + storageHash := types.EmptyRootHash + codeHash := state.GetCodeHash(address) + storageProof := make([]StorageResult, len(storageKeys)) - // If we have a storageTrie, the account exists and we must update - // the storage root hash and the code hash. + // if we have a storageTrie, (which means the account exists), we can update the storagehash if storageTrie != nil { storageHash = storageTrie.Hash() - codeHash = state.GetCodeHash(address) - } - // Create the proofs for the storageKeys. 
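The internal/debug/flags.go hunk above trades net.JoinHostPort back for a plain fmt.Sprintf when building the pprof listen address. The two are interchangeable for IPv4 addresses and hostnames but diverge for IPv6 literals, which JoinHostPort brackets; a quick stand-alone comparison (illustrative only, not part of this patch):

package main

import (
	"fmt"
	"net"
)

func main() {
	host, port := "::1", 6060

	// Form restored by this revert: no brackets, so an IPv6 literal
	// yields the ambiguous listen address "::1:6060".
	fmt.Println(fmt.Sprintf("%s:%d", host, port))

	// Form being reverted away from: "[::1]:6060".
	fmt.Println(net.JoinHostPort(host, fmt.Sprintf("%d", port)))
}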
- for i, key := range keys { - // Output key encoding is a bit special: if the input was a 32-byte hash, it is - // returned as such. Otherwise, we apply the QUANTITY encoding mandated by the - // JSON-RPC spec for getProof. This behavior exists to preserve backwards - // compatibility with older client versions. - var outputKey string - if keyLengths[i] != 32 { - outputKey = hexutil.EncodeBig(key.Big()) - } else { - outputKey = hexutil.Encode(key[:]) - } + } else { + // no storageTrie means the account does not exist, so the codeHash is the hash of an empty bytearray. + codeHash = crypto.Keccak256Hash(nil) + } - if storageTrie == nil { - storageProof[i] = StorageResult{outputKey, &hexutil.Big{}, []string{}} - continue - } - var proof proofList - if err := storageTrie.Prove(crypto.Keccak256(key.Bytes()), &proof); err != nil { + // create the proof for the storageKeys + for i, hexKey := range storageKeys { + key, err := decodeHash(hexKey) + if err != nil { return nil, err } - value := (*hexutil.Big)(state.GetState(address, key).Big()) - storageProof[i] = StorageResult{outputKey, value, proof} + if storageTrie != nil { + proof, storageError := state.GetStorageProof(address, key) + if storageError != nil { + return nil, storageError + } + storageProof[i] = StorageResult{hexKey, (*hexutil.Big)(state.GetState(address, key).Big()), toHexSlice(proof)} + } else { + storageProof[i] = StorageResult{hexKey, &hexutil.Big{}, []string{}} + } } - // Create the accountProof. + // create the accountProof accountProof, proofErr := state.GetProof(address) if proofErr != nil { return nil, proofErr @@ -781,7 +749,7 @@ func (s *BlockChainAPI) GetProof(ctx context.Context, address common.Address, st // decodeHash parses a hex-encoded 32-byte hash. The input may optionally // be prefixed by 0x and can have a byte length up to 32. -func decodeHash(s string) (h common.Hash, inputLength int, err error) { +func decodeHash(s string) (common.Hash, error) { if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { s = s[2:] } @@ -790,19 +758,17 @@ func decodeHash(s string) (h common.Hash, inputLength int, err error) { } b, err := hex.DecodeString(s) if err != nil { - return common.Hash{}, 0, errors.New("hex string invalid") + return common.Hash{}, errors.New("hex string invalid") } if len(b) > 32 { - return common.Hash{}, len(b), errors.New("hex string too long, want at most 32 bytes") + return common.Hash{}, errors.New("hex string too long, want at most 32 bytes") } - return common.BytesToHash(b), len(b), nil + return common.BytesToHash(b), nil } // GetHeaderByNumber returns the requested canonical block header. -// - When blockNr is -1 the chain pending header is returned. -// - When blockNr is -2 the chain latest header is returned. -// - When blockNr is -3 the chain finalized header is returned. -// - When blockNr is -4 the chain safe header is returned. +// * When blockNr is -1 the chain head is returned. +// * When blockNr is -2 the pending chain head is returned. func (s *BlockChainAPI) GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (map[string]interface{}, error) { header, err := s.b.HeaderByNumber(ctx, number) if header != nil && err == nil { @@ -829,10 +795,8 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m } // GetBlockByNumber returns the requested canonical block. -// - When blockNr is -1 the chain pending block is returned. -// - When blockNr is -2 the chain latest block is returned. -// - When blockNr is -3 the chain finalized block is returned. 
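The decodeHash change above is the pivot of the GetProof revert: upstream had it report the caller's input length so that short storage keys could be echoed back QUANTITY-encoded while 32-byte inputs round-trip verbatim. The left-padding that makes this distinction matter is easy to see in isolation; an illustrative re-implementation (not the patched code itself), mirroring the hex handling visible in the hunk:

package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"strings"
)

// decodeHash accepts an optionally 0x-prefixed hex string of up to 32 bytes
// and left-pads it into a 32-byte hash, reporting the original byte length.
func decodeHash(s string) ([32]byte, int, error) {
	if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") {
		s = s[2:]
	}
	if len(s)%2 != 0 {
		s = "0" + s
	}
	b, err := hex.DecodeString(s)
	if err != nil {
		return [32]byte{}, 0, errors.New("hex string invalid")
	}
	if len(b) > 32 {
		return [32]byte{}, len(b), errors.New("hex string too long, want at most 32 bytes")
	}
	var h [32]byte
	copy(h[32-len(b):], b) // left-pad, as common.BytesToHash does
	return h, len(b), nil
}

func main() {
	h, n, _ := decodeHash("0x01")
	fmt.Printf("input length %d, padded key %x\n", n, h)
}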
-// - When blockNr is -4 the chain safe block is returned. +// - When blockNr is -1 the chain head is returned. +// - When blockNr is -2 the pending chain head is returned. // - When fullTx is true all transactions in the block are returned, otherwise // only the transaction hash is returned. func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { @@ -927,7 +891,7 @@ func (s *BlockChainAPI) GetStorageAt(ctx context.Context, address common.Address if state == nil || err != nil { return nil, err } - key, _, err := decodeHash(hexKey) + key, err := decodeHash(hexKey) if err != nil { return nil, fmt.Errorf("unable to decode storage key: %s", err) } @@ -1086,10 +1050,33 @@ func (context *ChainContext) GetHeader(hash common.Hash, number uint64) *types.H return header } -func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.StateDB, header *types.Header, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { +func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { + defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) + + state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) + if state == nil || err != nil { + return nil, err + } if err := overrides.Apply(state); err != nil { return nil, err } + // If the request is for the pending block, override the block timestamp, number, and estimated + // base fee, so that the check runs as if it were run on a newly generated block. + if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber { + // Override header with a copy to ensure the original header is not modified + header = types.CopyHeader(header) + // Grab the hash of the unmodified header, so that the modified header can point to the + // prior block as its parent. + parentHash := header.Hash() + header.Time = uint64(time.Now().Unix()) + header.ParentHash = parentHash + header.Number = new(big.Int).Add(header.Number, big.NewInt(1)) + estimatedBaseFee, err := b.EstimateBaseFee(ctx) + if err != nil { + return nil, err + } + header.BaseFee = estimatedBaseFee + } // Setup context so it may be cancelled the call has completed // or, in case of unmetered gas, setup a context with a timeout. @@ -1138,35 +1125,6 @@ func doCall(ctx context.Context, b Backend, args TransactionArgs, state *state.S return result, nil } -func DoCall(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, blockOverrides *BlockOverrides, timeout time.Duration, globalGasCap uint64) (*core.ExecutionResult, error) { - defer func(start time.Time) { log.Debug("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) - - state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) - if state == nil || err != nil { - return nil, err - } - - // If the request is for the pending block, override the block timestamp, number, and estimated - // base fee, so that the check runs as if it were run on a newly generated block. 
- if blkNumber, isNum := blockNrOrHash.Number(); isNum && blkNumber == rpc.PendingBlockNumber { - // Override header with a copy to ensure the original header is not modified - header = types.CopyHeader(header) - // Grab the hash of the unmodified header, so that the modified header can point to the - // prior block as its parent. - parentHash := header.Hash() - header.Time = uint64(time.Now().Unix()) - header.ParentHash = parentHash - header.Number = new(big.Int).Add(header.Number, big.NewInt(1)) - estimatedBaseFee, err := b.EstimateBaseFee(ctx) - if err != nil { - return nil, err - } - header.BaseFee = estimatedBaseFee - } - - return doCall(ctx, b, args, state, header, overrides, blockOverrides, timeout, globalGasCap) -} - func newRevertError(result *core.ExecutionResult) *revertError { reason, errUnpack := abi.UnpackRevert(result.Revert()) err := errors.New("execution reverted") @@ -1248,7 +1206,7 @@ func (s *BlockChainAPI) Call(ctx context.Context, args TransactionArgs, blockNrO return result.Return(), result.Err } -func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverride, gasCap uint64) (hexutil.Uint64, error) { +func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNrOrHash rpc.BlockNumberOrHash, gasCap uint64) (hexutil.Uint64, error) { // Binary search the gas requirement, as it may be higher than the amount used var ( lo uint64 = params.TxGas - 1 @@ -1290,10 +1248,6 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr if err != nil { return 0, err } - err = overrides.Apply(state) - if err != nil { - return 0, err - } balance := state.GetBalance(*args.From) // from can't be nil available := new(big.Int).Set(balance) if args.Value != nil { @@ -1323,10 +1277,10 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr cap = hi // Create a helper to check if a gas allowance results in an executable transaction - executable := func(gas uint64, state *state.StateDB, header *types.Header) (bool, *core.ExecutionResult, error) { + executable := func(gas uint64) (bool, *core.ExecutionResult, error) { args.Gas = (*hexutil.Uint64)(&gas) - result, err := doCall(ctx, b, args, state, header, nil, nil, 0, gasCap) + result, err := DoCall(ctx, b, args, blockNrOrHash, nil, nil, 0, gasCap) if err != nil { if errors.Is(err, core.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit @@ -1335,19 +1289,10 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr } return result.Failed(), result, nil } - state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) - if state == nil || err != nil { - return 0, err - } - err = overrides.Apply(state) - if err != nil { - return 0, err - } // Execute the binary search and hone in on an executable gas limit for lo+1 < hi { - s := state.Copy() mid := (hi + lo) / 2 - failed, _, err := executable(mid, s, header) + failed, _, err := executable(mid) // If the error is not nil(consensus error), it means the provided message // call or transaction will never be accepted no matter how much gas it is @@ -1363,7 +1308,7 @@ func DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr } // Reject the transaction as invalid if it still fails at the highest allowance if hi == cap { - failed, result, err := executable(hi, state, header) + failed, result, err := executable(hi) if err != nil { return 0, err } @@ -1383,12 +1328,12 @@ func 
DoEstimateGas(ctx context.Context, b Backend, args TransactionArgs, blockNr // EstimateGas returns an estimate of the amount of gas needed to execute the // given transaction against the current pending block. -func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash, overrides *StateOverride) (hexutil.Uint64, error) { +func (s *BlockChainAPI) EstimateGas(ctx context.Context, args TransactionArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) if blockNrOrHash != nil { bNrOrHash = *blockNrOrHash } - return DoEstimateGas(ctx, s.b, args, bNrOrHash, overrides, s.b.RPCGasCap()) + return DoEstimateGas(ctx, s.b, args, bNrOrHash, s.b.RPCGasCap()) } // RPCMarshalHeader converts the given header to the RPC output . @@ -1405,6 +1350,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { "miner": head.Coinbase, "difficulty": (*hexutil.Big)(head.Difficulty), "extraData": hexutil.Bytes(head.Extra), + "size": hexutil.Uint64(head.Size()), "gasLimit": hexutil.Uint64(head.GasLimit), "gasUsed": hexutil.Uint64(head.GasUsed), "timestamp": hexutil.Uint64(head.Time), @@ -1425,7 +1371,7 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { // RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. -func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) map[string]interface{} { +func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *params.ChainConfig) (map[string]interface{}, error) { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) @@ -1451,7 +1397,8 @@ func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool, config *param uncleHashes[i] = uncle.Hash() } fields["uncles"] = uncleHashes - return fields + + return fields, nil } // rpcMarshalHeader uses the generalized output filler, then adds the total difficulty field, which requires @@ -1467,13 +1414,16 @@ func (s *BlockChainAPI) rpcMarshalHeader(ctx context.Context, header *types.Head // rpcMarshalBlock uses the generalized output filler, then adds the total difficulty field, which requires // a `BlockchainAPI`. func (s *BlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - fields := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) + fields, err := RPCMarshalBlock(b, inclTx, fullTx, s.b.ChainConfig()) + if err != nil { + return nil, err + } if inclTx { // Note: Subnet-EVM enforces that the difficulty of a block is always 1, such that the total difficulty of a block // will be equivalent to its height. 
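Both versions of DoEstimateGas above share the same skeleton: bracket the answer between params.TxGas-1 and the gas cap, then bisect on whether execution fails at the midpoint (with core.ErrIntrinsicGas treated as "raise the limit"); the revert only changes whether each probe derives its own state and header. The search in isolation, as a sketch with the EVM call stubbed out:

package main

import "fmt"

// estimateGas returns the smallest gas limit in (lo, hi] at which execute
// stops failing, mirroring the bisection in DoEstimateGas.
func estimateGas(lo, hi uint64, execute func(gas uint64) (failed bool)) uint64 {
	for lo+1 < hi {
		mid := (hi + lo) / 2
		if execute(mid) {
			lo = mid // still failing: need more gas
		} else {
			hi = mid // succeeded: try a smaller allowance
		}
	}
	return hi
}

func main() {
	const needed = 53000 // e.g. a plain contract creation, as in the tests below
	gas := estimateGas(21000-1, 50_000_000, func(g uint64) bool { return g < needed })
	fmt.Println(gas) // 53000
}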
fields["totalDifficulty"] = (*hexutil.Big)(b.Number()) } - return fields, nil + return fields, err } // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction @@ -1497,7 +1447,6 @@ type RPCTransaction struct { V *hexutil.Big `json:"v"` R *hexutil.Big `json:"r"` S *hexutil.Big `json:"s"` - YParity *hexutil.Uint64 `json:"yParity,omitempty"` } // newRPCTransaction returns a transaction that will serialize to the RPC @@ -1525,32 +1474,25 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) result.TransactionIndex = (*hexutil.Uint64)(&index) } - switch tx.Type() { case types.LegacyTxType: // if a legacy transaction has an EIP-155 chain id, include it explicitly if id := tx.ChainId(); id.Sign() != 0 { result.ChainID = (*hexutil.Big)(id) } - case types.AccessListTxType: al := tx.AccessList() - yparity := hexutil.Uint64(v.Sign()) result.Accesses = &al result.ChainID = (*hexutil.Big)(tx.ChainId()) - result.YParity = &yparity - case types.DynamicFeeTxType: al := tx.AccessList() - yparity := hexutil.Uint64(v.Sign()) result.Accesses = &al result.ChainID = (*hexutil.Big)(tx.ChainId()) - result.YParity = &yparity result.GasFeeCap = (*hexutil.Big)(tx.GasFeeCap()) result.GasTipCap = (*hexutil.Big)(tx.GasTipCap()) // if the transaction has been mined, compute the effective gas price if baseFee != nil && blockHash != (common.Hash{}) { - // price = min(gasTipCap + baseFee, gasFeeCap) + // price = min(tip, gasFeeCap - baseFee) + baseFee price := math.BigMin(new(big.Int).Add(tx.GasTipCap(), baseFee), tx.GasFeeCap()) result.GasPrice = (*hexutil.Big)(price) } else { @@ -1647,7 +1589,7 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH to = crypto.CreateAddress(args.from(), uint64(*args.Nonce)) } // Retrieve the precompiles since they don't need to be added to the access list - precompiles := vm.ActivePrecompiles(b.ChainConfig().Rules(header.Number, header.Time)) + precompiles := vm.ActivePrecompiles(b.ChainConfig().AvalancheRules(header.Number, header.Time)) // Create an initial tracer prevTracer := logger.NewAccessListTracer(nil, args.from(), to, precompiles) @@ -1661,7 +1603,8 @@ func AccessList(ctx context.Context, b Backend, blockNrOrHash rpc.BlockNumberOrH // Copy the original db so we don't modify it statedb := db.Copy() - // Set the accesslist to the last al + // Set the access list tracer to the last al + args.AccessList = &accessList msg, err := args.ToMessage(b.RPCGasCap(), header.BaseFee) if err != nil { @@ -1699,6 +1642,7 @@ type BadBlockArgs struct { // and returns them as a JSON list of block hashes. 
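The comment rewrite on the effective gas price in newRPCTransaction above is purely editorial: min(gasTipCap+baseFee, gasFeeCap) and min(tip, gasFeeCap-baseFee)+baseFee denote the same quantity, and the math.BigMin expression underneath is untouched by the revert. With concrete numbers:

package main

import "fmt"

func min64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	tip, feeCap, baseFee := int64(6), int64(9), int64(5)
	fmt.Println(min64(tip+baseFee, feeCap))           // 9
	fmt.Println(min64(tip, feeCap-baseFee) + baseFee) // 9
}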
func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) { var ( + err error badBlocks, reasons = s.b.BadBlocks() results = make([]*BadBlockArgs, 0, len(badBlocks)) ) @@ -1712,7 +1656,9 @@ func (s *BlockChainAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, erro } else { blockRlp = fmt.Sprintf("%#x", rlpBytes) } - blockJSON = RPCMarshalBlock(block, true, true, s.b.ChainConfig()) + if blockJSON, err = RPCMarshalBlock(block, true, true, s.b.ChainConfig()); err != nil { + blockJSON = map[string]interface{}{"error": err.Error()} + } results = append(results, &BadBlockArgs{ Hash: block.Hash(), RLP: blockRlp, @@ -1851,7 +1797,7 @@ func (s *TransactionAPI) GetRawTransactionByHash(ctx context.Context, hash commo // GetTransactionReceipt returns the transaction receipt for the given transaction hash. func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { tx, blockHash, blockNumber, index, err := s.b.GetTransaction(ctx, hash) - if tx == nil || err != nil { + if err != nil { // When the transaction doesn't exist, the RPC method should return JSON null // as per specification. return nil, nil @@ -1860,11 +1806,12 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. if err != nil { return nil, err } + receipts, err := s.b.GetReceipts(ctx, blockHash) if err != nil { return nil, err } - if uint64(len(receipts)) <= index { + if len(receipts) <= int(index) { return nil, nil } receipt := receipts[index] @@ -1893,7 +1840,6 @@ func marshalReceipt(receipt *types.Receipt, blockHash common.Hash, blockNumber u "type": hexutil.Uint(tx.Type()), "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } - // Assign receipt status or post state. 
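GetTransactionReceipt above reverts its receipts bounds check from the unsigned comparison uint64(len(receipts)) <= index back to len(receipts) <= int(index). The two guards agree for any plausible index but part ways once the uint64-to-int conversion wraps, which is what the unsigned form protected against; a minimal demonstration (assuming a 64-bit int):

package main

import "fmt"

func main() {
	receipts := make([]int, 3)
	index := uint64(1) << 63 // adversarially large index

	// Restored form: int(index) wraps negative, so the guard does not
	// reject the index and a subsequent receipts[index] would panic.
	fmt.Println(len(receipts) <= int(index)) // false

	// Unsigned form being reverted: len() is never negative, so the
	// comparison is safe for every possible index.
	fmt.Println(uint64(len(receipts)) <= index) // true: correctly rejected
}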
if len(receipt.PostState) > 0 { fields["root"] = hexutil.Bytes(receipt.PostState) @@ -2257,6 +2203,7 @@ func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, err // NetAPI offers network related RPC methods type NetAPI struct { + // net *p2p.Server networkVersion uint64 } diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index f168777153..45884d2995 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -27,16 +27,21 @@ package ethapi import ( + "bytes" "context" "crypto/ecdsa" "encoding/json" "errors" "fmt" + "hash" "math/big" "reflect" + "sort" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/subnet-evm/accounts" "github.com/ava-labs/subnet-evm/commontype" "github.com/ava-labs/subnet-evm/consensus" @@ -47,7 +52,6 @@ import ( "github.com/ava-labs/subnet-evm/core/state" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" - "github.com/ava-labs/subnet-evm/internal/blocktest" "github.com/ava-labs/subnet-evm/params" "github.com/ava-labs/subnet-evm/rpc" "github.com/ethereum/go-ethereum/common" @@ -55,9 +59,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/exp/slices" + "golang.org/x/crypto/sha3" ) func TestTransaction_RoundTripRpcJSON(t *testing.T) { @@ -70,7 +72,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) { t.Parallel() for i, tt := range tests { var tx2 types.Transaction - tx, err := types.SignNewTx(key, signer, tt.Tx) + tx, err := types.SignNewTx(key, signer, tt) if err != nil { t.Fatalf("test %d: signing failed: %v", i, err) } @@ -83,7 +85,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) { t.Fatalf("test %d: stx changed, want %x have %x", i, want, have) } - // rpcTransaction + // rpcTransaction rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, nil, config) if data, err := json.Marshal(rpcTx); err != nil { t.Fatalf("test %d: marshalling failed; %v", i, err) @@ -91,259 +93,102 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) { t.Fatalf("test %d: unmarshal failed: %v", i, err) } else if want, have := tx.Hash(), tx2.Hash(); want != have { t.Fatalf("test %d: tx changed, want %x have %x", i, want, have) - } else { - want, have := tt.Want, string(data) - require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have) } } } -type txData struct { - Tx types.TxData - Want string -} - -func allTransactionTypes(addr common.Address, config *params.ChainConfig) []txData { - return []txData{ - { - Tx: &types.LegacyTx{ - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - V: big.NewInt(9), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x3fa586d2448ae279279fa7036da74eb932763661543428c1a0aba21b95b37bdb", - "input": "0x0001020304", - "nonce": "0x5", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x8", - "type": "0x0", - "chainId": "0x1", - "v": "0x25", - "r": "0xac639f4319e9268898e29444b97101f1225e2a0837151626da23e73dda2443fc", - "s": "0x4fcc3f4c3a75f70ee45bb42d4b0aad432cc8c0140efb3e2611d6a6dda8460907" - }`, - }, { - Tx: &types.LegacyTx{ - Nonce: 5, - GasPrice: big.NewInt(6), 
- Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x617a316c6ff7ed2aa6ead1b4bb28a1322c2156c1c72f376a976d2d2adb1748ee", - "input": "0x0001020304", - "nonce": "0x5", - "to": null, - "transactionIndex": null, - "value": "0x8", - "type": "0x0", - "chainId": "0x1", - "v": "0x25", - "r": "0xee8e89b513778d4815ae5969f3d55e0f7590f31b08f2a2290d5bc4ae37fce299", - "s": "0x663db5c74c10e2b6525e7026e7cfd569b819ec91a042322655ff2b35060784b1" - }`, - }, - { - Tx: &types.AccessListTx{ - ChainID: config.ChainID, - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, +func allTransactionTypes(addr common.Address, config *params.ChainConfig) []types.TxData { + return []types.TxData{ + &types.LegacyTx{ + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + V: big.NewInt(9), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.LegacyTx{ + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.AccessListTx{ + ChainID: config.ChainID, + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x6becb7b9c171aa0d6d0a90dcd97bc3529c4d521f9cc9b7e31616aa9afc178c10", - "input": "0x0001020304", - "nonce": "0x5", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x8", - "type": "0x1", - "accessList": [ - { - "address": "0x0200000000000000000000000000000000000000", - "storageKeys": [ - "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ] - } - ], - "chainId": "0x1", - "v": "0x1", - "r": "0xea2289ca0243beecbe69d337bbc53c618e1fb6bd2ec69fd4121df47149a4d4be", - "s": "0x2dc134b6bc43abbdfebef0b2d62c175459fc1e8ddff60c8e740c461d7ea1522f", - "yParity": "0x1" - }`, - }, { - Tx: &types.AccessListTx{ - ChainID: config.ChainID, - Nonce: 5, - GasPrice: big.NewInt(6), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.AccessListTx{ + ChainID: config.ChainID, + Nonce: 5, + GasPrice: big.NewInt(6), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": 
"0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x6", - "hash": "0x22fbf81bae4640511c706e2c72d2f2ef1abc1e7861f2b82c4cae5b102a40709c", - "input": "0x0001020304", - "nonce": "0x5", - "to": null, - "transactionIndex": null, - "value": "0x8", - "type": "0x1", - "accessList": [ - { - "address": "0x0200000000000000000000000000000000000000", - "storageKeys": [ - "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ] - } - ], - "chainId": "0x1", - "v": "0x1", - "r": "0xc50e18edd861639735ec69ca12d82fcbb2c1921d2e2a8fd3a75f408d2d4b8118", - "s": "0x32a908d1bc2db0229354f4dd392ffc37934e24ae8b18a620c6588c72660b6238", - "yParity": "0x1" - }`, - }, { - Tx: &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 5, - GasTipCap: big.NewInt(6), - GasFeeCap: big.NewInt(9), - Gas: 7, - To: &addr, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{ - types.AccessTuple{ - Address: common.Address{0x2}, - StorageKeys: []common.Hash{types.EmptyRootHash}, - }, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.DynamicFeeTx{ + ChainID: config.ChainID, + Nonce: 5, + GasTipCap: big.NewInt(6), + GasFeeCap: big.NewInt(9), + Gas: 7, + To: &addr, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, + AccessList: types.AccessList{ + types.AccessTuple{ + Address: common.Address{0x2}, + StorageKeys: []common.Hash{types.EmptyRootHash}, }, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), - }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x9", - "maxFeePerGas": "0x9", - "maxPriorityFeePerGas": "0x6", - "hash": "0xc5763d2ce6af3f694dcda8a9a50d4f75005a711edd382e993dd0406e0c54cfde", - "input": "0x0001020304", - "nonce": "0x5", - "to": "0xdead000000000000000000000000000000000000", - "transactionIndex": null, - "value": "0x8", - "type": "0x2", - "accessList": [ - { - "address": "0x0200000000000000000000000000000000000000", - "storageKeys": [ - "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - ] - } - ], - "chainId": "0x1", - "v": "0x0", - "r": "0x740eb1e3bc206760182993845b7815fd8cf7a42f1a1ed26027f736e9eccfd20f", - "s": "0x31da567e2b3a83e58e42f7902c3706926c926625f6978c24fdaa21b9d143bbf7", - "yParity": "0x0" - }`, - }, { - Tx: &types.DynamicFeeTx{ - ChainID: config.ChainID, - Nonce: 5, - GasTipCap: big.NewInt(6), - GasFeeCap: big.NewInt(9), - Gas: 7, - To: nil, - Value: big.NewInt(8), - Data: []byte{0, 1, 2, 3, 4}, - AccessList: types.AccessList{}, - V: big.NewInt(32), - R: big.NewInt(10), - S: big.NewInt(11), }, - Want: `{ - "blockHash": null, - "blockNumber": null, - "from": "0x71562b71999873db5b286df957af199ec94617f7", - "gas": "0x7", - "gasPrice": "0x9", - "maxFeePerGas": "0x9", - "maxPriorityFeePerGas": "0x6", - "hash": "0x85545f69b2410640fbbb7157b9a79adece45bac4b2803733d250d049e9501a28", - "input": "0x0001020304", - "nonce": "0x5", - "to": null, - "transactionIndex": null, - "value": "0x8", - "type": "0x2", - "accessList": [], - "chainId": "0x1", - "v": "0x1", - "r": "0x5004538adbe499313737033b22eb2b50a9450f02fab3971a591e6d57761b2cdf", - "s": "0x5f7b1f5d11bd467d84f32beb2e89629351b96c5204c4f72d5d2040bee369a73a", - "yParity": "0x1" - }`, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), + }, + &types.DynamicFeeTx{ + ChainID: config.ChainID, + Nonce: 5, + GasTipCap: big.NewInt(6), + GasFeeCap: big.NewInt(9), + Gas: 7, + To: nil, + Value: big.NewInt(8), + Data: []byte{0, 1, 2, 3, 4}, 
+ AccessList: types.AccessList{}, + V: big.NewInt(32), + R: big.NewInt(10), + S: big.NewInt(11), }, } } @@ -355,7 +200,10 @@ type testBackend struct { func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { var ( - engine = dummy.NewCoinbaseFaker() + engine = dummy.NewCoinbaseFaker() + backend = &testBackend{ + db: rawdb.NewMemoryDatabase(), + } cacheConfig = &core.CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, @@ -364,21 +212,15 @@ func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i i } ) // Generate blocks for testing - db, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator) - chain, err := core.NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, gspec.ToBlock().Hash(), false) + _, blocks, _, _ := core.GenerateChainWithGenesis(gspec, engine, n, 10, generator) + chain, err := core.NewBlockChain(backend.db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } if n, err := chain.InsertChain(blocks); err != nil { t.Fatalf("block %d: failed to insert into chain: %v", n, err) } - for _, block := range blocks { - if err := chain.Accept(block); err != nil { - t.Fatalf("block %d: failed to accept into chain: %v", block.NumberU64(), err) - } - } - - backend := &testBackend{db: db, chain: chain} + backend.chain = chain return backend } @@ -406,16 +248,10 @@ func (b testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types return b.chain.GetHeaderByHash(hash), nil } func (b testBackend) HeaderByNumberOrHash(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { - if blockNr, ok := blockNrOrHash.Number(); ok { - return b.HeaderByNumber(ctx, blockNr) - } - if blockHash, ok := blockNrOrHash.Hash(); ok { - return b.HeaderByHash(ctx, blockHash) - } - panic("unknown type rpc.BlockNumberOrHash") + panic("implement me") } -func (b testBackend) CurrentHeader() *types.Header { return b.chain.CurrentBlock() } -func (b testBackend) CurrentBlock() *types.Header { return b.chain.CurrentBlock() } +func (b testBackend) CurrentHeader() *types.Header { panic("implement me") } +func (b testBackend) CurrentBlock() *types.Header { panic("implement me") } func (b testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { if number == rpc.LatestBlockNumber { head := b.chain.CurrentBlock() @@ -467,6 +303,7 @@ func (b testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.R receipts := rawdb.ReadReceipts(b.db, hash, header.Number.Uint64(), header.Time, b.chain.Config()) return receipts, nil } +func (b testBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { panic("implement me") } func (b testBackend) GetEVM(ctx context.Context, msg *core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config, blockContext *vm.BlockContext) (*vm.EVM, func() error) { vmError := func() error { return nil } if vmConfig == nil { @@ -495,8 +332,7 @@ func (b testBackend) SendTx(ctx context.Context, signedTx *types.Transaction) er panic("implement me") } func (b testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - tx, blockHash, blockNumber, index := rawdb.ReadTransaction(b.db, txHash) - return tx, blockHash, blockNumber, index, nil + panic("implement me") } func (b testBackend) GetPoolTransactions() (types.Transactions, error) { 
panic("implement me") } func (b testBackend) GetPoolTransaction(txHash common.Hash) *types.Transaction { panic("implement me") } @@ -504,10 +340,10 @@ func (b testBackend) GetPoolNonce(ctx context.Context, addr common.Address) (uin panic("implement me") } func (b testBackend) Stats() (pending int, queued int) { panic("implement me") } -func (b testBackend) TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) { +func (b testBackend) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) { panic("implement me") } -func (b testBackend) TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) { +func (b testBackend) TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) { panic("implement me") } func (b testBackend) SubscribeNewTxsEvent(events chan<- core.NewTxsEvent) event.Subscription { @@ -566,7 +402,6 @@ func TestEstimateGas(t *testing.T) { var testSuite = []struct { blockNumber rpc.BlockNumber call TransactionArgs - overrides StateOverride expectErr error want uint64 }{ @@ -599,30 +434,9 @@ func TestEstimateGas(t *testing.T) { expectErr: nil, want: 53000, }, - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{}, - overrides: StateOverride{ - randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(params.Ether)))}, - }, - expectErr: nil, - want: 53000, - }, - { - blockNumber: rpc.LatestBlockNumber, - call: TransactionArgs{ - From: &randomAccounts[0].addr, - To: &randomAccounts[1].addr, - Value: (*hexutil.Big)(big.NewInt(1000)), - }, - overrides: StateOverride{ - randomAccounts[0].addr: OverrideAccount{Balance: newRPCBalance(big.NewInt(0))}, - }, - expectErr: core.ErrInsufficientFunds, - }, } for i, tc := range testSuite { - result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}, &tc.overrides) + result, err := api.EstimateGas(context.Background(), tc.call, &rpc.BlockNumberOrHash{BlockNumber: &tc.blockNumber}) if tc.expectErr != nil { if err == nil { t.Errorf("test %d: want error %v, have nothing", i, tc.expectErr) @@ -817,13 +631,19 @@ type Account struct { addr common.Address } -func newAccounts(n int) (accounts []Account) { +type Accounts []Account + +func (a Accounts) Len() int { return len(a) } +func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } + +func newAccounts(n int) (accounts Accounts) { for i := 0; i < n; i++ { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) accounts = append(accounts, Account{key: key, addr: addr}) } - slices.SortFunc(accounts, func(a, b Account) int { return a.addr.Cmp(b.addr) }) + sort.Sort(accounts) return accounts } @@ -837,8 +657,32 @@ func hex2Bytes(str string) *hexutil.Bytes { return &rpcBytes } +// testHasher is the helper tool for transaction/receipt list hashing. +// The original hasher is trie, in order to get rid of import cycle, +// use the testing hasher instead. 
+type testHasher struct { + hasher hash.Hash +} + +func newHasher() *testHasher { + return &testHasher{hasher: sha3.NewLegacyKeccak256()} +} + +func (h *testHasher) Reset() { + h.hasher.Reset() +} + +func (h *testHasher) Update(key, val []byte) error { + h.hasher.Write(key) + h.hasher.Write(val) + return nil +} + +func (h *testHasher) Hash() common.Hash { + return common.BytesToHash(h.hasher.Sum(nil)) +} + func TestRPCMarshalBlock(t *testing.T) { - t.Parallel() var ( txs []*types.Transaction to = common.BytesToAddress([]byte{0x11}) @@ -867,7 +711,7 @@ func TestRPCMarshalBlock(t *testing.T) { } txs = append(txs, tx) } - block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, blocktest.NewHasher()) + block := types.NewBlock(&types.Header{Number: big.NewInt(100)}, txs, nil, nil, newHasher()) var testSuite = []struct { inclTx bool @@ -878,1037 +722,37 @@ func TestRPCMarshalBlock(t *testing.T) { { inclTx: false, fullTx: false, - want: `{ - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0x0", - "gasUsed": "0x0", - "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x64", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x296", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x0", - "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", - "uncles": [] - }`, + want: 
`{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, }, // only tx hashes { inclTx: true, fullTx: false, - want: `{ - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0x0", - "gasUsed": "0x0", - "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x64", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x296", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x0", - "transactions": [ - "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605", - "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4", - "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5", - "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1" - ], - "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", - "uncles": [] - }`, + want: 
`{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":["0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1"],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, }, + // full tx details { inclTx: true, fullTx: true, - want: `{ - "difficulty": "0x0", - "extraData": "0x", - "gasLimit": "0x0", - "gasUsed": "0x0", - "hash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x64", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x296", - "stateRoot": "0x0000000000000000000000000000000000000000000000000000000000000000", - "timestamp": "0x0", - "transactions": [ - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605", - "input": "0x111111", - "nonce": "0x1", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x0", - "value": "0x6f", - "type": "0x1", - "accessList": [], - "chainId": "0x539", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "yParity": "0x0" - }, - { - "blockHash": 
"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4", - "input": "0x111111", - "nonce": "0x2", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x1", - "value": "0x6f", - "type": "0x0", - "chainId": "0x7fffffffffffffee", - "v": "0x0", - "r": "0x0", - "s": "0x0" - }, - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5", - "input": "0x111111", - "nonce": "0x3", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x2", - "value": "0x6f", - "type": "0x1", - "accessList": [], - "chainId": "0x539", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "yParity": "0x0" - }, - { - "blockHash": "0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee", - "blockNumber": "0x64", - "from": "0x0000000000000000000000000000000000000000", - "gas": "0x457", - "gasPrice": "0x2b67", - "hash": "0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1", - "input": "0x111111", - "nonce": "0x4", - "to": "0x0000000000000000000000000000000000000011", - "transactionIndex": "0x3", - "value": "0x6f", - "type": "0x0", - "chainId": "0x7fffffffffffffee", - "v": "0x0", - "r": "0x0", - "s": "0x0" - } - ], - "transactionsRoot": "0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e", - "uncles": [] - }`, + want: `{"difficulty":"0x0","extraData":"0x","gasLimit":"0x0","gasUsed":"0x0","hash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","miner":"0x0000000000000000000000000000000000000000","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x0000000000000000","number":"0x64","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421","sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","size":"0x296","stateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","timestamp":"0x0","transactions":[{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x7d39df979e34172322c64983a9ad48302c2b889e55bda35324afecf043a77605","input":"0x111111","nonce":"0x1","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x0","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000
000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x9bba4c34e57c875ff57ac8d172805a26ae912006985395dc1bdf8f44140a7bf4","input":"0x111111","nonce":"0x2","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x1","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x98909ea1ff040da6be56bc4231d484de1414b3c1dac372d69293a4beb9032cb5","input":"0x111111","nonce":"0x3","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x2","value":"0x6f","type":"0x1","accessList":[],"chainId":"0x539","v":"0x0","r":"0x0","s":"0x0"},{"blockHash":"0x9b73c83b25d0faf7eab854e3684c7e394336d6e135625aafa5c183f27baa8fee","blockNumber":"0x64","from":"0x0000000000000000000000000000000000000000","gas":"0x457","gasPrice":"0x2b67","hash":"0x12e1f81207b40c3bdcc13c0ee18f5f86af6d31754d57a0ea1b0d4cfef21abef1","input":"0x111111","nonce":"0x4","to":"0x0000000000000000000000000000000000000011","transactionIndex":"0x3","value":"0x6f","type":"0x0","chainId":"0x7fffffffffffffee","v":"0x0","r":"0x0","s":"0x0"}],"transactionsRoot":"0x661a9febcfa8f1890af549b874faf9fa274aede26ef489d9db0b25daa569450e","uncles":[]}`, }, } for i, tc := range testSuite { - resp := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestSubnetEVMConfig) - out, err := json.Marshal(resp) + resp, err := RPCMarshalBlock(block, tc.inclTx, tc.fullTx, params.TestChainConfig) if err != nil { - t.Errorf("test %d: json marshal error: %v", i, err) + t.Errorf("test %d: got error %v", i, err) continue } - assert.JSONEqf(t, tc.want, string(out), "test %d", i) - } -} - -func TestRPCGetBlockOrHeader(t *testing.T) { - t.Parallel() - - // Initialize test accounts - var ( - acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) - genesis = &core.Genesis{ - Config: params.TestSubnetEVMConfig, - Alloc: core.GenesisAlloc{ - acc1Addr: {Balance: big.NewInt(params.Ether)}, - acc2Addr: {Balance: big.NewInt(params.Ether)}, - }, - } - genBlocks = 10 - signer = types.HomesteadSigner{} - tx = types.NewTx(&types.LegacyTx{ - Nonce: 11, - GasPrice: big.NewInt(11111), - Gas: 1111, - To: &acc2Addr, - Value: big.NewInt(111), - Data: []byte{0x11, 0x11, 0x11}, - }) - pending = types.NewBlock(&types.Header{Number: big.NewInt(11), Time: 42}, []*types.Transaction{tx}, nil, nil, blocktest.NewHasher()) - ) - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - // Transfer from account[0] to account[1] - // value: 1000 wei - // fee: 0 wei - tx, _ := types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), signer, acc1Key) - b.AddTx(tx) - }) - api := NewBlockChainAPI(backend) - blockHashes := make([]common.Hash, genBlocks+1) - ctx := context.Background() - for i := 0; i <= genBlocks; i++ { - header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) - if err != nil { - t.Errorf("failed to get block: %d err: %v", i, err) - } - blockHashes[i] = header.Hash() - } - pendingHash := pending.Hash() - - var testSuite = []struct { - blockNumber 
rpc.BlockNumber - blockHash *common.Hash - fullTx bool - reqHeader bool - want string - expectErr error - }{ - // 0. latest header - { - blockNumber: rpc.LatestBlockNumber, - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0xa", - "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7", - "timestamp": "0x64", - "totalDifficulty": "0xa", - "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01" - }`, - }, - // 1. genesis header - { - blockNumber: rpc.BlockNumber(0), - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "difficulty": "0x20000", - "extraData": "0x", - "gasLimit": "0x47e7c4", - "gasUsed": "0x0", - "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", - "timestamp": "0x0", - "totalDifficulty": "0x0", - "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - }`, - }, - // 2. 
#1 header - { - blockNumber: rpc.BlockNumber(1), - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x1", - "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697", - "timestamp": "0xa", - "totalDifficulty": "0x1", - "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5" - }`, - }, - // 3. latest-1 header - { - blockNumber: rpc.BlockNumber(9), - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x9", - "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5", - "timestamp": "0x5a", - "totalDifficulty": "0x9", - "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb" - }`, - }, - // 4. latest+1 header - { - blockNumber: rpc.BlockNumber(11), - reqHeader: true, - want: "null", - }, - // 5. 
pending header - { - blockNumber: rpc.PendingBlockNumber, - reqHeader: true, - want: "null", - }, - // 6. latest block - { - blockNumber: rpc.LatestBlockNumber, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0xa", - "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x2bb", - "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7", - "timestamp": "0x64", - "totalDifficulty": "0xa", - "transactions": [ - "0x71be223424ab6e3457513a760b196d43b094414c32a70ff929b2b720a16b832d" - ], - "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01", - "uncles": [] - }`, - }, - // 7. genesis block - { - blockNumber: rpc.BlockNumber(0), - want: `{ - "baseFeePerGas": "0x5d21dba00", - "difficulty": "0x20000", - "extraData": "0x", - "gasLimit": "0x47e7c4", - "gasUsed": "0x0", - "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x201", - "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", - "timestamp": "0x0", - "totalDifficulty": "0x0", - "transactions": [], - "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "uncles": [] - }`, - }, - // 8. 
#1 block - { - blockNumber: rpc.BlockNumber(1), - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x1", - "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x2bb", - "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697", - "timestamp": "0xa", - "totalDifficulty": "0x1", - "transactions": [ - "0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26" - ], - "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5", - "uncles": [] - }`, - }, - // 9. 
latest-1 block - { - blockNumber: rpc.BlockNumber(9), - fullTx: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x9", - "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x2bb", - "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5", - "timestamp": "0x5a", - "totalDifficulty": "0x9", - "transactions": [ - { - "blockHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "blockNumber": "0x9", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gas": "0x5208", - "gasPrice": "0x5d21dba00", - "hash": "0x237f95840187a93f8aaf8d6f1515f8a8ac9d9359fcb0c220cdb3d642d6b9a19a", - "input": "0x", - "nonce": "0x8", - "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", - "transactionIndex": "0x0", - "value": "0x3e8", - "type": "0x0", - "v": "0x1c", - "r": "0xd7cdc527490b7ba29c515aae3bbe80c67729cda7f736e6515652cfc40e9da68f", - "s": "0x4d0a4a59bef165b16f910bdadd41efaaad1b73549bacc35eaf6d073eb1fb92b7" - } - ], - "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb", - "uncles": [] - }`, - }, - // 10. latest+1 block - { - blockNumber: rpc.BlockNumber(11), - fullTx: true, - want: "null", - }, - // 11. pending block - { - blockNumber: rpc.PendingBlockNumber, - want: "null", - }, - // 12. pending block + fullTx - { - blockNumber: rpc.PendingBlockNumber, - fullTx: true, - want: "null", - }, - // 13. 
latest header by hash - { - blockHash: &blockHashes[len(blockHashes)-1], - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0xa", - "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7", - "timestamp": "0x64", - "totalDifficulty": "0xa", - "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01" - }`, - }, - // 14. genesis header by hash - { - blockHash: &blockHashes[0], - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "difficulty": "0x20000", - "extraData": "0x", - "gasLimit": "0x47e7c4", - "gasUsed": "0x0", - "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", - "timestamp": "0x0", - "totalDifficulty": "0x0", - "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421" - }`, - }, - // 15. 
#1 header - { - blockHash: &blockHashes[1], - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x1", - "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697", - "timestamp": "0xa", - "totalDifficulty": "0x1", - "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5" - }`, - }, - // 16. latest-1 header - { - blockHash: &blockHashes[len(blockHashes)-2], - reqHeader: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x9", - "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5", - "timestamp": "0x5a", - "totalDifficulty": "0x9", - "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb" - }`, - }, - // 17. empty hash - { - blockHash: &common.Hash{}, - reqHeader: true, - want: "null", - }, - // 18. 
pending hash - { - blockHash: &pendingHash, - reqHeader: true, - want: `null`, - }, - // 19. latest block - { - blockHash: &blockHashes[len(blockHashes)-1], - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x1ec39e7ec46f8df1fb31cfca53fbf71a01869af8bd8f9a1bccbffc16ffa1461d", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0xa", - "parentHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x2bb", - "stateRoot": "0x7e06187d15d50badf60930290fb292ebe43e79553ad8b7d8f1b614316631def7", - "timestamp": "0x64", - "totalDifficulty": "0xa", - "transactions": [ - "0x71be223424ab6e3457513a760b196d43b094414c32a70ff929b2b720a16b832d" - ], - "transactionsRoot": "0x69ff8003291e1cd08f75d174f070618f7291e4540b2e33f60b3375743e3fda01", - "uncles": [] - }`, - }, - // 20. genesis block - { - blockHash: &blockHashes[0], - want: `{ - "baseFeePerGas": "0x5d21dba00", - "difficulty": "0x20000", - "extraData": "0x", - "gasLimit": "0x47e7c4", - "gasUsed": "0x0", - "hash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x0", - "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x201", - "stateRoot": "0xfe168c5e9584a85927212e5bea5304bb7d0d8a893453b4b2c52176a72f585ae2", - "timestamp": "0x0", - "totalDifficulty": "0x0", - "transactions": [], - "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "uncles": [] - }`, - }, - // 21. 
#1 block - { - blockHash: &blockHashes[1], - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0f67ad1fc8052afad4c24551748600c164091cf37e068adef76315025d3c78e7", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x1", - "parentHash": "0x3ead7923676a44500c46ad2192a0fc084aa42063b1703e6866f138a47fb1a9ca", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x2bb", - "stateRoot": "0x6b830601767ac4968163193facbe20123435180e325910b2c50efa21f778c697", - "timestamp": "0xa", - "totalDifficulty": "0x1", - "transactions": [ - "0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26" - ], - "transactionsRoot": "0x87c65a3f1a98dafe282ace11eaf88b8f31bf41fe6794d401d2f986c1af84bcd5", - "uncles": [] - }`, - }, - // 22. 
latest-1 block - { - blockHash: &blockHashes[len(blockHashes)-2], - fullTx: true, - want: `{ - "baseFeePerGas": "0x5d21dba00", - "blockGasCost": "0x0", - "difficulty": "0x1", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "0x7a1200", - "gasUsed": "0x5208", - "hash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "miner": "0x0000000000000000000000000000000000000000", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "nonce": "0x0000000000000000", - "number": "0x9", - "parentHash": "0x2fab5c6892c66668842683ced6b384c2ee83bfd6096a58f451290cabaf57a63e", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "size": "0x2bb", - "stateRoot": "0x3703d70c6443e809ce035c2a8212dbf9813f6b7d1b0f597766e9023867a852f5", - "timestamp": "0x5a", - "totalDifficulty": "0x9", - "transactions": [ - { - "blockHash": "0x0583a9d630632001771b4ecc7d62574aec3825aff47e2a680b0ea4ddb79e7365", - "blockNumber": "0x9", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gas": "0x5208", - "gasPrice": "0x5d21dba00", - "hash": "0x237f95840187a93f8aaf8d6f1515f8a8ac9d9359fcb0c220cdb3d642d6b9a19a", - "input": "0x", - "nonce": "0x8", - "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", - "transactionIndex": "0x0", - "value": "0x3e8", - "type": "0x0", - "v": "0x1c", - "r": "0xd7cdc527490b7ba29c515aae3bbe80c67729cda7f736e6515652cfc40e9da68f", - "s": "0x4d0a4a59bef165b16f910bdadd41efaaad1b73549bacc35eaf6d073eb1fb92b7" - } - ], - "transactionsRoot": "0xe16929d9c7efab0f962c1ed8c1295ddff42d3026779ed1318ea079ca580ee4cb", - "uncles": [] - }`, - }, - // 23. empty hash + body - { - blockHash: &common.Hash{}, - fullTx: true, - want: "null", - }, - // 24. pending block - { - blockHash: &pendingHash, - want: `null`, - }, - // 25. 
pending block + fullTx - { - blockHash: &pendingHash, - fullTx: true, - want: `null`, - }, - } - - for i, tt := range testSuite { - var ( - result map[string]interface{} - err error - ) - if tt.blockHash != nil { - if tt.reqHeader { - result = api.GetHeaderByHash(context.Background(), *tt.blockHash) - } else { - result, err = api.GetBlockByHash(context.Background(), *tt.blockHash, tt.fullTx) - } - } else { - if tt.reqHeader { - result, err = api.GetHeaderByNumber(context.Background(), tt.blockNumber) - } else { - result, err = api.GetBlockByNumber(context.Background(), tt.blockNumber, tt.fullTx) - } - } - if tt.expectErr != nil { - if err == nil { - t.Errorf("test %d: want error %v, have nothing", i, tt.expectErr) - continue - } - if !errors.Is(err, tt.expectErr) { - t.Errorf("test %d: error mismatch, want %v, have %v", i, tt.expectErr, err) - } - continue - } - if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) - continue - } - data, err := json.Marshal(result) - if err != nil { - t.Errorf("test %d: json marshal error", i) - continue - } - want, have := tt.want, string(data) - require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have) - } -} - -func TestRPCGetTransactionReceipt(t *testing.T) { - t.Parallel() - - // Initialize test accounts - var ( - acc1Key, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr = crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr = crypto.PubkeyToAddress(acc2Key.PublicKey) - contract = common.HexToAddress("0000000000000000000000000000000000031ec7") - genesis = &core.Genesis{ - Config: params.TestSubnetEVMConfig, - Alloc: core.GenesisAlloc{ - acc1Addr: {Balance: big.NewInt(params.Ether)}, - acc2Addr: {Balance: big.NewInt(params.Ether)}, - // // SPDX-License-Identifier: GPL-3.0 - // pragma solidity >=0.7.0 <0.9.0; - // - // contract Token { - // event Transfer(address indexed from, address indexed to, uint256 value); - // function transfer(address to, uint256 value) public returns (bool) { - // emit Transfer(msg.sender, to, value); - // return true; - // } - // } - contract: {Balance: big.NewInt(params.Ether), Code: common.FromHex("0x608060405234801561001057600080fd5b506004361061002b5760003560e01c8063a9059cbb14610030575b600080fd5b61004a6004803603810190610045919061016a565b610060565b60405161005791906101c5565b60405180910390f35b60008273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516100bf91906101ef565b60405180910390a36001905092915050565b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000610101826100d6565b9050919050565b610111816100f6565b811461011c57600080fd5b50565b60008135905061012e81610108565b92915050565b6000819050919050565b61014781610134565b811461015257600080fd5b50565b6000813590506101648161013e565b92915050565b60008060408385031215610181576101806100d1565b5b600061018f8582860161011f565b92505060206101a085828601610155565b9150509250929050565b60008115159050919050565b6101bf816101aa565b82525050565b60006020820190506101da60008301846101b6565b92915050565b6101e981610134565b82525050565b600060208201905061020460008301846101e0565b9291505056fea2646970667358221220b469033f4b77b9565ee84e0a2f04d496b18160d26034d54f9487e57788fd36d564736f6c63430008120033")}, - }, - } - genBlocks = 5 - signer = 
types.LatestSignerForChainID(params.TestChainConfig.ChainID) - txHashes = make([]common.Hash, genBlocks) - ) - backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { - var ( - tx *types.Transaction - err error - ) - switch i { - case 0: - // transfer 1000wei - tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &acc2Addr, Value: big.NewInt(1000), Gas: params.TxGas, GasPrice: b.BaseFee(), Data: nil}), types.HomesteadSigner{}, acc1Key) - case 1: - // create contract - tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: nil, Gas: 53100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040")}), signer, acc1Key) - case 2: - // with logs - // transfer(address to, uint256 value) - data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) - tx, err = types.SignTx(types.NewTx(&types.LegacyTx{Nonce: uint64(i), To: &contract, Gas: 60000, GasPrice: b.BaseFee(), Data: common.FromHex(data)}), signer, acc1Key) - case 3: - // dynamic fee with logs - // transfer(address to, uint256 value) - data := fmt.Sprintf("0xa9059cbb%s%s", common.HexToHash(common.BigToAddress(big.NewInt(int64(i + 1))).Hex()).String()[2:], common.BytesToHash([]byte{byte(i + 11)}).String()[2:]) - fee := big.NewInt(500) - fee.Add(fee, b.BaseFee()) - tx, err = types.SignTx(types.NewTx(&types.DynamicFeeTx{Nonce: uint64(i), To: &contract, Gas: 60000, Value: big.NewInt(1), GasTipCap: big.NewInt(500), GasFeeCap: fee, Data: common.FromHex(data)}), signer, acc1Key) - case 4: - // access list with contract create - accessList := types.AccessList{{ - Address: contract, - StorageKeys: []common.Hash{{0}}, - }} - tx, err = types.SignTx(types.NewTx(&types.AccessListTx{Nonce: uint64(i), To: nil, Gas: 58100, GasPrice: b.BaseFee(), Data: common.FromHex("0x60806040"), AccessList: accessList}), signer, acc1Key) - } - if err != nil { - t.Errorf("failed to sign tx: %v", err) - } - if tx != nil { - b.AddTx(tx) - txHashes[i] = tx.Hash() - } - }) - api := NewTransactionAPI(backend, new(AddrLocker)) - blockHashes := make([]common.Hash, genBlocks+1) - ctx := context.Background() - for i := 0; i <= genBlocks; i++ { - header, err := backend.HeaderByNumber(ctx, rpc.BlockNumber(i)) - if err != nil { - t.Errorf("failed to get block: %d err: %v", i, err) - } - blockHashes[i] = header.Hash() - } - - var testSuite = []struct { - txHash common.Hash - want string - }{ - // 0. 
normal success - { - txHash: txHashes[0], - want: `{ - "blockHash": "0xcc27e155b6eadfa892992a8cd8adaf3c929a6ec6d98c4dfbc60258883c73568e", - "blockNumber": "0x1", - "contractAddress": null, - "cumulativeGasUsed": "0x5208", - "effectiveGasPrice": "0x5d21dba00", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gasUsed": "0x5208", - "logs": [], - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "status": "0x1", - "to": "0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e", - "transactionHash": "0xdf92bc7c4c0341ecbdcd2a3ca7011fe9e21df4b8553bf0c8caabe6cb4a1aee26", - "transactionIndex": "0x0", - "type": "0x0" - }`, - }, - // 1. create contract - { - txHash: txHashes[1], - want: `{ - "blockHash": "0xbea66b509ec6e5639279ca696def697d47d0c40ecfa00bbcdb5e31a492491c83", - "blockNumber": "0x2", - "contractAddress": "0xae9bea628c4ce503dcfd7e305cab4e29e7476592", - "cumulativeGasUsed": "0xcf4e", - "effectiveGasPrice": "0x5d21dba00", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gasUsed": "0xcf4e", - "logs": [], - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "status": "0x1", - "to": null, - "transactionHash": "0x22aa617165f83a9f8c191c2b7724ae43eeb1249bee06c98c03c7624c21d27dc8", - "transactionIndex": "0x0", - "type": "0x0" - }`, - }, - // 2. 
with logs success - { - txHash: txHashes[2], - want: `{ - "blockHash": "0x139eee6b02792c6bee20be4d0aa72b3876f22cf8fe8e2bf45e1a0cee94aa3cf1", - "blockNumber": "0x3", - "contractAddress": null, - "cumulativeGasUsed": "0x5e28", - "effectiveGasPrice": "0x5d21dba00", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gasUsed": "0x5e28", - "logs": [ - { - "address": "0x0000000000000000000000000000000000031ec7", - "topics": [ - "0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef", - "0x000000000000000000000000703c4b2bd70c169f5717101caee543299fc946c7", - "0x0000000000000000000000000000000000000000000000000000000000000003" - ], - "data": "0x000000000000000000000000000000000000000000000000000000000000000d", - "blockNumber": "0x3", - "transactionHash": "0x7366a7738f47e32f5b6d292ca064b6b66f295d3931533a3745975be1191fccdf", - "transactionIndex": "0x0", - "blockHash": "0x139eee6b02792c6bee20be4d0aa72b3876f22cf8fe8e2bf45e1a0cee94aa3cf1", - "logIndex": "0x0", - "removed": false - } - ], - "logsBloom": "0x00000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000800000000000000008000000000000000000000000000000000020000000080000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000400000000002000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000", - "status": "0x1", - "to": "0x0000000000000000000000000000000000031ec7", - "transactionHash": "0x7366a7738f47e32f5b6d292ca064b6b66f295d3931533a3745975be1191fccdf", - "transactionIndex": "0x0", - "type": "0x0" - }`, - }, - // 3. dynamic tx with logs success - { - txHash: txHashes[3], - want: `{ - "blockHash": "0xba48c351b0aa848ab2ec889f5794f0db779e0840af80472d1c29df54b22288c8", - "blockNumber": "0x4", - "contractAddress": null, - "cumulativeGasUsed": "0x538d", - "effectiveGasPrice": "0x5d21dbbf4", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gasUsed": "0x538d", - "logs": [], - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "status": "0x0", - "to": "0x0000000000000000000000000000000000031ec7", - "transactionHash": "0x4e1e9194ca6f9d4e1736e9e441f66104f273548ed6d91b236a5f9c2ea10fa06d", - "transactionIndex": "0x0", - "type": "0x2" - }`, - }, - // 4. 
access list tx with create contract - { - txHash: txHashes[4], - want: `{ - "blockHash": "0x83f2712dfaeab6ab6239bf060bccfc49652e4afdc3b80b22a8373816a2047bd3", - "blockNumber": "0x5", - "contractAddress": "0xfdaa97661a584d977b4d3abb5370766ff5b86a18", - "cumulativeGasUsed": "0xe01a", - "effectiveGasPrice": "0x5d21dba00", - "from": "0x703c4b2bd70c169f5717101caee543299fc946c7", - "gasUsed": "0xe01a", - "logs": [], - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "status": "0x1", - "to": null, - "transactionHash": "0x8afe030574f663fe5096371d6f58a6287bfb3e0c73a5050220f5775a08e7abc9", - "transactionIndex": "0x0", - "type": "0x1" - }`, - }, - // 5. txhash empty - { - txHash: common.Hash{}, - want: `null`, - }, - // 6. txhash not found - { - txHash: common.HexToHash("deadbeef"), - want: `null`, - }, - } - - for i, tt := range testSuite { - var ( - result interface{} - err error - ) - result, err = api.GetTransactionReceipt(context.Background(), tt.txHash) + out, err := json.Marshal(resp) if err != nil { - t.Errorf("test %d: want no error, have %v", i, err) + t.Errorf("test %d: json marshal error: %v", i, err) continue } - data, err := json.Marshal(result) - if err != nil { - t.Errorf("test %d: json marshal error", i) - continue + if have := string(out); have != tc.want { + t.Errorf("test %d: want: %s have: %s", i, tc.want, have) } - want, have := tt.want, string(data) - require.JSONEqf(t, want, have, "test %d: json not match, want: %s, have: %s", i, want, have) } } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index cb4d6ca04e..cf27d124fb 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -58,10 +58,9 @@ type Backend interface { ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool - RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection - RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection - RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs - + RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection + RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs UnprotectedAllowed(tx *types.Transaction) bool // allows only for EIP155 transactions. 
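// The UnprotectedAllowed hook above is the RPC layer's gate on
// pre-EIP-155 transactions. A minimal sketch of that policy, assuming a
// hypothetical helper outside the real backend code:

package policy // illustrative standalone package, not part of this diff

import (
	"errors"

	"github.com/ava-labs/subnet-evm/core/types"
)

// CheckUnprotected rejects legacy transactions signed without a chain ID
// (pre-EIP-155), which are replayable on other chains, unless the backend
// is explicitly configured to allow them. Name and error text are made up
// for illustration.
func CheckUnprotected(allowUnprotected bool, tx *types.Transaction) error {
	if !allowUnprotected && !tx.Protected() {
		return errors.New("only replay-protected (EIP-155) transactions allowed over RPC")
	}
	return nil
}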
// Blockchain API @@ -90,8 +89,8 @@ type Backend interface { GetPoolTransaction(txHash common.Hash) *types.Transaction GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) Stats() (pending int, queued int) - TxPoolContent() (map[common.Address][]*types.Transaction, map[common.Address][]*types.Transaction) - TxPoolContentFrom(addr common.Address) ([]*types.Transaction, []*types.Transaction) + TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) + TxPoolContentFrom(addr common.Address) (types.Transactions, types.Transactions) SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription ChainConfig() *params.ChainConfig diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 5c6c682552..1ce56fa5b4 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -90,6 +90,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { if err := args.setFeeDefaults(ctx, b); err != nil { return err } + if args.Value == nil { args.Value = new(hexutil.Big) } @@ -122,7 +123,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { AccessList: args.AccessList, } pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, nil, b.RPCGasCap()) + estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) if err != nil { return err } @@ -257,7 +258,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (* gasPrice = args.GasPrice.ToInt() gasFeeCap, gasTipCap = gasPrice, gasPrice } else { - // User specified 1559 gas fields (or none), use those + // User specified 1559 gas feilds (or none), use those gasFeeCap = new(big.Int) if args.MaxFeePerGas != nil { gasFeeCap = args.MaxFeePerGas.ToInt() diff --git a/internal/flags/categories.go b/internal/flags/categories.go index d7500157e3..02d063a65a 100644 --- a/internal/flags/categories.go +++ b/internal/flags/categories.go @@ -33,8 +33,7 @@ const ( LightCategory = "LIGHT CLIENT" DevCategory = "DEVELOPER CHAIN" EthashCategory = "ETHASH" - TxPoolCategory = "TRANSACTION POOL (EVM)" - BlobPoolCategory = "TRANSACTION POOL (BLOB)" + TxPoolCategory = "TRANSACTION POOL" PerfCategory = "PERFORMANCE TUNING" AccountCategory = "ACCOUNT" APICategory = "API AND CONSOLE" diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go index 8e23c8eeea..e5327d3bd3 100644 --- a/metrics/resetting_timer.go +++ b/metrics/resetting_timer.go @@ -2,10 +2,9 @@ package metrics import ( "math" + "sort" "sync" "time" - - "golang.org/x/exp/slices" ) // Initial slice capacity for the values stored in a ResettingTimer @@ -66,7 +65,7 @@ func (NilResettingTimer) Snapshot() ResettingTimer { } // Time is a no-op. -func (NilResettingTimer) Time(f func()) { f() } +func (NilResettingTimer) Time(func()) {} // Update is a no-op. func (NilResettingTimer) Update(time.Duration) {} @@ -187,7 +186,7 @@ func (t *ResettingTimerSnapshot) Mean() float64 { } func (t *ResettingTimerSnapshot) calc(percentiles []float64) { - slices.Sort(t.values) + sort.Sort(Int64Slice(t.values)) count := len(t.values) if count > 0 { @@ -233,3 +232,10 @@ func (t *ResettingTimerSnapshot) calc(percentiles []float64) { t.calculated = true } + +// Int64Slice attaches the methods of sort.Interface to []int64, sorting in increasing order. 
+type Int64Slice []int64 + +func (s Int64Slice) Len() int { return len(s) } +func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/metrics/sample.go b/metrics/sample.go index 252a878f58..afcaa21184 100644 --- a/metrics/sample.go +++ b/metrics/sample.go @@ -3,10 +3,9 @@ package metrics import ( "math" "math/rand" + "sort" "sync" "time" - - "golang.org/x/exp/slices" ) const rescaleThreshold = time.Hour @@ -283,17 +282,17 @@ func SampleMin(values []int64) int64 { } // SamplePercentiles returns an arbitrary percentile of the slice of int64. -func SamplePercentile(values []int64, p float64) float64 { +func SamplePercentile(values int64Slice, p float64) float64 { return SamplePercentiles(values, []float64{p})[0] } // SamplePercentiles returns a slice of arbitrary percentiles of the slice of // int64. -func SamplePercentiles(values []int64, ps []float64) []float64 { +func SamplePercentiles(values int64Slice, ps []float64) []float64 { scores := make([]float64, len(ps)) size := len(values) if size > 0 { - slices.Sort(values) + sort.Sort(values) for i, p := range ps { pos := p * float64(size+1) if pos < 1.0 { @@ -634,3 +633,9 @@ func (h *expDecaySampleHeap) down(i, n int) { i = j } } + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/metrics/timer.go b/metrics/timer.go index 2e1a9be472..a63c9dfb6c 100644 --- a/metrics/timer.go +++ b/metrics/timer.go @@ -123,7 +123,7 @@ func (NilTimer) Stop() {} func (NilTimer) Sum() int64 { return 0 } // Time is a no-op. -func (NilTimer) Time(f func()) { f() } +func (NilTimer) Time(func()) {} // Update is a no-op. func (NilTimer) Update(time.Duration) {} diff --git a/metrics/writer.go b/metrics/writer.go index 82434e9d1d..256fbd14c9 100644 --- a/metrics/writer.go +++ b/metrics/writer.go @@ -3,10 +3,8 @@ package metrics import ( "fmt" "io" - "strings" + "sort" "time" - - "golang.org/x/exp/slices" ) // Write sorts writes each metric in the given registry periodically to the @@ -20,11 +18,12 @@ func Write(r Registry, d time.Duration, w io.Writer) { // WriteOnce sorts and writes metrics in the given registry to the given // io.Writer. func WriteOnce(r Registry, w io.Writer) { - var namedMetrics []namedMetric + var namedMetrics namedMetricSlice r.Each(func(name string, i interface{}) { namedMetrics = append(namedMetrics, namedMetric{name, i}) }) - slices.SortFunc(namedMetrics, namedMetric.cmp) + + sort.Sort(namedMetrics) for _, namedMetric := range namedMetrics { switch metric := namedMetric.m.(type) { case Counter: @@ -92,6 +91,13 @@ type namedMetric struct { m interface{} } -func (m namedMetric) cmp(other namedMetric) int { - return strings.Compare(m.name, other.name) +// namedMetricSlice is a slice of namedMetrics that implements sort.Interface. 
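// The metrics hunks in this revert trade golang.org/x/exp/slices back for
// classic sort.Interface boilerplate. A minimal standalone sketch of the
// restored idiom (byValue is an illustrative stand-in for the
// Int64Slice/int64Slice/namedMetricSlice helpers being reintroduced):

package main

import (
	"fmt"
	"sort"
)

type byValue []int64

func (s byValue) Len() int           { return len(s) }
func (s byValue) Less(i, j int) bool { return s[i] < s[j] }
func (s byValue) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	vs := byValue{30, 10, 20}
	sort.Sort(vs)   // pre-generics equivalent of slices.Sort(vs)
	fmt.Println(vs) // [10 20 30]
}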
+type namedMetricSlice []namedMetric + +func (nms namedMetricSlice) Len() int { return len(nms) } + +func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } + +func (nms namedMetricSlice) Less(i, j int) bool { + return nms[i].name < nms[j].name } diff --git a/metrics/writer_test.go b/metrics/writer_test.go index 8376bf8975..1aacc28712 100644 --- a/metrics/writer_test.go +++ b/metrics/writer_test.go @@ -1,20 +1,19 @@ package metrics import ( + "sort" "testing" - - "golang.org/x/exp/slices" ) func TestMetricsSorting(t *testing.T) { - var namedMetrics = []namedMetric{ + var namedMetrics = namedMetricSlice{ {name: "zzz"}, {name: "bbb"}, {name: "fff"}, {name: "ggg"}, } - slices.SortFunc(namedMetrics, namedMetric.cmp) + sort.Sort(namedMetrics) for i, name := range []string{"bbb", "fff", "ggg", "zzz"} { if namedMetrics[i].name != name { t.Fail() diff --git a/miner/ordering.go b/miner/ordering.go deleted file mode 100644 index 70a2a42eb6..0000000000 --- a/miner/ordering.go +++ /dev/null @@ -1,157 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "container/heap" - "math/big" - - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/math" -) - -// txWithMinerFee wraps a transaction with its gas price or effective miner gasTipCap -type txWithMinerFee struct { - tx *txpool.LazyTransaction - from common.Address - fees *big.Int -} - -// newTxWithMinerFee creates a wrapped transaction, calculating the effective -// miner gasTipCap if a base fee is provided. -// Returns error in case of a negative effective miner gasTipCap. -func newTxWithMinerFee(tx *txpool.LazyTransaction, from common.Address, baseFee *big.Int) (*txWithMinerFee, error) { - tip := new(big.Int).Set(tx.GasTipCap) - if baseFee != nil { - if tx.GasFeeCap.Cmp(baseFee) < 0 { - return nil, types.ErrGasFeeCapTooLow - } - tip = math.BigMin(tx.GasTipCap, new(big.Int).Sub(tx.GasFeeCap, baseFee)) - } - return &txWithMinerFee{ - tx: tx, - from: from, - fees: tip, - }, nil -} - -// txByPriceAndTime implements both the sort and the heap interface, making it useful -// for all at once sorting as well as individually adding and removing elements. 
-type txByPriceAndTime []*txWithMinerFee - -func (s txByPriceAndTime) Len() int { return len(s) } -func (s txByPriceAndTime) Less(i, j int) bool { - // If the prices are equal, use the time the transaction was first seen for - // deterministic sorting - cmp := s[i].fees.Cmp(s[j].fees) - if cmp == 0 { - return s[i].tx.Time.Before(s[j].tx.Time) - } - return cmp > 0 -} -func (s txByPriceAndTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -func (s *txByPriceAndTime) Push(x interface{}) { - *s = append(*s, x.(*txWithMinerFee)) -} - -func (s *txByPriceAndTime) Pop() interface{} { - old := *s - n := len(old) - x := old[n-1] - old[n-1] = nil - *s = old[0 : n-1] - return x -} - -// transactionsByPriceAndNonce represents a set of transactions that can return -// transactions in a profit-maximizing sorted order, while supporting removing -// entire batches of transactions for non-executable accounts. -type transactionsByPriceAndNonce struct { - txs map[common.Address][]*txpool.LazyTransaction // Per account nonce-sorted list of transactions - heads txByPriceAndTime // Next transaction for each unique account (price heap) - signer types.Signer // Signer for the set of transactions - baseFee *big.Int // Current base fee -} - -// newTransactionsByPriceAndNonce creates a transaction set that can retrieve -// price sorted transactions in a nonce-honouring way. -// -// Note, the input map is reowned so the caller should not interact any more with -// if after providing it to the constructor. -func newTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *transactionsByPriceAndNonce { - // Initialize a price and received time based heap with the head transactions - heads := make(txByPriceAndTime, 0, len(txs)) - for from, accTxs := range txs { - wrapped, err := newTxWithMinerFee(accTxs[0], from, baseFee) - if err != nil { - delete(txs, from) - continue - } - heads = append(heads, wrapped) - txs[from] = accTxs[1:] - } - heap.Init(&heads) - - // Assemble and return the transaction set - return &transactionsByPriceAndNonce{ - txs: txs, - heads: heads, - signer: signer, - baseFee: baseFee, - } -} - -// Peek returns the next transaction by price. -func (t *transactionsByPriceAndNonce) Peek() *txpool.LazyTransaction { - if len(t.heads) == 0 { - return nil - } - return t.heads[0].tx -} - -// Shift replaces the current best head with the next one from the same account. -func (t *transactionsByPriceAndNonce) Shift() { - acc := t.heads[0].from - if txs, ok := t.txs[acc]; ok && len(txs) > 0 { - if wrapped, err := newTxWithMinerFee(txs[0], acc, t.baseFee); err == nil { - t.heads[0], t.txs[acc] = wrapped, txs[1:] - heap.Fix(&t.heads, 0) - return - } - } - heap.Pop(&t.heads) -} - -// Pop removes the best transaction, *not* replacing it with the next one from -// the same account. This should be used when a transaction cannot be executed -// and hence all subsequent ones should be discarded from the same account. 
-func (t *transactionsByPriceAndNonce) Pop() { - heap.Pop(&t.heads) -} diff --git a/miner/ordering_ext.go b/miner/ordering_ext.go deleted file mode 100644 index 4bf4a8367c..0000000000 --- a/miner/ordering_ext.go +++ /dev/null @@ -1,15 +0,0 @@ -package miner - -import ( - "math/big" - - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ethereum/go-ethereum/common" -) - -type TransactionsByPriceAndNonce = transactionsByPriceAndNonce - -func NewTransactionsByPriceAndNonce(signer types.Signer, txs map[common.Address][]*txpool.LazyTransaction, baseFee *big.Int) *TransactionsByPriceAndNonce { - return newTransactionsByPriceAndNonce(signer, txs, baseFee) -} diff --git a/miner/ordering_test.go b/miner/ordering_test.go deleted file mode 100644 index ea0f62f913..0000000000 --- a/miner/ordering_test.go +++ /dev/null @@ -1,198 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package miner - -import ( - "crypto/ecdsa" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -func TestTransactionPriceNonceSortLegacy(t *testing.T) { - testTransactionPriceNonceSort(t, nil) -} - -func TestTransactionPriceNonceSort1559(t *testing.T) { - testTransactionPriceNonceSort(t, big.NewInt(0)) - testTransactionPriceNonceSort(t, big.NewInt(5)) - testTransactionPriceNonceSort(t, big.NewInt(50)) -} - -// Tests that transactions can be correctly sorted according to their price in -// decreasing order, but at the same time with increasing nonces when issued by -// the same account. 
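The removed ordering_ext.go re-exported an unexported type through a Go type alias; a minimal sketch of that idiom under hypothetical names:

package main

import "fmt"

// counter is the unexported implementation.
type counter struct{ n int }

func (c *counter) Inc() { c.n++ }

// Counter re-exports counter for external callers, the same alias trick
// ordering_ext.go applied to transactionsByPriceAndNonce.
type Counter = counter

// NewCounter mirrors the exported constructor wrapper pattern.
func NewCounter() *Counter { return &counter{} }

func main() {
	c := NewCounter()
	c.Inc()
	fmt.Println(c.n) // 1
}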
-func testTransactionPriceNonceSort(t *testing.T, baseFee *big.Int) { - // Generate a batch of accounts to start with - keys := make([]*ecdsa.PrivateKey, 25) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - signer := types.LatestSignerForChainID(common.Big1) - - // Generate a batch of transactions with overlapping values, but shifted nonces - groups := map[common.Address][]*txpool.LazyTransaction{} - expectedCount := 0 - for start, key := range keys { - addr := crypto.PubkeyToAddress(key.PublicKey) - count := 25 - for i := 0; i < 25; i++ { - var tx *types.Transaction - gasFeeCap := rand.Intn(50) - if baseFee == nil { - tx = types.NewTx(&types.LegacyTx{ - Nonce: uint64(start + i), - To: &common.Address{}, - Value: big.NewInt(100), - Gas: 100, - GasPrice: big.NewInt(int64(gasFeeCap)), - Data: nil, - }) - } else { - tx = types.NewTx(&types.DynamicFeeTx{ - Nonce: uint64(start + i), - To: &common.Address{}, - Value: big.NewInt(100), - Gas: 100, - GasFeeCap: big.NewInt(int64(gasFeeCap)), - GasTipCap: big.NewInt(int64(rand.Intn(gasFeeCap + 1))), - Data: nil, - }) - if count == 25 && int64(gasFeeCap) < baseFee.Int64() { - count = i - } - } - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to sign tx: %s", err) - } - groups[addr] = append(groups[addr], &txpool.LazyTransaction{ - Hash: tx.Hash(), - Tx: &txpool.Transaction{Tx: tx}, - Time: tx.Time(), - GasFeeCap: tx.GasFeeCap(), - GasTipCap: tx.GasTipCap(), - }) - } - expectedCount += count - } - // Sort the transactions and cross check the nonce ordering - txset := newTransactionsByPriceAndNonce(signer, groups, baseFee) - - txs := types.Transactions{} - for tx := txset.Peek(); tx != nil; tx = txset.Peek() { - txs = append(txs, tx.Tx.Tx) - txset.Shift() - } - if len(txs) != expectedCount { - t.Errorf("expected %d transactions, found %d", expectedCount, len(txs)) - } - for i, txi := range txs { - fromi, _ := types.Sender(signer, txi) - - // Make sure the nonce order is valid - for j, txj := range txs[i+1:] { - fromj, _ := types.Sender(signer, txj) - if fromi == fromj && txi.Nonce() > txj.Nonce() { - t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce()) - } - } - // If the next tx has different from account, the price must be lower than the current one - if i+1 < len(txs) { - next := txs[i+1] - fromNext, _ := types.Sender(signer, next) - tip, err := txi.EffectiveGasTip(baseFee) - nextTip, nextErr := next.EffectiveGasTip(baseFee) - if err != nil || nextErr != nil { - t.Errorf("error calculating effective tip: %v, %v", err, nextErr) - } - if fromi != fromNext && tip.Cmp(nextTip) < 0 { - t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) - } - } - } -} - -// Tests that if multiple transactions have the same price, the ones seen earlier -// are prioritized to avoid network spam attacks aiming for a specific ordering. 
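The ordering checked above hinges on the effective miner tip, min(gasTipCap, gasFeeCap - baseFee), as computed by newTxWithMinerFee; a worked sketch of that arithmetic (helper name illustrative):

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// effectiveTip computes min(gasTipCap, gasFeeCap-baseFee); a nil baseFee
// means legacy pricing, where the tip is the full gas price.
func effectiveTip(gasTipCap, gasFeeCap, baseFee *big.Int) (*big.Int, error) {
	if baseFee == nil {
		return new(big.Int).Set(gasTipCap), nil
	}
	if gasFeeCap.Cmp(baseFee) < 0 {
		return nil, errors.New("gas fee cap below base fee")
	}
	headroom := new(big.Int).Sub(gasFeeCap, baseFee)
	if headroom.Cmp(gasTipCap) < 0 {
		return headroom, nil
	}
	return new(big.Int).Set(gasTipCap), nil
}

func main() {
	// Tip is capped by the headroom above the base fee: min(2, 50-49) = 1.
	tip, _ := effectiveTip(big.NewInt(2), big.NewInt(50), big.NewInt(49))
	fmt.Println(tip) // 1
}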
-func TestTransactionTimeSort(t *testing.T) { - // Generate a batch of accounts to start with - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - } - signer := types.HomesteadSigner{} - - // Generate a batch of transactions with overlapping prices, but different creation times - groups := map[common.Address][]*txpool.LazyTransaction{} - for start, key := range keys { - addr := crypto.PubkeyToAddress(key.PublicKey) - - tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100, big.NewInt(1), nil), signer, key) - tx.SetTime(time.Unix(0, int64(len(keys)-start))) - - groups[addr] = append(groups[addr], &txpool.LazyTransaction{ - Hash: tx.Hash(), - Tx: &txpool.Transaction{Tx: tx}, - Time: tx.Time(), - GasFeeCap: tx.GasFeeCap(), - GasTipCap: tx.GasTipCap(), - }) - } - // Sort the transactions and cross check the nonce ordering - txset := newTransactionsByPriceAndNonce(signer, groups, nil) - - txs := types.Transactions{} - for tx := txset.Peek(); tx != nil; tx = txset.Peek() { - txs = append(txs, tx.Tx.Tx) - txset.Shift() - } - if len(txs) != len(keys) { - t.Errorf("expected %d transactions, found %d", len(keys), len(txs)) - } - for i, txi := range txs { - fromi, _ := types.Sender(signer, txi) - if i+1 < len(txs) { - next := txs[i+1] - fromNext, _ := types.Sender(signer, next) - - if txi.GasPrice().Cmp(next.GasPrice()) < 0 { - t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice()) - } - // Make sure time order is ascending if the txs have the same gas price - if txi.GasPrice().Cmp(next.GasPrice()) == 0 && txi.Time().After(next.Time()) { - t.Errorf("invalid received time ordering: tx #%d (A=%x T=%v) > tx #%d (A=%x T=%v)", i, fromi[:4], txi.Time(), i+1, fromNext[:4], next.Time()) - } - } - } -} diff --git a/miner/worker.go b/miner/worker.go index 8a6124c1a8..a2cdaab174 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -42,7 +42,6 @@ import ( "github.com/ava-labs/subnet-evm/consensus/dummy" "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/state" - "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" "github.com/ava-labs/subnet-evm/params" @@ -59,7 +58,8 @@ const ( // environment is the worker's current environment and holds all of the current state information. 
type environment struct { - signer types.Signer + signer types.Signer + state *state.StateDB // apply state changes here tcount int // tx count in cycle gasPool *core.GasPool // available gas used to pack transactions @@ -213,7 +213,7 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte pending := w.eth.TxPool().PendingWithBaseFee(true, header.BaseFee) // Split the pending transactions into locals and remotes - localTxs := make(map[common.Address][]*txpool.LazyTransaction) + localTxs := make(map[common.Address]types.Transactions) remoteTxs := pending for _, account := range w.eth.TxPool().Locals() { if txs := remoteTxs[account]; len(txs) > 0 { @@ -222,11 +222,11 @@ func (w *worker) commitNewWork(predicateContext *precompileconfig.PredicateConte } } if len(localTxs) > 0 { - txs := newTransactionsByPriceAndNonce(env.signer, localTxs, header.BaseFee) + txs := types.NewTransactionsByPriceAndNonce(env.signer, localTxs, header.BaseFee) w.commitTransactions(env, txs, header.Coinbase) } if len(remoteTxs) > 0 { - txs := newTransactionsByPriceAndNonce(env.signer, remoteTxs, header.BaseFee) + txs := types.NewTransactionsByPriceAndNonce(env.signer, remoteTxs, header.BaseFee) w.commitTransactions(env, txs, header.Coinbase) } @@ -246,14 +246,14 @@ func (w *worker) createCurrentEnvironment(predicateContext *precompileconfig.Pre header: header, tcount: 0, gasPool: new(core.GasPool).AddGas(header.GasLimit), - rules: w.chainConfig.Rules(header.Number, header.Time), + rules: w.chainConfig.AvalancheRules(header.Number, header.Time), predicateContext: predicateContext, predicateResults: predicate.NewResults(), start: tstart, }, nil } -func (w *worker) commitTransaction(env *environment, tx *txpool.Transaction, coinbase common.Address) ([]*types.Log, error) { +func (w *worker) commitTransaction(env *environment, tx *types.Transaction, coinbase common.Address) ([]*types.Log, error) { var ( snap = env.state.Snapshot() gp = env.gasPool.Gas() @@ -261,33 +261,33 @@ func (w *worker) commitTransaction(env *environment, tx *txpool.Transaction, coi ) if env.rules.IsDurango { - results, err := core.CheckPredicates(env.rules, env.predicateContext, tx.Tx) + results, err := core.CheckPredicates(env.rules, env.predicateContext, tx) if err != nil { - log.Debug("Transaction predicate failed verification in miner", "tx", tx.Tx.Hash(), "err", err) + log.Debug("Transaction predicate failed verification in miner", "tx", tx.Hash(), "err", err) return nil, err } - env.predicateResults.SetTxResults(tx.Tx.Hash(), results) + env.predicateResults.SetTxResults(tx.Hash(), results) blockContext = core.NewEVMBlockContextWithPredicateResults(env.header, w.chain, &coinbase, env.predicateResults) } else { blockContext = core.NewEVMBlockContext(env.header, w.chain, &coinbase) } - receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, blockContext, env.gasPool, env.state, env.header, tx.Tx, &env.header.GasUsed, *w.chain.GetVMConfig()) + receipt, err := core.ApplyTransaction(w.chainConfig, w.chain, blockContext, env.gasPool, env.state, env.header, tx, &env.header.GasUsed, *w.chain.GetVMConfig()) if err != nil { env.state.RevertToSnapshot(snap) env.gasPool.SetGas(gp) - env.predicateResults.DeleteTxResults(tx.Tx.Hash()) + env.predicateResults.DeleteTxResults(tx.Hash()) return nil, err } - env.txs = append(env.txs, tx.Tx) + env.txs = append(env.txs, tx) env.receipts = append(env.receipts, receipt) - env.size += tx.Tx.Size() + env.size += tx.Size() return receipt.Logs, nil } -func (w *worker) 
commitTransactions(env *environment, txs *transactionsByPriceAndNonce, coinbase common.Address) { +func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByPriceAndNonce, coinbase common.Address) { for { // If we don't have enough gas for any further transactions then we're done. if env.gasPool.Gas() < params.TxGas { @@ -295,45 +295,38 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn break } // Retrieve the next transaction and abort if all done. - ltx := txs.Peek() - if ltx == nil { - break - } - tx := ltx.Resolve() + tx := txs.Peek() if tx == nil { - log.Warn("Ignoring evicted transaction") - - txs.Pop() - continue + break } // Abort transaction if it won't fit in the block and continue to search for a smaller // transction that will fit. - if totalTxsSize := env.size + tx.Tx.Size(); totalTxsSize > targetTxsSize { - log.Trace("Skipping transaction that would exceed target size", "hash", tx.Tx.Hash(), "totalTxsSize", totalTxsSize, "txSize", tx.Tx.Size()) + if totalTxsSize := env.size + tx.Size(); totalTxsSize > targetTxsSize { + log.Trace("Skipping transaction that would exceed target size", "hash", tx.Hash(), "totalTxsSize", totalTxsSize, "txSize", tx.Size()) txs.Pop() continue } // Error may be ignored here. The error has already been checked // during transaction acceptance is the transaction pool. - from, _ := types.Sender(env.signer, tx.Tx) + from, _ := types.Sender(env.signer, tx) // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. - if tx.Tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { - log.Trace("Ignoring reply protected transaction", "hash", tx.Tx.Hash(), "eip155", w.chainConfig.EIP155Block) + if tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { + log.Trace("Ignoring reply protected transaction", "hash", tx.Hash(), "eip155", w.chainConfig.EIP155Block) txs.Pop() continue } // Start executing the transaction - env.state.SetTxContext(tx.Tx.Hash(), env.tcount) + env.state.SetTxContext(tx.Hash(), env.tcount) _, err := w.commitTransaction(env, tx, coinbase) switch { case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift - log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Tx.Nonce()) + log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) txs.Shift() case errors.Is(err, nil): @@ -343,7 +336,7 @@ func (w *worker) commitTransactions(env *environment, txs *transactionsByPriceAn default: // Transaction is regarded as invalid, drop all consecutive transactions from // the same sender because of `nonce-too-high` clause. - log.Debug("Transaction failed, account skipped", "hash", tx.Tx.Hash(), "err", err) + log.Debug("Transaction failed, account skipped", "hash", tx.Hash(), "err", err) txs.Pop() } } diff --git a/node/config.go b/node/config.go index bf67d774ff..1d4a418cd1 100644 --- a/node/config.go +++ b/node/config.go @@ -60,12 +60,6 @@ type Config struct { // InsecureUnlockAllowed allows user to unlock accounts in unsafe http environment. InsecureUnlockAllowed bool `toml:",omitempty"` - // BatchRequestLimit is the maximum number of requests in a batch. - BatchRequestLimit int `toml:",omitempty"` - - // BatchResponseMaxSize is the maximum number of bytes returned from a batched rpc call. 
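The commitTransactions loop above skips, rather than stops at, a transaction that would overflow the size budget; a stripped-down sketch of that greedy packing (names hypothetical):

package main

import "fmt"

// pack greedily fills a byte budget, skipping items that would overflow
// and continuing to search for smaller ones, as the targetTxsSize check
// in commitTransactions does.
func pack(sizes []uint64, budget uint64) []uint64 {
	var used uint64
	var kept []uint64
	for _, s := range sizes {
		if used+s > budget {
			continue // too big; a later, smaller tx may still fit
		}
		used += s
		kept = append(kept, s)
	}
	return kept
}

func main() {
	fmt.Println(pack([]uint64{60, 50, 30, 10}, 100)) // [60 30 10]
}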
- BatchResponseMaxSize int `toml:",omitempty"` - SubnetEVMVersion string } diff --git a/params/config.go b/params/config.go index 3f7a29efc8..1b2fe0930e 100644 --- a/params/config.go +++ b/params/config.go @@ -33,7 +33,9 @@ import ( "math/big" "time" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/subnet-evm/commontype" "github.com/ava-labs/subnet-evm/precompile/modules" "github.com/ava-labs/subnet-evm/precompile/precompileconfig" @@ -153,7 +155,7 @@ var ( UpgradeConfig: UpgradeConfig{}, } - TestRules = TestChainConfig.Rules(new(big.Int), 0) + TestRules = TestChainConfig.AvalancheRules(new(big.Int), 0) ) func getUpgradeTime(networkID uint32, upgradeTimes map[uint32]time.Time) *uint64 { @@ -165,13 +167,46 @@ func getUpgradeTime(networkID uint32, upgradeTimes map[uint32]time.Time) *uint64 return utils.NewUint64(0) } +// GetMandatoryNetworkUpgrades returns the mandatory network upgrades for the specified network ID. +func GetMandatoryNetworkUpgrades(networkID uint32) MandatoryNetworkUpgrades { + return MandatoryNetworkUpgrades{ + SubnetEVMTimestamp: utils.NewUint64(0), + DurangoTimestamp: getUpgradeTime(networkID, version.DurangoTimes), + } +} + +// UpgradeConfig includes the following configs that may be specified in upgradeBytes: +// - Timestamps that enable avalanche network upgrades, +// - Enabling or disabling precompiles as network upgrades. +type UpgradeConfig struct { + // Config for optional timestamps that enable network upgrades. + // Note: if OptionalUpgrades is specified in the JSON all previously activated + // forks must be present or upgradeBytes will be rejected. + OptionalNetworkUpgrades *OptionalNetworkUpgrades `json:"networkUpgrades,omitempty"` + + // Config for modifying state as a network upgrade. + StateUpgrades []StateUpgrade `json:"stateUpgrades,omitempty"` + + // Config for enabling and disabling precompiles as network upgrades. + PrecompileUpgrades []PrecompileUpgrade `json:"precompileUpgrades,omitempty"` +} + +// AvalancheContext provides Avalanche specific context directly into the EVM. +type AvalancheContext struct { + SnowCtx *snow.Context +} + // ChainConfig is the core config which determines the blockchain settings. // // ChainConfig is stored in the database on a per block basis. This means // that any network, identified by its genesis block, can have its own // set of configuration options. type ChainConfig struct { - ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection + AvalancheContext `json:"-"` // Avalanche specific context set during VM initialization. Not serialized. + + ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection + FeeConfig commontype.FeeConfig `json:"feeConfig"` // Set the configuration for the dynamic fee algorithm + AllowFeeRecipients bool `json:"allowFeeRecipients,omitempty"` // Allows fees to be collected by block builders. 
HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead) @@ -186,16 +221,58 @@ type ChainConfig struct { IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul) MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) - MandatoryNetworkUpgrades // Config for timestamps that enable mandatory network upgrades. Skip encoding/decoding directly into ChainConfig. - OptionalNetworkUpgrades // Config for optional timestamps that enable network upgrades + MandatoryNetworkUpgrades // Config for timestamps that enable mandatory network upgrades. Skip encoding/decoding directly into ChainConfig. + OptionalNetworkUpgrades // Config for optional timestamps that enable network upgrades + GenesisPrecompiles Precompiles `json:"-"` // Config for enabling precompiles from genesis. JSON encode/decode will be handled by the custom marshaler/unmarshaler. + UpgradeConfig `json:"-"` // Config specified in upgradeBytes (avalanche network upgrades or enable/disabling precompiles). Skip encoding/decoding directly into ChainConfig. +} + +// UnmarshalJSON parses the JSON-encoded data and stores the result in the +// object pointed to by c. +// This is a custom unmarshaler to handle the Precompiles field. +// Precompiles was presented as an inline object in the JSON. +// This custom unmarshaler ensures backwards compatibility with the old format. +func (c *ChainConfig) UnmarshalJSON(data []byte) error { + // Alias ChainConfig to avoid recursion + type _ChainConfig ChainConfig + tmp := _ChainConfig{} + if err := json.Unmarshal(data, &tmp); err != nil { + return err + } - AvalancheContext `json:"-"` // Avalanche specific context set during VM initialization. Not serialized. + // At this point we have populated all fields except PrecompileUpgrade + *c = ChainConfig(tmp) - FeeConfig commontype.FeeConfig `json:"feeConfig"` // Set the configuration for the dynamic fee algorithm - AllowFeeRecipients bool `json:"allowFeeRecipients,omitempty"` // Allows fees to be collected by block builders. + // Unmarshal inlined PrecompileUpgrade + return json.Unmarshal(data, &c.GenesisPrecompiles) +} + +// MarshalJSON returns the JSON encoding of c. +// This is a custom marshaler to handle the Precompiles field. +func (c ChainConfig) MarshalJSON() ([]byte, error) { + // Alias ChainConfig to avoid recursion + type _ChainConfig ChainConfig + tmp, err := json.Marshal(_ChainConfig(c)) + if err != nil { + return nil, err + } + + // To include PrecompileUpgrades, we unmarshal the json representing c + // then directly add the corresponding keys to the json. + raw := make(map[string]json.RawMessage) + if err := json.Unmarshal(tmp, &raw); err != nil { + return nil, err + } + + for key, value := range c.GenesisPrecompiles { + conf, err := json.Marshal(value) + if err != nil { + return nil, err + } + raw[key] = conf + } - GenesisPrecompiles Precompiles `json:"-"` // Config for enabling precompiles from genesis. JSON encode/decode will be handled by the custom marshaler/unmarshaler. - UpgradeConfig `json:"-"` // Config specified in upgradeBytes (avalanche network upgrades or enable/disabling precompiles). Skip encoding/decoding directly into ChainConfig. + return json.Marshal(raw) } // Description returns a human-readable description of ChainConfig. 
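The custom (Un)MarshalJSON above relies on a local alias type so marshaling does not recurse back into the method; a self-contained sketch of the idiom with hypothetical names:

package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	ChainID int               `json:"chainId"`
	Extra   map[string]string `json:"-"` // spliced inline by the marshaler
}

// MarshalJSON marshals via a method-less alias (avoiding recursion),
// then splices the Extra keys into the top-level object, the same shape
// as ChainConfig.MarshalJSON gives GenesisPrecompiles.
func (c Config) MarshalJSON() ([]byte, error) {
	type alias Config
	base, err := json.Marshal(alias(c))
	if err != nil {
		return nil, err
	}
	raw := make(map[string]json.RawMessage)
	if err := json.Unmarshal(base, &raw); err != nil {
		return nil, err
	}
	for k, v := range c.Extra {
		b, err := json.Marshal(v)
		if err != nil {
			return nil, err
		}
		raw[k] = b
	}
	return json.Marshal(raw)
}

func main() {
	out, _ := json.Marshal(Config{ChainID: 1, Extra: map[string]string{"txAllowList": "enabled"}})
	fmt.Println(string(out)) // {"chainId":1,"txAllowList":"enabled"}
}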
@@ -208,7 +285,7 @@ func (c *ChainConfig) Description() string { // Create a list of forks with a short description of them. Forks that only // makes sense for mainnet should be optional at printing to avoid bloating // the output for testnets and private networks. - banner += "Hard Forks (block based):\n" + banner += "Hard Forks:\n" banner += fmt.Sprintf(" - Homestead: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/homestead.md)\n", c.HomesteadBlock) banner += fmt.Sprintf(" - Tangerine Whistle (EIP 150): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/tangerine-whistle.md)\n", c.EIP150Block) banner += fmt.Sprintf(" - Spurious Dragon/1 (EIP 155): #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/spurious-dragon.md)\n", c.EIP155Block) @@ -220,13 +297,10 @@ func (c *ChainConfig) Description() string { if c.MuirGlacierBlock != nil { banner += fmt.Sprintf(" - Muir Glacier: #%-8v (https://github.com/ethereum/execution-specs/blob/master/network-upgrades/mainnet-upgrades/muir-glacier.md)\n", c.MuirGlacierBlock) } - - banner += "Hard forks (timestamp based):\n" - banner += fmt.Sprintf(" - Cancun Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(c.CancunTime)) - - banner += "Mandatory Avalanche Upgrades (timestamp based):\n" + banner += "Mandatory Upgrades:\n" banner += fmt.Sprintf(" - SubnetEVM Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", ptrToString(c.SubnetEVMTimestamp)) banner += fmt.Sprintf(" - Durango Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", ptrToString(c.DurangoTimestamp)) + banner += fmt.Sprintf(" - Cancun Timestamp: @%-10v (https://github.com/ava-labs/avalanchego/releases/tag/v1.12.0)\n", ptrToString(c.CancunTime)) banner += "\n" // Add Subnet-EVM custom fields @@ -324,7 +398,7 @@ func (c *ChainConfig) IsDurango(time uint64) bool { // IsCancun returns whether [time] represents a block // with a timestamp after the Cancun upgrade time. -func (c *ChainConfig) IsCancun(num *big.Int, time uint64) bool { +func (c *ChainConfig) IsCancun(time uint64) bool { return utils.IsTimestampForked(c.CancunTime, time) } @@ -533,6 +607,15 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, height *big.Int, time return nil } +// getOptionalNetworkUpgrades returns OptionalNetworkUpgrades from upgrade config if set there, +// otherwise it falls back to the genesis chain config. +func (c *ChainConfig) getOptionalNetworkUpgrades() *OptionalNetworkUpgrades { + if upgradeConfigOverride := c.UpgradeConfig.OptionalNetworkUpgrades; upgradeConfigOverride != nil { + return upgradeConfigOverride + } + return &c.OptionalNetworkUpgrades +} + // isForkBlockIncompatible returns true if a fork scheduled at s1 cannot be rescheduled to // block s2 because head is already past the fork. 
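getOptionalNetworkUpgrades above is a small override-with-fallback getter; a minimal sketch of the pattern, with hypothetical field names standing in for the upgrade-config override and the genesis default:

package main

import "fmt"

type upgrades struct{ source string }

type config struct {
	genesis  upgrades  // value from the genesis chain config
	override *upgrades // value from upgradeBytes, when present
}

// optionalUpgrades prefers the upgrade-config override and otherwise
// falls back to genesis, mirroring getOptionalNetworkUpgrades.
func (c *config) optionalUpgrades() *upgrades {
	if c.override != nil {
		return c.override
	}
	return &c.genesis
}

func main() {
	c := config{genesis: upgrades{source: "genesis"}}
	fmt.Println(c.optionalUpgrades().source) // genesis
	c.override = &upgrades{source: "upgradeBytes"}
	fmt.Println(c.optionalUpgrades().source) // upgradeBytes
}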
func isForkBlockIncompatible(s1, s2, head *big.Int) bool { @@ -691,13 +774,13 @@ func (c *ChainConfig) rules(num *big.Int, timestamp uint64) Rules { IsConstantinople: c.IsConstantinople(num), IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), - IsCancun: c.IsCancun(num, timestamp), + IsCancun: c.IsCancun(timestamp), } } -// Rules returns the Avalanche modified rules to support Avalanche +// AvalancheRules returns the Avalanche modified rules to support Avalanche // network upgrades -func (c *ChainConfig) Rules(blockNum *big.Int, timestamp uint64) Rules { +func (c *ChainConfig) AvalancheRules(blockNum *big.Int, timestamp uint64) Rules { rules := c.rules(blockNum, timestamp) rules.IsSubnetEVM = c.IsSubnetEVM(timestamp) @@ -734,11 +817,71 @@ func (c *ChainConfig) AllowedFeeRecipients() bool { return c.AllowFeeRecipients } -// getOptionalNetworkUpgrades returns OptionalNetworkUpgrades from upgrade config if set there, -// otherwise it falls back to the genesis chain config. -func (c *ChainConfig) getOptionalNetworkUpgrades() *OptionalNetworkUpgrades { - if upgradeConfigOverride := c.UpgradeConfig.OptionalNetworkUpgrades; upgradeConfigOverride != nil { - return upgradeConfigOverride +type ChainConfigWithUpgradesJSON struct { + ChainConfig + UpgradeConfig UpgradeConfig `json:"upgrades,omitempty"` +} + +// MarshalJSON implements json.Marshaler. This is a workaround for the fact that +// the embedded ChainConfig struct has a MarshalJSON method, which prevents +// the default JSON marshalling from working for UpgradeConfig. +// TODO: consider removing this method by allowing external tag for the embedded +// ChainConfig struct. +func (cu ChainConfigWithUpgradesJSON) MarshalJSON() ([]byte, error) { + // embed the ChainConfig struct into the response + chainConfigJSON, err := json.Marshal(cu.ChainConfig) + if err != nil { + return nil, err + } + if len(chainConfigJSON) > maxJSONLen { + return nil, errors.New("value too large") + } + + type upgrades struct { + UpgradeConfig UpgradeConfig `json:"upgrades"` + } + + upgradeJSON, err := json.Marshal(upgrades{cu.UpgradeConfig}) + if err != nil { + return nil, err + } + if len(upgradeJSON) > maxJSONLen { + return nil, errors.New("value too large") + } + + // merge the two JSON objects + mergedJSON := make([]byte, 0, len(chainConfigJSON)+len(upgradeJSON)+1) + mergedJSON = append(mergedJSON, chainConfigJSON[:len(chainConfigJSON)-1]...) + mergedJSON = append(mergedJSON, ',') + mergedJSON = append(mergedJSON, upgradeJSON[1:]...) + return mergedJSON, nil +} + +func (cu *ChainConfigWithUpgradesJSON) UnmarshalJSON(input []byte) error { + var cc ChainConfig + if err := json.Unmarshal(input, &cc); err != nil { + return err + } + + type upgrades struct { + UpgradeConfig UpgradeConfig `json:"upgrades"` + } + + var u upgrades + if err := json.Unmarshal(input, &u); err != nil { + return err + } + cu.ChainConfig = cc + cu.UpgradeConfig = u.UpgradeConfig + return nil +} + +// ToWithUpgradesJSON converts the ChainConfig to ChainConfigWithUpgradesJSON with upgrades explicitly displayed. +// ChainConfig does not include upgrades in its JSON output. +// This is a workaround for showing upgrades in the JSON output. 
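ChainConfigWithUpgradesJSON.MarshalJSON above merges two serialized objects at the byte level; a sketch of that splice in isolation, assuming both inputs are non-empty JSON objects:

package main

import "fmt"

// mergeObjects splices two serialized JSON objects into one by dropping
// a's closing brace and b's opening brace, the same byte-level merge
// ChainConfigWithUpgradesJSON.MarshalJSON performs. An empty object on
// either side would leave a stray comma, hence the non-empty assumption.
func mergeObjects(a, b []byte) []byte {
	merged := make([]byte, 0, len(a)+len(b)+1)
	merged = append(merged, a[:len(a)-1]...)
	merged = append(merged, ',')
	merged = append(merged, b[1:]...)
	return merged
}

func main() {
	out := mergeObjects([]byte(`{"chainId":1}`), []byte(`{"upgrades":{"precompileUpgrades":[]}}`))
	fmt.Println(string(out)) // {"chainId":1,"upgrades":{"precompileUpgrades":[]}}
}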
+func (c *ChainConfig) ToWithUpgradesJSON() *ChainConfigWithUpgradesJSON { + return &ChainConfigWithUpgradesJSON{ + ChainConfig: *c, + UpgradeConfig: c.UpgradeConfig, } - return &c.OptionalNetworkUpgrades } diff --git a/params/config_extra.go b/params/config_extra.go deleted file mode 100644 index f043babb0d..0000000000 --- a/params/config_extra.go +++ /dev/null @@ -1,149 +0,0 @@ -// (c) 2024 Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package params - -import ( - "encoding/json" - "errors" - - "github.com/ava-labs/avalanchego/snow" -) - -// UpgradeConfig includes the following configs that may be specified in upgradeBytes: -// - Timestamps that enable avalanche network upgrades, -// - Enabling or disabling precompiles as network upgrades. -type UpgradeConfig struct { - // Config for optional timestamps that enable network upgrades. - // Note: if OptionalUpgrades is specified in the JSON all previously activated - // forks must be present or upgradeBytes will be rejected. - OptionalNetworkUpgrades *OptionalNetworkUpgrades `json:"networkUpgrades,omitempty"` - - // Config for modifying state as a network upgrade. - StateUpgrades []StateUpgrade `json:"stateUpgrades,omitempty"` - - // Config for enabling and disabling precompiles as network upgrades. - PrecompileUpgrades []PrecompileUpgrade `json:"precompileUpgrades,omitempty"` -} - -// AvalancheContext provides Avalanche specific context directly into the EVM. -type AvalancheContext struct { - SnowCtx *snow.Context -} - -// UnmarshalJSON parses the JSON-encoded data and stores the result in the -// object pointed to by c. -// This is a custom unmarshaler to handle the Precompiles field. -// Precompiles was presented as an inline object in the JSON. -// This custom unmarshaler ensures backwards compatibility with the old format. -func (c *ChainConfig) UnmarshalJSON(data []byte) error { - // Alias ChainConfig to avoid recursion - type _ChainConfig ChainConfig - tmp := _ChainConfig{} - if err := json.Unmarshal(data, &tmp); err != nil { - return err - } - - // At this point we have populated all fields except PrecompileUpgrade - *c = ChainConfig(tmp) - - // Unmarshal inlined PrecompileUpgrade - return json.Unmarshal(data, &c.GenesisPrecompiles) -} - -// MarshalJSON returns the JSON encoding of c. -// This is a custom marshaler to handle the Precompiles field. -func (c ChainConfig) MarshalJSON() ([]byte, error) { - // Alias ChainConfig to avoid recursion - type _ChainConfig ChainConfig - tmp, err := json.Marshal(_ChainConfig(c)) - if err != nil { - return nil, err - } - - // To include PrecompileUpgrades, we unmarshal the json representing c - // then directly add the corresponding keys to the json. - raw := make(map[string]json.RawMessage) - if err := json.Unmarshal(tmp, &raw); err != nil { - return nil, err - } - - for key, value := range c.GenesisPrecompiles { - conf, err := json.Marshal(value) - if err != nil { - return nil, err - } - raw[key] = conf - } - - return json.Marshal(raw) -} - -type ChainConfigWithUpgradesJSON struct { - ChainConfig - UpgradeConfig UpgradeConfig `json:"upgrades,omitempty"` -} - -// MarshalJSON implements json.Marshaler. This is a workaround for the fact that -// the embedded ChainConfig struct has a MarshalJSON method, which prevents -// the default JSON marshalling from working for UpgradeConfig. -// TODO: consider removing this method by allowing external tag for the embedded -// ChainConfig struct. 
-func (cu ChainConfigWithUpgradesJSON) MarshalJSON() ([]byte, error) { - // embed the ChainConfig struct into the response - chainConfigJSON, err := json.Marshal(cu.ChainConfig) - if err != nil { - return nil, err - } - if len(chainConfigJSON) > maxJSONLen { - return nil, errors.New("value too large") - } - - type upgrades struct { - UpgradeConfig UpgradeConfig `json:"upgrades"` - } - - upgradeJSON, err := json.Marshal(upgrades{cu.UpgradeConfig}) - if err != nil { - return nil, err - } - if len(upgradeJSON) > maxJSONLen { - return nil, errors.New("value too large") - } - - // merge the two JSON objects - mergedJSON := make([]byte, 0, len(chainConfigJSON)+len(upgradeJSON)+1) - mergedJSON = append(mergedJSON, chainConfigJSON[:len(chainConfigJSON)-1]...) - mergedJSON = append(mergedJSON, ',') - mergedJSON = append(mergedJSON, upgradeJSON[1:]...) - return mergedJSON, nil -} - -func (cu *ChainConfigWithUpgradesJSON) UnmarshalJSON(input []byte) error { - var cc ChainConfig - if err := json.Unmarshal(input, &cc); err != nil { - return err - } - - type upgrades struct { - UpgradeConfig UpgradeConfig `json:"upgrades"` - } - - var u upgrades - if err := json.Unmarshal(input, &u); err != nil { - return err - } - cu.ChainConfig = cc - cu.UpgradeConfig = u.UpgradeConfig - return nil -} - -// ToWithUpgradesJSON converts the ChainConfig to ChainConfigWithUpgradesJSON with upgrades explicitly displayed. -// ChainConfig does not include upgrades in its JSON output. -// This is a workaround for showing upgrades in the JSON output. -func (c *ChainConfig) ToWithUpgradesJSON() *ChainConfigWithUpgradesJSON { - return &ChainConfigWithUpgradesJSON{ - ChainConfig: *c, - UpgradeConfig: c.UpgradeConfig, - } -} diff --git a/params/config_test.go b/params/config_test.go index 1c535bcd21..066cc21192 100644 --- a/params/config_test.go +++ b/params/config_test.go @@ -157,15 +157,15 @@ func TestConfigRules(t *testing.T) { } var stamp uint64 - if r := c.Rules(big.NewInt(0), stamp); r.IsSubnetEVM { + if r := c.AvalancheRules(big.NewInt(0), stamp); r.IsSubnetEVM { t.Errorf("expected %v to not be subnet-evm", stamp) } stamp = 500 - if r := c.Rules(big.NewInt(0), stamp); !r.IsSubnetEVM { + if r := c.AvalancheRules(big.NewInt(0), stamp); !r.IsSubnetEVM { t.Errorf("expected %v to be subnet-evm", stamp) } stamp = math.MaxInt64 - if r := c.Rules(big.NewInt(0), stamp); !r.IsSubnetEVM { + if r := c.AvalancheRules(big.NewInt(0), stamp); !r.IsSubnetEVM { t.Errorf("expected %v to be subnet-evm", stamp) } } @@ -250,10 +250,10 @@ func TestActivePrecompiles(t *testing.T) { }, } - rules0 := config.Rules(common.Big0, 0) + rules0 := config.AvalancheRules(common.Big0, 0) require.True(t, rules0.IsPrecompileEnabled(nativeminter.Module.Address)) - rules1 := config.Rules(common.Big0, 1) + rules1 := config.AvalancheRules(common.Big0, 1) require.False(t, rules1.IsPrecompileEnabled(nativeminter.Module.Address)) } diff --git a/params/network_upgrades.go b/params/network_upgrades.go index b22b9fa626..41a4366cb5 100644 --- a/params/network_upgrades.go +++ b/params/network_upgrades.go @@ -3,11 +3,6 @@ package params -import ( - "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/subnet-evm/utils" -) - // MandatoryNetworkUpgrades contains timestamps that enable mandatory network upgrades. // These upgrades are mandatory, meaning that if a node does not upgrade by the // specified timestamp, it will be unable to participate in consensus. 
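These timestamp-based upgrades activate via a simple nil-aware comparison; a minimal sketch of the predicate (the helper mirrors the semantics of utils.IsTimestampForked, which backs checks such as IsSubnetEVM, IsDurango, and IsCancun):

package main

import "fmt"

// isTimestampForked reports whether a fork scheduled at the given
// activation time (nil = never scheduled) is active at block time t.
func isTimestampForked(fork *uint64, t uint64) bool {
	return fork != nil && *fork <= t
}

func main() {
	var durango uint64 = 500
	fmt.Println(isTimestampForked(&durango, 499))  // false: before activation
	fmt.Println(isTimestampForked(&durango, 500))  // true: at activation
	fmt.Println(isTimestampForked(nil, 1_000_000)) // false: never scheduled
}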
@@ -43,14 +38,6 @@ func (m *MandatoryNetworkUpgrades) mandatoryForkOrder() []fork { } } -// GetMandatoryNetworkUpgrades returns the mandatory network upgrades for the specified network ID. -func GetMandatoryNetworkUpgrades(networkID uint32) MandatoryNetworkUpgrades { - return MandatoryNetworkUpgrades{ - SubnetEVMTimestamp: utils.NewUint64(0), - DurangoTimestamp: getUpgradeTime(networkID, version.DurangoTimes), - } -} - // OptionalNetworkUpgrades includes overridable and optional Subnet-EVM network upgrades. // These can be specified in genesis and upgrade configs. // Timestamps can be different for each subnet network. diff --git a/params/protocol_params.go b/params/protocol_params.go index 5668eaa8b2..a3cb8e2604 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -161,20 +161,9 @@ const ( Bls12381MapG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation Bls12381MapG2Gas uint64 = 110000 // Gas price for BLS12-381 mapping field element to G2 operation - // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529, - // up to half the consumed gas could be refunded. Redefined as 1/5th in EIP-3529 - RefundQuotient uint64 = 2 - RefundQuotientEIP3529 uint64 = 5 - - BlobTxBytesPerFieldElement = 32 // Size in bytes of a field element - BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob - BlobTxHashVersion = 0x01 // Version byte of the commitment hash - BlobTxMaxBlobGasPerBlock = 1 << 19 // Maximum consumable blob gas for data blobs per block - BlobTxTargetBlobGasPerBlock = 1 << 18 // Target consumable blob gas for data blobs per block (for 1559-like pricing) - BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size) - BlobTxMinBlobGasprice = 1 // Minimum gas price for data blobs - BlobTxBlobGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for blob gas price - BlobTxPointEvaluationPrecompileGas = 50000 // Gas price for the point evaluation precompile. + BlobTxDataGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size) + BlobTxMinDataGasprice = 1 // Minimum gas price for data blobs + BlobTxDataGaspriceUpdateFraction = 2225652 // Controls the maximum rate of change for data gas price ) // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations diff --git a/params/version.go b/params/version.go index f7ce5902fe..b63bd113a8 100644 --- a/params/version.go +++ b/params/version.go @@ -33,7 +33,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 12 // Minor version component of the current release - VersionPatch = 2 // Patch version component of the current release + VersionPatch = 0 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/plugin/evm/block.go b/plugin/evm/block.go index 683b21d67c..0c414ebcf1 100644 --- a/plugin/evm/block.go +++ b/plugin/evm/block.go @@ -67,7 +67,7 @@ func (b *Block) Accept(context.Context) error { // take place before the accepted log is emitted to subscribers. Use of the // sharedMemoryWriter ensures shared memory requests generated by // precompiles are committed atomically with the vm's lastAcceptedKey. 
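The blob-gas constants removed above fix the per-block blob budget; since each blob consumes a fixed amount of blob gas, the target and maximum translate directly into blob counts:

package main

import "fmt"

const (
	blobGasPerBlob        = 1 << 17 // BlobTxBlobGasPerBlob: gas per data blob
	targetBlobGasPerBlock = 1 << 18 // BlobTxTargetBlobGasPerBlock
	maxBlobGasPerBlock    = 1 << 19 // BlobTxMaxBlobGasPerBlock
)

func main() {
	fmt.Println("target blobs per block:", targetBlobGasPerBlock/blobGasPerBlob) // 2
	fmt.Println("max blobs per block:   ", maxBlobGasPerBlock/blobGasPerBlob)    // 4
}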
- rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()) sharedMemoryWriter := NewSharedMemoryWriter() if err := b.handlePrecompileAccept(rules, sharedMemoryWriter); err != nil { return err @@ -168,7 +168,7 @@ func (b *Block) syntacticVerify() error { } header := b.ethBlock.Header() - rules := b.vm.chainConfig.Rules(header.Number, header.Time) + rules := b.vm.chainConfig.AvalancheRules(header.Number, header.Time) return b.vm.syntacticBlockValidator.SyntacticVerify(b, rules) } @@ -182,7 +182,7 @@ func (b *Block) Verify(context.Context) error { // ShouldVerifyWithContext implements the block.WithVerifyContext interface func (b *Block) ShouldVerifyWithContext(context.Context) (bool, error) { - predicates := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters + predicates := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()).Predicaters // Short circuit early if there are no predicates to verify if len(predicates) == 0 { return false, nil @@ -248,7 +248,7 @@ func (b *Block) verify(predicateContext *precompileconfig.PredicateContext, writ // verifyPredicates verifies the predicates in the block are valid according to predicateContext. func (b *Block) verifyPredicates(predicateContext *precompileconfig.PredicateContext) error { - rules := b.vm.chainConfig.Rules(b.ethBlock.Number(), b.ethBlock.Timestamp()) + rules := b.vm.chainConfig.AvalancheRules(b.ethBlock.Number(), b.ethBlock.Timestamp()) switch { case !rules.IsDurango && rules.PredicatersExist(): diff --git a/plugin/evm/block_verification.go b/plugin/evm/block_verification.go index 0b8ac3d8b7..60611dd031 100644 --- a/plugin/evm/block_verification.go +++ b/plugin/evm/block_verification.go @@ -141,19 +141,13 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } } - // Verify the existence / non-existence of excessBlobGas - cancun := rules.IsCancun - if !cancun && ethHeader.ExcessBlobGas != nil { - return fmt.Errorf("invalid excessBlobGas: have %d, expected nil", ethHeader.ExcessBlobGas) + // Verify the existence / non-existence of excessDataGas + if rules.IsCancun && ethHeader.ExcessDataGas == nil { + return errors.New("missing excessDataGas") } - if !cancun && ethHeader.BlobGasUsed != nil { - return fmt.Errorf("invalid blobGasUsed: have %d, expected nil", ethHeader.BlobGasUsed) - } - if cancun && ethHeader.ExcessBlobGas == nil { - return errors.New("header is missing excessBlobGas") - } - if cancun && ethHeader.BlobGasUsed == nil { - return errors.New("header is missing blobGasUsed") + if !rules.IsCancun && ethHeader.ExcessDataGas != nil { + return fmt.Errorf("invalid excessDataGas: have %d, expected nil", ethHeader.ExcessDataGas) } + return nil } diff --git a/plugin/evm/config.go b/plugin/evm/config.go index 5020114445..4871415327 100644 --- a/plugin/evm/config.go +++ b/plugin/evm/config.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/ava-labs/subnet-evm/core/txpool/legacypool" + "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -106,11 +106,13 @@ type Config struct { RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` // Cache settings - TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) - TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) - TrieDirtyCommitTarget int 
`json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) - TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once - SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) + TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) + TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache) + TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache) + TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) + TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) + TriePrefetcherParallelism int `json:"trie-prefetcher-parallelism"` // Max concurrent disk reads trie prefetcher should perform at once + SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) // Eth Settings Preimages bool `json:"preimages-enabled"` @@ -237,13 +239,13 @@ func (c *Config) SetDefaults() { c.RPCTxFeeCap = defaultRpcTxFeeCap c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled - c.TxPoolPriceLimit = legacypool.DefaultConfig.PriceLimit - c.TxPoolPriceBump = legacypool.DefaultConfig.PriceBump - c.TxPoolAccountSlots = legacypool.DefaultConfig.AccountSlots - c.TxPoolGlobalSlots = legacypool.DefaultConfig.GlobalSlots - c.TxPoolAccountQueue = legacypool.DefaultConfig.AccountQueue - c.TxPoolGlobalQueue = legacypool.DefaultConfig.GlobalQueue - c.TxPoolLifetime.Duration = legacypool.DefaultConfig.Lifetime + c.TxPoolPriceLimit = txpool.DefaultConfig.PriceLimit + c.TxPoolPriceBump = txpool.DefaultConfig.PriceBump + c.TxPoolAccountSlots = txpool.DefaultConfig.AccountSlots + c.TxPoolGlobalSlots = txpool.DefaultConfig.GlobalSlots + c.TxPoolAccountQueue = txpool.DefaultConfig.AccountQueue + c.TxPoolGlobalQueue = txpool.DefaultConfig.GlobalQueue + c.TxPoolLifetime.Duration = txpool.DefaultConfig.Lifetime c.APIMaxDuration.Duration = defaultApiMaxDuration c.WSCPURefillRate.Duration = defaultWsCpuRefillRate diff --git a/plugin/evm/gossip.go b/plugin/evm/gossip.go index 65f68fd4d2..e87bb354d3 100644 --- a/plugin/evm/gossip.go +++ b/plugin/evm/gossip.go @@ -133,8 +133,8 @@ func (g *GossipEthTxPool) Subscribe(ctx context.Context) { if reset { log.Debug("resetting bloom filter", "reason", "reached max filled ratio") - g.mempool.IteratePending(func(tx *txpool.Transaction) bool { - g.bloom.Add(&GossipEthTx{Tx: tx.Tx}) + g.mempool.IteratePending(func(tx *types.Transaction) bool { + g.bloom.Add(&GossipEthTx{Tx: tx}) return true }) } @@ -147,7 +147,7 @@ func (g *GossipEthTxPool) Subscribe(ctx context.Context) { // Add enqueues the transaction to the mempool. Subscribe should be called // to receive an event if tx is actually added to the mempool or not. 
func (g *GossipEthTxPool) Add(tx *GossipEthTx) error { - return g.mempool.Add([]*txpool.Transaction{{Tx: tx.Tx}}, false, false)[0] + return g.mempool.AddRemotes([]*types.Transaction{tx.Tx})[0] } // Has should just return whether or not the [txID] is still in the mempool, @@ -157,8 +157,8 @@ func (g *GossipEthTxPool) Has(txID ids.ID) bool { } func (g *GossipEthTxPool) Iterate(f func(tx *GossipEthTx) bool) { - g.mempool.IteratePending(func(tx *txpool.Transaction) bool { - return f(&GossipEthTx{Tx: tx.Tx}) + g.mempool.IteratePending(func(tx *types.Transaction) bool { + return f(&GossipEthTx{Tx: tx}) }) } diff --git a/plugin/evm/gossip_test.go b/plugin/evm/gossip_test.go index b62953203a..e1b47ec972 100644 --- a/plugin/evm/gossip_test.go +++ b/plugin/evm/gossip_test.go @@ -14,7 +14,6 @@ import ( "github.com/ava-labs/subnet-evm/core" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/core/txpool" - "github.com/ava-labs/subnet-evm/core/txpool/legacypool" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/core/vm" "github.com/ava-labs/subnet-evm/params" @@ -47,8 +46,8 @@ func TestGossipSubscribe(t *testing.T) { require.NoError(err) txPool := setupPoolWithConfig(t, params.TestChainConfig, addr) - defer txPool.Close() - txPool.SetGasTip(common.Big1) + defer txPool.Stop() + txPool.SetGasPrice(common.Big1) txPool.SetMinFee(common.Big0) gossipTxPool, err := NewGossipEthTxPool(txPool, prometheus.NewRegistry()) @@ -98,11 +97,8 @@ func setupPoolWithConfig(t *testing.T, config *params.ChainConfig, fundedAddress } chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) require.NoError(t, err) - testTxPoolConfig := legacypool.DefaultConfig - legacyPool := legacypool.New(testTxPoolConfig, chain) + testTxPoolConfig := txpool.DefaultConfig + pool := txpool.NewTxPool(testTxPoolConfig, config, chain) - txPool, err := txpool.New(new(big.Int).SetUint64(testTxPoolConfig.PriceLimit), chain, []txpool.SubPool{legacyPool}) - require.NoError(t, err) - - return txPool + return pool } diff --git a/plugin/evm/gossiper_eth_gossiping_test.go b/plugin/evm/gossiper_eth_gossiping_test.go index 1ce8bc753b..a2bb95c93b 100644 --- a/plugin/evm/gossiper_eth_gossiping_test.go +++ b/plugin/evm/gossiper_eth_gossiping_test.go @@ -65,7 +65,7 @@ func getValidEthTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*type gasPrice, []byte(strings.Repeat("aaaaaaaaaa", 100))), types.HomesteadSigner{}, key) - tx.SetTime(time.Now().Add(-1 * time.Minute)) + tx.SetFirstSeen(time.Now().Add(-1 * time.Minute)) res[i] = tx } return res @@ -92,7 +92,7 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) { err := vm.Shutdown(context.Background()) assert.NoError(err) }() - vm.txPool.SetGasTip(common.Big1) + vm.txPool.SetGasPrice(common.Big1) vm.txPool.SetMinFee(common.Big0) var ( diff --git a/plugin/evm/handler.go b/plugin/evm/handler.go index 2c955b5e83..2915d422a2 100644 --- a/plugin/evm/handler.go +++ b/plugin/evm/handler.go @@ -55,11 +55,7 @@ func (h *GossipHandler) HandleEthTxs(nodeID ids.NodeID, msg message.EthTxsGossip return nil } h.stats.IncEthTxsGossipReceived() - wrapped := make([]*txpool.Transaction, len(txs)) - for i, tx := range txs { - wrapped[i] = &txpool.Transaction{Tx: tx} - } - errs := h.txPool.Add(wrapped, false, false) + errs := h.txPool.AddRemotes(txs) for i, err := range errs { if err != nil { log.Trace( diff --git a/plugin/evm/tx_gossip_test.go b/plugin/evm/tx_gossip_test.go index c2ef03df62..fc9355ed84 100644 
--- a/plugin/evm/tx_gossip_test.go +++ b/plugin/evm/tx_gossip_test.go @@ -27,7 +27,6 @@ import ( "google.golang.org/protobuf/proto" - "github.com/ava-labs/subnet-evm/core/txpool" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/utils" ) @@ -116,7 +115,7 @@ func TestEthTxGossip(t *testing.T) { signedTx, err := types.SignTx(tx, types.NewEIP155Signer(vm.chainConfig.ChainID), key) require.NoError(err) - errs := vm.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, true) + errs := vm.txPool.AddLocals([]*types.Transaction{signedTx}) require.Len(errs, 1) require.Nil(errs[0]) @@ -182,7 +181,7 @@ func TestEthTxPushGossipOutbound(t *testing.T) { require.NoError(err) // issue a tx - require.NoError(vm.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, true)[0]) + require.NoError(vm.txPool.AddLocal(signedTx)) vm.ethTxPushGossiper.Get().Add(&GossipEthTx{signedTx}) sent := <-sender.SentAppGossip diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go index 6ac0ce42d7..37c4aa764e 100644 --- a/plugin/evm/vm.go +++ b/plugin/evm/vm.go @@ -403,6 +403,8 @@ func (vm *VM) Initialize( vm.ethConfig.Preimages = vm.config.Preimages vm.ethConfig.Pruning = vm.config.Pruning vm.ethConfig.TrieCleanCache = vm.config.TrieCleanCache + vm.ethConfig.TrieCleanJournal = vm.config.TrieCleanJournal + vm.ethConfig.TrieCleanRejournal = vm.config.TrieCleanRejournal.Duration vm.ethConfig.TrieDirtyCache = vm.config.TrieDirtyCache vm.ethConfig.TrieDirtyCommitTarget = vm.config.TrieDirtyCommitTarget vm.ethConfig.TriePrefetcherParallelism = vm.config.TriePrefetcherParallelism @@ -544,7 +546,7 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash, ethConfig ethconfig. vm.eth.SetEtherbase(ethConfig.Miner.Etherbase) vm.txPool = vm.eth.TxPool() vm.txPool.SetMinFee(vm.chainConfig.FeeConfig.MinBaseFee) - vm.txPool.SetGasTip(big.NewInt(0)) + vm.txPool.SetGasPrice(big.NewInt(0)) vm.blockChain = vm.eth.BlockChain() vm.miner = vm.eth.Miner() diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go index 3cba6fd4ea..ea96cc7689 100644 --- a/plugin/evm/vm_test.go +++ b/plugin/evm/vm_test.go @@ -304,7 +304,7 @@ func TestVMUpgrades(t *testing.T) { t.Run(test.name, func(t *testing.T) { _, vm, _, _ := GenesisVM(t, true, test.genesis, "", "") - if gasPrice := vm.txPool.GasTip(); gasPrice.Cmp(test.expectedGasPrice) != 0 { + if gasPrice := vm.txPool.GasPrice(); gasPrice.Cmp(test.expectedGasPrice) != 0 { t.Fatalf("Expected pool gas price to be %d but found %d", test.expectedGasPrice, gasPrice) } defer func() { @@ -2082,7 +2082,7 @@ func TestBuildSubnetEVMBlock(t *testing.T) { } txs[i] = signedTx } - errs := vm.txPool.AddRemotesSync(txs) + errs := vm.txPool.AddRemotes(txs) for i, err := range errs { if err != nil { t.Fatalf("Failed to add tx at index %d: %s", i, err) @@ -2644,7 +2644,7 @@ func TestFeeManagerChangeFee(t *testing.T) { t.Fatal(err) } - err = vm.txPool.AddRemotesSync([]*types.Transaction{signedTx2})[0] + err = vm.txPool.AddRemote(signedTx2) require.ErrorIs(t, err, txpool.ErrUnderpriced) } diff --git a/precompile/contract/interfaces.go b/precompile/contract/interfaces.go index 5ac6baa486..20b4ab6b2f 100644 --- a/precompile/contract/interfaces.go +++ b/precompile/contract/interfaces.go @@ -39,6 +39,8 @@ type StateDB interface { GetTxHash() common.Hash + Suicide(common.Address) bool + Snapshot() int RevertToSnapshot(int) } diff --git a/precompile/contract/mocks.go b/precompile/contract/mocks.go index 6510d2d738..00e726032e 100644 --- a/precompile/contract/mocks.go +++ b/precompile/contract/mocks.go @@ -383,3 
+383,17 @@ func (mr *MockStateDBMockRecorder) Snapshot() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Snapshot", reflect.TypeOf((*MockStateDB)(nil).Snapshot)) } + +// Suicide mocks base method. +func (m *MockStateDB) Suicide(arg0 common.Address) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Suicide", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Suicide indicates an expected call of Suicide. +func (mr *MockStateDBMockRecorder) Suicide(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Suicide", reflect.TypeOf((*MockStateDB)(nil).Suicide), arg0) +} diff --git a/rpc/client.go b/rpc/client.go index 7a9047f310..989441a6f8 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -43,15 +43,14 @@ import ( var ( ErrBadResult = errors.New("bad result in JSON-RPC response") ErrClientQuit = errors.New("client is closed") - ErrNoResult = errors.New("JSON-RPC response has no result") - ErrMissingBatchResponse = errors.New("response batch did not contain a response to this call") + ErrNoResult = errors.New("no result in JSON-RPC response") ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow") errClientReconnected = errors.New("client reconnected") errDead = errors.New("connection lost") ) -// Timeouts const ( + // Timeouts defaultDialTimeout = 10 * time.Second // used if context has no deadline subscribeTimeout = 10 * time.Second // overall timeout eth_subscribe, rpc_modules calls ) @@ -94,10 +93,6 @@ type Client struct { // This function, if non-nil, is called when the connection is lost. reconnectFunc reconnectFunc - // config fields - batchItemLimit int - batchResponseMaxSize int - // writeConn is used for writing to the connection on the caller's goroutine. It should // only be accessed outside of dispatch, with the write lock held. The write lock is // taken by sending on reqInit and released by sending on reqSent. @@ -128,7 +123,7 @@ func (c *Client) newClientConn(conn ServerCodec, apiMaxDuration, refillRate, max ctx := context.Background() ctx = context.WithValue(ctx, clientContextKey{}, c) ctx = context.WithValue(ctx, peerInfoContextKey{}, conn.peerInfo()) - handler := newHandler(ctx, conn, c.idgen, c.services, c.batchItemLimit, c.batchResponseMaxSize) + handler := newHandler(ctx, conn, c.idgen, c.services) // When [apiMaxDuration] or [refillRate]/[maxStored] is 0 (as is the case for // all client invocations of this function), it is ignored. @@ -147,17 +142,14 @@ type readOp struct { batch bool } -// requestOp represents a pending request. This is used for both batch and non-batch -// requests. type requestOp struct { - ids []json.RawMessage - err error - resp chan []*jsonrpcMessage // the response goes here - sub *ClientSubscription // set for Subscribe requests. - hadResponse bool // true when the request was responded to + ids []json.RawMessage + err error + resp chan *jsonrpcMessage // receives up to len(ids) responses + sub *ClientSubscription // only set for EthSubscribe requests } -func (op *requestOp) wait(ctx context.Context, c *Client) ([]*jsonrpcMessage, error) { +func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, error) { select { case <-ctx.Done(): // Send the timeout to dispatch so it can remove the request IDs. 
@@ -233,7 +225,7 @@ func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (* return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme) } - return newClient(ctx, cfg, reconnect) + return newClient(ctx, reconnect) } // ClientFromContext retrieves the client from the context, if any. This can be used to perform @@ -243,43 +235,34 @@ func ClientFromContext(ctx context.Context) (*Client, bool) { return client, ok } -func newClient(initctx context.Context, cfg *clientConfig, connect reconnectFunc) (*Client, error) { +func newClient(initctx context.Context, connect reconnectFunc) (*Client, error) { conn, err := connect(initctx) if err != nil { return nil, err } - c := initClient(conn, new(serviceRegistry), cfg, 0, 0, 0) + c := initClient(conn, randomIDGenerator(), new(serviceRegistry), 0, 0, 0) c.reconnectFunc = connect return c, nil } -func initClient(conn ServerCodec, services *serviceRegistry, cfg *clientConfig, apiMaxDuration, refillRate, maxStored time.Duration) *Client { +func initClient(conn ServerCodec, idgen func() ID, services *serviceRegistry, apiMaxDuration, refillRate, maxStored time.Duration) *Client { _, isHTTP := conn.(*httpConn) c := &Client{ - isHTTP: isHTTP, - services: services, - idgen: cfg.idgen, - batchItemLimit: cfg.batchItemLimit, - batchResponseMaxSize: cfg.batchResponseLimit, - writeConn: conn, - close: make(chan struct{}), - closing: make(chan struct{}), - didClose: make(chan struct{}), - reconnected: make(chan ServerCodec), - readOp: make(chan readOp), - readErr: make(chan error), - reqInit: make(chan *requestOp), - reqSent: make(chan error, 1), - reqTimeout: make(chan *requestOp), - } - - // Set defaults. - if c.idgen == nil { - c.idgen = randomIDGenerator() - } - - // Launch the main loop. - if !isHTTP { + idgen: idgen, + isHTTP: isHTTP, + services: services, + writeConn: conn, + close: make(chan struct{}), + closing: make(chan struct{}), + didClose: make(chan struct{}), + reconnected: make(chan ServerCodec), + readOp: make(chan readOp), + readErr: make(chan error), + reqInit: make(chan *requestOp), + reqSent: make(chan error, 1), + reqTimeout: make(chan *requestOp), + } + if !c.isHTTP { go c.dispatch(conn, apiMaxDuration, refillRate, maxStored) } return c @@ -356,10 +339,7 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str if err != nil { return err } - op := &requestOp{ - ids: []json.RawMessage{msg.ID}, - resp: make(chan []*jsonrpcMessage, 1), - } + op := &requestOp{ids: []json.RawMessage{msg.ID}, resp: make(chan *jsonrpcMessage, 1)} if c.isHTTP { err = c.sendHTTP(ctx, op, msg) @@ -371,12 +351,9 @@ func (c *Client) CallContext(ctx context.Context, result interface{}, method str } // dispatch has accepted the request and will close the channel when it quits. - batchresp, err := op.wait(ctx, c) - if err != nil { + switch resp, err := op.wait(ctx, c); { + case err != nil: return err - } - resp := batchresp[0] - switch { case resp.Error != nil: return resp.Error case len(resp.Result) == 0: @@ -417,7 +394,7 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { ) op := &requestOp{ ids: make([]json.RawMessage, len(b)), - resp: make(chan []*jsonrpcMessage, 1), + resp: make(chan *jsonrpcMessage, len(b)), } for i, elem := range b { msg, err := c.newMessage(elem.Method, elem.Args...) 
@@ -435,48 +412,28 @@ func (c *Client) BatchCallContext(ctx context.Context, b []BatchElem) error { } else { err = c.send(ctx, op, msgs) } - if err != nil { - return err - } - - batchresp, err := op.wait(ctx, c) - if err != nil { - return err - } // Wait for all responses to come back. - for n := 0; n < len(batchresp) && err == nil; n++ { - resp := batchresp[n] - if resp == nil { - // Ignore null responses. These can happen for batches sent via HTTP. - continue + for n := 0; n < len(b) && err == nil; n++ { + var resp *jsonrpcMessage + resp, err = op.wait(ctx, c) + if err != nil { + break } - // Find the element corresponding to this response. - index, ok := byID[string(resp.ID)] - if !ok { + // The element is guaranteed to be present because dispatch + // only sends valid IDs to our channel. + elem := &b[byID[string(resp.ID)]] + if resp.Error != nil { + elem.Error = resp.Error continue } - delete(byID, string(resp.ID)) - - // Assign result and error. - elem := &b[index] - switch { - case resp.Error != nil: - elem.Error = resp.Error - case resp.Result == nil: + if len(resp.Result) == 0 { elem.Error = ErrNoResult - default: - elem.Error = json.Unmarshal(resp.Result, elem.Result) + continue } + elem.Error = json.Unmarshal(resp.Result, elem.Result) } - - // Check that all expected responses have been received. - for _, index := range byID { - elem := &b[index] - elem.Error = ErrMissingBatchResponse - } - return err } @@ -537,7 +494,7 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf } op := &requestOp{ ids: []json.RawMessage{msg.ID}, - resp: make(chan []*jsonrpcMessage, 1), + resp: make(chan *jsonrpcMessage), sub: newClientSubscription(c, namespace, chanVal), } @@ -552,13 +509,6 @@ func (c *Client) Subscribe(ctx context.Context, namespace string, channel interf return op.sub, nil } -// SupportsSubscriptions reports whether subscriptions are supported by the client -// transport. When this returns false, Subscribe and related methods will return -// ErrNotificationsUnsupported. -func (c *Client) SupportsSubscriptions() bool { - return !c.isHTTP -} - func (c *Client) newMessage(method string, paramsIn ...interface{}) (*jsonrpcMessage, error) { msg := &jsonrpcMessage{Version: vsn, ID: c.nextID(), Method: method} if paramsIn != nil { // prevent sending "params":null diff --git a/rpc/client_opt.go b/rpc/client_opt.go index dfbef66b86..c1b9931253 100644 --- a/rpc/client_opt.go +++ b/rpc/client_opt.go @@ -38,18 +38,11 @@ type ClientOption interface { } type clientConfig struct { - // HTTP settings httpClient *http.Client httpHeaders http.Header httpAuth HTTPAuth - // WebSocket options wsDialer *websocket.Dialer - - // RPC handler options - idgen func() ID - batchItemLimit int - batchResponseLimit int } func (cfg *clientConfig) initHeaders() { @@ -121,25 +114,3 @@ func WithHTTPAuth(a HTTPAuth) ClientOption { // Usually, HTTPAuth functions will call h.Set("authorization", "...") to add // auth information to the request. type HTTPAuth func(h http.Header) error - -// WithBatchItemLimit changes the maximum number of items allowed in batch requests. -// -// Note: this option applies when processing incoming batch requests. It does not affect -// batch requests sent by the client. -func WithBatchItemLimit(limit int) ClientOption { - return optionFunc(func(cfg *clientConfig) { - cfg.batchItemLimit = limit - }) -} - -// WithBatchResponseSizeLimit changes the maximum number of response bytes that can be -// generated for batch requests. 
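
// Illustrative sketch (not part of the diff): with the revert, BatchCallContext
// reads one response at a time from the shared channel and matches it by ID.
// Caller-side usage is unchanged; the endpoint below is a placeholder:
package main

import (
	"context"
	"fmt"

	"github.com/ava-labs/subnet-evm/rpc"
)

func main() {
	client, err := rpc.Dial("http://127.0.0.1:9650/ext/bc/C/rpc") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer client.Close()

	batch := []rpc.BatchElem{
		{Method: "web3_clientVersion", Result: new(string)},
		{Method: "net_version", Result: new(string)},
	}
	if err := client.BatchCallContext(context.Background(), batch); err != nil {
		panic(err) // transport-level failure; per-element errors land in elem.Error
	}
	for _, elem := range batch {
		if elem.Error != nil {
			fmt.Println(elem.Method, "failed:", elem.Error)
			continue
		}
		fmt.Println(elem.Method, "=>", *elem.Result.(*string))
	}
}
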
When this limit is reached, further calls in the batch -// will not be processed. -// -// Note: this option applies when processing incoming batch requests. It does not affect -// batch requests sent by the client. -func WithBatchResponseSizeLimit(sizeLimit int) ClientOption { - return optionFunc(func(cfg *clientConfig) { - cfg.batchResponseLimit = sizeLimit - }) -} diff --git a/rpc/client_test.go b/rpc/client_test.go index ede8045fac..4ec0ef4122 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -38,7 +38,6 @@ import ( "reflect" "runtime" "strings" - "sync" "testing" "time" @@ -178,12 +177,10 @@ func TestClientBatchRequest(t *testing.T) { } } -// This checks that, for HTTP connections, the length of batch responses is validated to -// match the request exactly. func TestClientBatchRequest_len(t *testing.T) { b, err := json.Marshal([]jsonrpcMessage{ - {Version: "2.0", ID: json.RawMessage("1"), Result: json.RawMessage(`"0x1"`)}, - {Version: "2.0", ID: json.RawMessage("2"), Result: json.RawMessage(`"0x2"`)}, + {Version: "2.0", ID: json.RawMessage("1"), Method: "foo", Result: json.RawMessage(`"0x1"`)}, + {Version: "2.0", ID: json.RawMessage("2"), Method: "bar", Result: json.RawMessage(`"0x2"`)}, }) if err != nil { t.Fatal("failed to encode jsonrpc message:", err) @@ -196,102 +193,37 @@ func TestClientBatchRequest_len(t *testing.T) { })) t.Cleanup(s.Close) - t.Run("too-few", func(t *testing.T) { - client, err := Dial(s.URL) - if err != nil { - t.Fatal("failed to dial test server:", err) - } - defer client.Close() + client, err := Dial(s.URL) + if err != nil { + t.Fatal("failed to dial test server:", err) + } + defer client.Close() + t.Run("too-few", func(t *testing.T) { batch := []BatchElem{ - {Method: "foo", Result: new(string)}, - {Method: "bar", Result: new(string)}, - {Method: "baz", Result: new(string)}, + {Method: "foo"}, + {Method: "bar"}, + {Method: "baz"}, } ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) defer cancelFn() - - if err := client.BatchCallContext(ctx, batch); err != nil { - t.Fatal("error:", err) - } - for i, elem := range batch[:2] { - if elem.Error != nil { - t.Errorf("expected no error for batch element %d, got %q", i, elem.Error) - } - } - for i, elem := range batch[2:] { - if elem.Error != ErrMissingBatchResponse { - t.Errorf("wrong error %q for batch element %d", elem.Error, i+2) - } + if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { + t.Errorf("expected %q but got: %v", ErrBadResult, err) } }) t.Run("too-many", func(t *testing.T) { - client, err := Dial(s.URL) - if err != nil { - t.Fatal("failed to dial test server:", err) - } - defer client.Close() - batch := []BatchElem{ - {Method: "foo", Result: new(string)}, + {Method: "foo"}, } ctx, cancelFn := context.WithTimeout(context.Background(), time.Second) defer cancelFn() - - if err := client.BatchCallContext(ctx, batch); err != nil { - t.Fatal("error:", err) - } - for i, elem := range batch[:1] { - if elem.Error != nil { - t.Errorf("expected no error for batch element %d, got %q", i, elem.Error) - } - } - for i, elem := range batch[1:] { - if elem.Error != ErrMissingBatchResponse { - t.Errorf("wrong error %q for batch element %d", elem.Error, i+2) - } + if err := client.BatchCallContext(ctx, batch); !errors.Is(err, ErrBadResult) { + t.Errorf("expected %q but got: %v", ErrBadResult, err) } }) } -// This checks that the client can handle the case where the server doesn't -// respond to all requests in a batch. 
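
// Illustrative sketch (not part of the diff): the reverted tests above pin the
// old failure mode, in which a length-mismatched HTTP batch response fails the
// whole call with ErrBadResult rather than tagging the missing elements.
// A caller-side check under that behavior:
package main

import (
	"context"
	"errors"
	"log"

	"github.com/ava-labs/subnet-evm/rpc"
)

func checkBatch(ctx context.Context, client *rpc.Client, batch []rpc.BatchElem) {
	err := client.BatchCallContext(ctx, batch)
	switch {
	case errors.Is(err, rpc.ErrBadResult):
		// The whole batch is unusable; per-element Error fields were never set.
		log.Println("malformed batch response:", err)
	case err != nil:
		log.Println("transport error:", err)
	}
}
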
-func TestClientBatchRequestLimit(t *testing.T) { - server := newTestServer() - defer server.Stop() - server.SetBatchLimits(2, 100000) - client := DialInProc(server) - - batch := []BatchElem{ - {Method: "foo"}, - {Method: "bar"}, - {Method: "baz"}, - } - err := client.BatchCall(batch) - if err != nil { - t.Fatal("unexpected error:", err) - } - - // Check that the first response indicates an error with batch size. - var err0 Error - if !errors.As(batch[0].Error, &err0) { - t.Log("error zero:", batch[0].Error) - t.Fatalf("batch elem 0 has wrong error type: %T", batch[0].Error) - } else { - if err0.ErrorCode() != -32600 || err0.Error() != errMsgBatchTooLarge { - t.Fatalf("wrong error on batch elem zero: %v", err0) - } - } - - // Check that remaining response batch elements are reported as absent. - for i, elem := range batch[1:] { - if elem.Error != ErrMissingBatchResponse { - t.Fatalf("batch elem %d has unexpected error: %v", i+1, elem.Error) - } - } -} - func TestClientNotify(t *testing.T) { server := newTestServer() defer server.Stop() @@ -319,85 +251,99 @@ func testClientCancel(transport string, t *testing.T) { server := newTestServer() defer server.Stop() +} - // What we want to achieve is that the context gets canceled - // at various stages of request processing. The interesting cases - // are: - // - cancel during dial - // - cancel while performing a HTTP request - // - cancel while waiting for a response - // - // To trigger those, the times are chosen such that connections - // are killed within the deadline for every other call (maxKillTimeout - // is 2x maxCancelTimeout). - // - // Once a connection is dead, there is a fair chance it won't connect - // successfully because the accept is delayed by 1s. - maxContextCancelTimeout := 300 * time.Millisecond - fl := &flakeyListener{ - maxAcceptDelay: 1 * time.Second, - maxKillTimeout: 600 * time.Millisecond, - } - - var client *Client - switch transport { - case "ws", "http": - c, hs := httpTestClient(server, transport, fl) - defer hs.Close() - client = c - // case "ipc": - // c, l := ipcTestClient(server, fl) - // defer l.Close() - // client = c - default: - panic("unknown transport: " + transport) - } +// func TestClientCancelIPC(t *testing.T) { testClientCancel("ipc", t) } - // The actual test starts here. - var ( - wg sync.WaitGroup - nreqs = 10 - ncallers = 10 - ) - caller := func(index int) { - defer wg.Done() - for i := 0; i < nreqs; i++ { - var ( - ctx context.Context - cancel func() - timeout = time.Duration(rand.Int63n(int64(maxContextCancelTimeout))) - ) - if index < ncallers/2 { - // For half of the callers, create a context without deadline - // and cancel it later. - ctx, cancel = context.WithCancel(context.Background()) - time.AfterFunc(timeout, cancel) - } else { - // For the other half, create a context with a deadline instead. This is - // different because the context deadline is used to set the socket write - // deadline. - ctx, cancel = context.WithTimeout(context.Background(), timeout) - } +// // This test checks that requests made through CallContext can be canceled by canceling +// // the context. +// func testClientCancel(transport string, t *testing.T) { +// // These tests take a lot of time, run them all at once. +// // You probably want to run with -parallel 1 or comment out +// // the call to t.Parallel if you enable the logging. 
+// t.Parallel() + +// server := newTestServer() +// defer server.Stop() + +// // What we want to achieve is that the context gets canceled +// // at various stages of request processing. The interesting cases +// // are: +// // - cancel during dial +// // - cancel while performing a HTTP request +// // - cancel while waiting for a response +// // +// // To trigger those, the times are chosen such that connections +// // are killed within the deadline for every other call (maxKillTimeout +// // is 2x maxCancelTimeout). +// // +// // Once a connection is dead, there is a fair chance it won't connect +// // successfully because the accept is delayed by 1s. +// maxContextCancelTimeout := 300 * time.Millisecond +// fl := &flakeyListener{ +// maxAcceptDelay: 1 * time.Second, +// maxKillTimeout: 600 * time.Millisecond, +// } - // Now perform a call with the context. - // The key thing here is that no call will ever complete successfully. - err := client.CallContext(ctx, nil, "test_block") - switch { - case err == nil: - _, hasDeadline := ctx.Deadline() - t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline) - // default: - // t.Logf("got expected error with %v wait time: %v", timeout, err) - } - cancel() - } - } - wg.Add(ncallers) - for i := 0; i < ncallers; i++ { - go caller(i) - } - wg.Wait() -} +// var client *Client +// switch transport { +// case "ws", "http": +// c, hs := httpTestClient(server, transport, fl) +// defer hs.Close() +// client = c +// case "ipc": +// c, l := ipcTestClient(server, fl) +// defer l.Close() +// client = c +// default: +// panic("unknown transport: " + transport) +// } + +// // The actual test starts here. +// var ( +// wg sync.WaitGroup +// nreqs = 10 +// ncallers = 10 +// ) +// caller := func(index int) { +// defer wg.Done() +// for i := 0; i < nreqs; i++ { +// var ( +// ctx context.Context +// cancel func() +// timeout = time.Duration(rand.Int63n(int64(maxContextCancelTimeout))) +// ) +// if index < ncallers/2 { +// // For half of the callers, create a context without deadline +// // and cancel it later. +// ctx, cancel = context.WithCancel(context.Background()) +// time.AfterFunc(timeout, cancel) +// } else { +// // For the other half, create a context with a deadline instead. This is +// // different because the context deadline is used to set the socket write +// // deadline. +// ctx, cancel = context.WithTimeout(context.Background(), timeout) +// } + +// // Now perform a call with the context. +// // The key thing here is that no call will ever complete successfully. +// err := client.CallContext(ctx, nil, "test_block") +// switch { +// case err == nil: +// _, hasDeadline := ctx.Deadline() +// t.Errorf("no error for call with %v wait time (deadline: %v)", timeout, hasDeadline) +// // default: +// // t.Logf("got expected error with %v wait time: %v", timeout, err) +// } +// cancel() +// } +// } +// wg.Add(ncallers) +// for i := 0; i < ncallers; i++ { +// go caller(i) +// } +// wg.Wait() +// } func TestClientSubscribeInvalidArg(t *testing.T) { server := newTestServer() @@ -564,8 +510,7 @@ func TestClientSubscriptionUnsubscribeServer(t *testing.T) { defer srv.Stop() // Create the client on the other end of the pipe. 
- cfg := new(clientConfig) - client, _ := newClient(context.Background(), cfg, func(context.Context) (ServerCodec, error) { + client, _ := newClient(context.Background(), func(context.Context) (ServerCodec, error) { return NewCodec(p2), nil }) defer client.Close() diff --git a/rpc/errors.go b/rpc/errors.go index 44094715e1..7e39510087 100644 --- a/rpc/errors.go +++ b/rpc/errors.go @@ -68,19 +68,15 @@ var ( ) const ( - errcodeDefault = -32000 - errcodeTimeout = -32002 - errcodeResponseTooLarge = -32003 - errcodePanic = -32603 - errcodeMarshalError = -32603 - - legacyErrcodeNotificationsUnsupported = -32001 + errcodeDefault = -32000 + errcodeNotificationsUnsupported = -32001 + errcodeTimeout = -32002 + errcodePanic = -32603 + errcodeMarshalError = -32603 ) const ( - errMsgTimeout = "request timed out" - errMsgResponseTooLarge = "response too large" - errMsgBatchTooLarge = "batch too large" + errMsgTimeout = "request timed out" ) type methodNotFoundError struct{ method string } @@ -91,34 +87,6 @@ func (e *methodNotFoundError) Error() string { return fmt.Sprintf("the method %s does not exist/is not available", e.method) } -type notificationsUnsupportedError struct{} - -func (e notificationsUnsupportedError) Error() string { - return "notifications not supported" -} - -func (e notificationsUnsupportedError) ErrorCode() int { return -32601 } - -// Is checks for equivalence to another error. Here we define that all errors with code -// -32601 (method not found) are equivalent to notificationsUnsupportedError. This is -// done to enable the following pattern: -// -// sub, err := client.Subscribe(...) -// if errors.Is(err, rpc.ErrNotificationsUnsupported) { -// // server doesn't support subscriptions -// } -func (e notificationsUnsupportedError) Is(other error) bool { - if other == (notificationsUnsupportedError{}) { - return true - } - rpcErr, ok := other.(Error) - if ok { - code := rpcErr.ErrorCode() - return code == -32601 || code == legacyErrcodeNotificationsUnsupported - } - return false -} - type subscriptionNotFoundError struct{ namespace, subscription string } func (e *subscriptionNotFoundError) ErrorCode() int { return -32601 } diff --git a/rpc/handler.go b/rpc/handler.go index 8ef948696f..00bafad427 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -61,19 +61,17 @@ import ( // h.removeRequestOp(op) // timeout, etc. 
// } type handler struct { - reg *serviceRegistry - unsubscribeCb *callback - idgen func() ID // subscription ID generator - respWait map[string]*requestOp // active client requests - clientSubs map[string]*ClientSubscription // active client subscriptions - callWG sync.WaitGroup // pending call goroutines - rootCtx context.Context // canceled by close() - cancelRoot func() // cancel function for rootCtx - conn jsonWriter // where responses will be sent - log log.Logger - allowSubscribe bool - batchRequestLimit int - batchResponseMaxSize int + reg *serviceRegistry + unsubscribeCb *callback + idgen func() ID // subscription ID generator + respWait map[string]*requestOp // active client requests + clientSubs map[string]*ClientSubscription // active client subscriptions + callWG sync.WaitGroup // pending call goroutines + rootCtx context.Context // canceled by close() + cancelRoot func() // cancel function for rootCtx + conn jsonWriter // where responses will be sent + log log.Logger + allowSubscribe bool subLock sync.Mutex serverSubs map[ID]*Subscription @@ -89,21 +87,19 @@ type callProc struct { procStart time.Time } -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, batchRequestLimit, batchResponseMaxSize int) *handler { +func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) h := &handler{ - reg: reg, - idgen: idgen, - conn: conn, - respWait: make(map[string]*requestOp), - clientSubs: make(map[string]*ClientSubscription), - rootCtx: rootCtx, - cancelRoot: cancelRoot, - allowSubscribe: true, - serverSubs: make(map[ID]*Subscription), - log: log.Root(), - batchRequestLimit: batchRequestLimit, - batchResponseMaxSize: batchResponseMaxSize, + reg: reg, + idgen: idgen, + conn: conn, + respWait: make(map[string]*requestOp), + clientSubs: make(map[string]*ClientSubscription), + rootCtx: rootCtx, + cancelRoot: cancelRoot, + allowSubscribe: true, + serverSubs: make(map[ID]*Subscription), + log: log.Root(), } if conn.remoteAddr() != "" { h.log = h.log.New("conn", conn.remoteAddr()) @@ -155,15 +151,16 @@ func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) { b.doWrite(ctx, conn, false) } -// respondWithError sends the responses added so far. For the remaining unanswered call -// messages, it responds with the given error. -func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) { +// timeout sends the responses added so far. For the remaining unanswered call +// messages, it sends a timeout error response. +func (b *batchCallBuffer) timeout(ctx context.Context, conn jsonWriter) { b.mutex.Lock() defer b.mutex.Unlock() for _, msg := range b.calls { if !msg.isNotification() { - b.resp = append(b.resp, msg.errorResponse(err)) + resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) + b.resp = append(b.resp, resp) } } b.doWrite(ctx, conn, true) @@ -203,24 +200,17 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { }) return } - // Apply limit on total number of requests. - if h.batchRequestLimit != 0 && len(msgs) > h.batchRequestLimit { - h.startCallProc(func(cp *callProc) { - h.respondWithBatchTooLarge(cp, msgs) - }) - return - } - // Handle non-call messages first. - // Here we need to find the requestOp that sent the request batch. 
+ // Handle non-call messages first: calls := make([]*jsonrpcMessage, 0, len(msgs)) - h.handleResponses(msgs, func(msg *jsonrpcMessage) { - calls = append(calls, msg) - }) + for _, msg := range msgs { + if handled := h.handleImmediate(msg); !handled { + calls = append(calls, msg) + } + } if len(calls) == 0 { return } - // Process calls on a goroutine because they may block indefinitely: h.startCallProc(func(cp *callProc) { var ( @@ -238,12 +228,10 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { if timeout, ok := ContextRequestTimeout(cp.ctx); ok { timer = time.AfterFunc(timeout, func() { cancel() - err := &internalServerError{errcodeTimeout, errMsgTimeout} - callBuffer.respondWithError(cp.ctx, h.conn, err) + callBuffer.timeout(cp.ctx, h.conn) }) } - responseBytes := 0 for { // No need to handle rest of calls if timed out. if cp.ctx.Err() != nil { @@ -255,86 +243,59 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { } resp := h.handleCallMsg(cp, msg) callBuffer.pushResponse(resp) - if resp != nil && h.batchResponseMaxSize != 0 { - responseBytes += len(resp.Result) - if responseBytes > h.batchResponseMaxSize { - err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge} - callBuffer.respondWithError(cp.ctx, h.conn, err) - break - } - } } if timer != nil { timer.Stop() } - - h.addSubscriptions(cp.notifiers) callBuffer.write(cp.ctx, h.conn) + h.addSubscriptions(cp.notifiers) for _, n := range cp.notifiers { n.activate() } }) } -func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) { - resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge}) - // Find the first call and add its "id" field to the error. - // This is the best we can do, given that the protocol doesn't have a way - // of reporting an error for the entire batch. - for _, msg := range batch { - if msg.isCall() { - resp.ID = msg.ID - break - } +// handleMsg handles a single message. +func (h *handler) handleMsg(msg *jsonrpcMessage) { + if ok := h.handleImmediate(msg); ok { + return } - h.conn.writeJSONSkipDeadline(cp.ctx, []*jsonrpcMessage{resp}, true, h.deadlineContext > 0) -} + h.startCallProc(func(cp *callProc) { + var ( + responded sync.Once + timer *time.Timer + cancel context.CancelFunc + ) + cp.ctx, cancel = context.WithCancel(cp.ctx) + defer cancel() -// handleMsg handles a single non-batch message. -func (h *handler) handleMsg(msg *jsonrpcMessage) { - msgs := []*jsonrpcMessage{msg} - h.handleResponses(msgs, func(msg *jsonrpcMessage) { - h.startCallProc(func(cp *callProc) { - h.handleNonBatchCall(cp, msg) - }) - }) -} + // Cancel the request context after timeout and send an error response. Since the + // running method might not return immediately on timeout, we must wait for the + // timeout concurrently with processing the request. + if timeout, ok := ContextRequestTimeout(cp.ctx); ok { + timer = time.AfterFunc(timeout, func() { + cancel() + responded.Do(func() { + resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) + h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0) + }) + }) + } -func (h *handler) handleNonBatchCall(cp *callProc, msg *jsonrpcMessage) { - var ( - responded sync.Once - timer *time.Timer - cancel context.CancelFunc - ) - cp.ctx, cancel = context.WithCancel(cp.ctx) - defer cancel() - - // Cancel the request context after timeout and send an error response. 
Since the - // running method might not return immediately on timeout, we must wait for the - // timeout concurrently with processing the request. - if timeout, ok := ContextRequestTimeout(cp.ctx); ok { - timer = time.AfterFunc(timeout, func() { - cancel() + answer := h.handleCallMsg(cp, msg) + if timer != nil { + timer.Stop() + } + h.addSubscriptions(cp.notifiers) + if answer != nil { responded.Do(func() { - resp := msg.errorResponse(&internalServerError{errcodeTimeout, errMsgTimeout}) - h.conn.writeJSONSkipDeadline(cp.ctx, resp, true, h.deadlineContext > 0) + h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0) }) - }) - } - - answer := h.handleCallMsg(cp, msg) - if timer != nil { - timer.Stop() - } - h.addSubscriptions(cp.notifiers) - if answer != nil { - responded.Do(func() { - h.conn.writeJSONSkipDeadline(cp.ctx, answer, false, h.deadlineContext > 0) - }) - } - for _, n := range cp.notifiers { - n.activate() - } + } + for _, n := range cp.notifiers { + n.activate() + } + }) } // close cancels all requests except for inflightReq and waits for @@ -476,60 +437,23 @@ func (h *handler) startCallProc(fn func(*callProc)) { } } -// handleResponse processes method call responses. -func (h *handler) handleResponses(batch []*jsonrpcMessage, handleCall func(*jsonrpcMessage)) { - var resolvedops []*requestOp - handleResp := func(msg *jsonrpcMessage) { - op := h.respWait[string(msg.ID)] - if op == nil { - h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) - return - } - resolvedops = append(resolvedops, op) - delete(h.respWait, string(msg.ID)) - - // For subscription responses, start the subscription if the server - // indicates success. EthSubscribe gets unblocked in either case through - // the op.resp channel. - if op.sub != nil { - if msg.Error != nil { - op.err = msg.Error - } else { - op.err = json.Unmarshal(msg.Result, &op.sub.subid) - if op.err == nil { - go op.sub.run() - h.clientSubs[op.sub.subid] = op.sub - } - } - } - - if !op.hadResponse { - op.hadResponse = true - op.resp <- batch - } - } - - for _, msg := range batch { - start := time.Now() - switch { - case msg.isResponse(): - handleResp(msg) - h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(start)) - - case msg.isNotification(): - if strings.HasSuffix(msg.Method, notificationMethodSuffix) { - h.handleSubscriptionResult(msg) - continue - } - handleCall(msg) - - default: - handleCall(msg) +// handleImmediate executes non-call messages. It returns false if the message is a +// call or requires a reply. +func (h *handler) handleImmediate(msg *jsonrpcMessage) bool { + execStart := time.Now() + switch { + case msg.isNotification(): + if strings.HasSuffix(msg.Method, notificationMethodSuffix) { + h.handleSubscriptionResult(msg) + return true } - } - - for _, op := range resolvedops { - h.removeRequestOp(op) + return false + case msg.isResponse(): + h.handleResponse(msg) + h.log.Trace("Handled RPC response", "reqid", idForLog{msg.ID}, "duration", time.Since(execStart)) + return true + default: + return false } } @@ -545,6 +469,33 @@ func (h *handler) handleSubscriptionResult(msg *jsonrpcMessage) { } } +// handleResponse processes method call responses. +func (h *handler) handleResponse(msg *jsonrpcMessage) { + op := h.respWait[string(msg.ID)] + if op == nil { + h.log.Debug("Unsolicited RPC response", "reqid", idForLog{msg.ID}) + return + } + delete(h.respWait, string(msg.ID)) + // For normal responses, just forward the reply to Call/BatchCall. 
+ if op.sub == nil { + op.resp <- msg + return + } + // For subscription responses, start the subscription if the server + // indicates success. EthSubscribe gets unblocked in either case through + // the op.resp channel. + defer close(op.resp) + if msg.Error != nil { + op.err = msg.Error + return + } + if op.err = json.Unmarshal(msg.Result, &op.sub.subid); op.err == nil { + go op.sub.run() + h.clientSubs[op.sub.subid] = op.sub + } +} + // handleCallMsg executes a call message and returns the answer. func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMessage { // [callStart] is the time the message was enqueued for handler processing @@ -563,7 +514,6 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess h.handleCall(ctx, msg) h.log.Debug("Served "+msg.Method, "execTime", time.Since(execStart), "procTime", time.Since(procStart), "totalTime", time.Since(callStart)) return nil - case msg.isCall(): resp := h.handleCall(ctx, msg) var ctx []interface{} @@ -578,10 +528,8 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage) *jsonrpcMess h.log.Debug("Served "+msg.Method, ctx...) } return resp - case msg.hasValidID(): return msg.errorResponse(&invalidRequestError{"invalid request"}) - default: return errorMessage(&invalidRequestError{"invalid request"}) } @@ -601,14 +549,12 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage if callb == nil { return msg.errorResponse(&methodNotFoundError{method: msg.Method}) } - args, err := parsePositionalArguments(msg.Params, callb.argTypes) if err != nil { return msg.errorResponse(&invalidParamsError{err.Error()}) } start := time.Now() answer := h.runMethod(cp.ctx, msg, callb, args) - // Collect the statistics for RPC calls if metrics is enabled. // We only care about pure rpc call. Filter out subscription. if callb != h.unsubscribeCb { @@ -623,14 +569,16 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage updateServeTimeHistogram(msg.Method, answer.Error == nil, time.Since(start)) } } - return answer } // handleSubscribe processes *_subscribe method calls. func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage { if !h.allowSubscribe { - return msg.errorResponse(ErrNotificationsUnsupported) + return msg.errorResponse(&internalServerError{ + code: errcodeNotificationsUnsupported, + message: ErrNotificationsUnsupported.Error(), + }) } // Subscription method name is first argument. 
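
// Illustrative sketch (not part of the diff): handleSubscribe above services
// *_subscribe calls; from the client side the flow is driven by Subscribe.
// The WebSocket endpoint is a placeholder:
package main

import (
	"context"
	"fmt"

	"github.com/ava-labs/subnet-evm/core/types"
	"github.com/ava-labs/subnet-evm/rpc"
)

func main() {
	client, err := rpc.Dial("ws://127.0.0.1:9650/ext/bc/C/ws") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer client.Close()

	heads := make(chan *types.Header)
	// Dispatches to eth_subscribe; the server registers a notifier and streams
	// newHeads notifications back over the same connection.
	sub, err := client.Subscribe(context.Background(), "eth", heads, "newHeads")
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	for {
		select {
		case err := <-sub.Err():
			panic(err)
		case h := <-heads:
			fmt.Println("new head:", h.Number)
		}
	}
}
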
diff --git a/rpc/http.go b/rpc/http.go index a3ff1eac7f..56fea59f2c 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -153,7 +153,7 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { var cfg clientConfig cfg.httpClient = client fn := newClientTransportHTTP(endpoint, &cfg) - return newClient(context.Background(), &cfg, fn) + return newClient(context.Background(), fn) } func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc { @@ -190,12 +190,11 @@ func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) e } defer respBody.Close() - var resp jsonrpcMessage - batch := [1]*jsonrpcMessage{&resp} - if err := json.NewDecoder(respBody).Decode(&resp); err != nil { + var respmsg jsonrpcMessage + if err := json.NewDecoder(respBody).Decode(&respmsg); err != nil { return err } - op.resp <- batch[:] + op.resp <- &respmsg return nil } @@ -206,12 +205,16 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr return err } defer respBody.Close() - - var respmsgs []*jsonrpcMessage + var respmsgs []jsonrpcMessage if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil { return err } - op.resp <- respmsgs + if len(respmsgs) != len(msgs) { + return fmt.Errorf("batch has %d requests but response has %d: %w", len(msgs), len(respmsgs), ErrBadResult) + } + for i := 0; i < len(respmsgs); i++ { + op.resp <- &respmsgs[i] + } return nil } @@ -339,10 +342,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { connInfo.HTTP.UserAgent = r.Header.Get("User-Agent") ctx := r.Context() ctx = context.WithValue(ctx, peerInfoContextKey{}, connInfo) - // All checks passed, create a codec that reads directly from the request body // until EOF, writes the response to w, and orders the server to process a // single request. + w.Header().Set("content-type", contentType) codec := newHTTPServerConn(r, w) defer codec.close() diff --git a/rpc/inproc.go b/rpc/inproc.go index 6165af0a96..e008fd8804 100644 --- a/rpc/inproc.go +++ b/rpc/inproc.go @@ -34,8 +34,7 @@ import ( // DialInProc attaches an in-process connection to the given RPC server. func DialInProc(handler *Server) *Client { initctx := context.Background() - cfg := new(clientConfig) - c, _ := newClient(initctx, cfg, func(context.Context) (ServerCodec, error) { + c, _ := newClient(initctx, func(context.Context) (ServerCodec, error) { p1, p2 := net.Pipe() go handler.ServeCodec(NewCodec(p1), 0, 0, 0, 0) return NewCodec(p2), nil diff --git a/rpc/server.go b/rpc/server.go index a993fbe96e..13adf8112c 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -57,11 +57,9 @@ type Server struct { idgen func() ID maximumDuration time.Duration - mutex sync.Mutex - codecs map[ServerCodec]struct{} - run atomic.Bool - batchItemLimit int - batchResponseLimit int + mutex sync.Mutex + codecs map[ServerCodec]struct{} + run atomic.Bool } // NewServer creates a new server instance with no registered handlers. @@ -83,17 +81,6 @@ func NewServer(maximumDuration time.Duration) *Server { return server } -// SetBatchLimits sets limits applied to batch requests. There are two limits: 'itemLimit' -// is the maximum number of items in a batch. 'maxResponseSize' is the maximum number of -// response bytes across all requests in a batch. -// -// This method should be called before processing any requests via ServeCodec, ServeHTTP, -// ServeListener etc. 
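
// Illustrative sketch (not part of the diff): DialHTTPWithClient, shown above
// with its newClient call reverted, still lets callers supply their own
// http.Client, e.g. to set a transport-level timeout:
package main

import (
	"net/http"
	"time"

	"github.com/ava-labs/subnet-evm/rpc"
)

func main() {
	httpClient := &http.Client{Timeout: 10 * time.Second}
	client, err := rpc.DialHTTPWithClient("http://127.0.0.1:9650/ext/bc/C/rpc", httpClient) // placeholder URL
	if err != nil {
		panic(err)
	}
	defer client.Close()
}
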
-func (s *Server) SetBatchLimits(itemLimit, maxResponseSize int) { - s.batchItemLimit = itemLimit - s.batchResponseLimit = maxResponseSize -} - // RegisterName creates a service for the given receiver type under the given name. When no // methods on the given receiver match the criteria to be either a RPC method or a // subscription an error is returned. Otherwise a new service is created and added to the @@ -115,12 +102,7 @@ func (s *Server) ServeCodec(codec ServerCodec, options CodecOption, apiMaxDurati } defer s.untrackCodec(codec) - cfg := &clientConfig{ - idgen: s.idgen, - batchItemLimit: s.batchItemLimit, - batchResponseLimit: s.batchResponseLimit, - } - c := initClient(codec, &s.services, cfg, apiMaxDuration, refillRate, maxStored) + c := initClient(codec, s.idgen, &s.services, apiMaxDuration, refillRate, maxStored) <-codec.closed() c.Close() } @@ -152,7 +134,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { return } - h := newHandler(ctx, codec, s.idgen, &s.services, s.batchItemLimit, s.batchResponseLimit) + h := newHandler(ctx, codec, s.idgen, &s.services) h.deadlineContext = s.maximumDuration h.allowSubscribe = false defer h.close(io.EOF, nil) diff --git a/rpc/server_test.go b/rpc/server_test.go index 7702002085..e3b26623e1 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -79,7 +79,6 @@ func TestServer(t *testing.T) { func runTestScript(t *testing.T, file string) { server := newTestServer() - server.SetBatchLimits(4, 100000) content, err := os.ReadFile(file) if err != nil { t.Fatal(err) @@ -161,41 +160,3 @@ func runTestScript(t *testing.T, file string) { // } // } // } - -func TestServerBatchResponseSizeLimit(t *testing.T) { - server := newTestServer() - defer server.Stop() - server.SetBatchLimits(100, 60) - var ( - batch []BatchElem - client = DialInProc(server) - ) - for i := 0; i < 5; i++ { - batch = append(batch, BatchElem{ - Method: "test_echo", - Args: []any{"x", 1}, - Result: new(echoResult), - }) - } - if err := client.BatchCall(batch); err != nil { - t.Fatal("error sending batch:", err) - } - for i := range batch { - // We expect the first two queries to be ok, but after that the size limit takes effect. - if i < 2 { - if batch[i].Error != nil { - t.Fatalf("batch elem %d has unexpected error: %v", i, batch[i].Error) - } - continue - } - // After two, we expect an error. - re, ok := batch[i].Error.(Error) - if !ok { - t.Fatalf("batch elem %d has wrong error: %v", i, batch[i].Error) - } - wantedCode := errcodeResponseTooLarge - if re.ErrorCode() != wantedCode { - t.Errorf("batch elem %d wrong error code, have %d want %d", i, re.ErrorCode(), wantedCode) - } - } -} diff --git a/rpc/subscription.go b/rpc/subscription.go index 1174e7e2c0..3544a69ffb 100644 --- a/rpc/subscription.go +++ b/rpc/subscription.go @@ -42,17 +42,8 @@ import ( ) var ( - // ErrNotificationsUnsupported is returned by the client when the connection doesn't - // support notifications. You can use this error value to check for subscription - // support like this: - // - // sub, err := client.EthSubscribe(ctx, channel, "newHeads", true) - // if errors.Is(err, rpc.ErrNotificationsUnsupported) { - // // Server does not support subscriptions, fall back to polling. 
- // } - // - ErrNotificationsUnsupported = notificationsUnsupportedError{} - + // ErrNotificationsUnsupported is returned when the connection doesn't support notifications + ErrNotificationsUnsupported = errors.New("notifications not supported") // ErrSubscriptionNotFound is returned when the notification for the given id is not found ErrSubscriptionNotFound = errors.New("subscription not found") ) diff --git a/rpc/testdata/invalid-batch-toolarge.js b/rpc/testdata/invalid-batch-toolarge.js deleted file mode 100644 index 218fea58aa..0000000000 --- a/rpc/testdata/invalid-batch-toolarge.js +++ /dev/null @@ -1,13 +0,0 @@ -// This file checks the behavior of the batch item limit code. -// In tests, the batch item limit is set to 4. So to trigger the error, -// all batches in this file have 5 elements. - -// For batches that do not contain any calls, a response message with "id" == null -// is returned. - ---> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}] -<-- [{"jsonrpc":"2.0","id":null,"error":{"code":-32600,"message":"batch too large"}}] - -// For batches with at least one call, the call's "id" is used. ---> [{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","id":3,"method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]},{"jsonrpc":"2.0","method":"test_echo","params":["x",99]}] -<-- [{"jsonrpc":"2.0","id":3,"error":{"code":-32600,"message":"batch too large"}}] diff --git a/rpc/websocket.go b/rpc/websocket.go index b43b1b5a1a..d753d2667b 100644 --- a/rpc/websocket.go +++ b/rpc/websocket.go @@ -211,7 +211,7 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale if err != nil { return nil, err } - return newClient(ctx, cfg, connect) + return newClient(ctx, connect) } // DialWebsocket creates a new RPC client that communicates with a JSON-RPC server @@ -228,7 +228,7 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error if err != nil { return nil, err } - return newClient(ctx, cfg, connect) + return newClient(ctx, connect) } func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, error) { diff --git a/scripts/avalanche_header.txt b/scripts/avalanche_header.txt deleted file mode 100644 index c848a208bd..0000000000 --- a/scripts/avalanche_header.txt +++ /dev/null @@ -1,10 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** diff --git a/scripts/build_test.sh b/scripts/build_test.sh index 9e6fb61e42..bd2ef3903b 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -24,4 +24,4 @@ source "$SUBNET_EVM_PATH"/scripts/constants.sh # parallelism, and test coverage. # DO NOT RUN tests from the top level "tests" directory since they are run by ginkgo # shellcheck disable=SC2046 -go test -shuffle=on -race -coverprofile=coverage.out -covermode=atomic -timeout="30m" "$@" $(go list ./... 
| grep -v github.com/ava-labs/subnet-evm/tests) +go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" "$@" $(go list ./... | grep -v github.com/ava-labs/subnet-evm/tests) diff --git a/scripts/format_add_avalanche_header.sh b/scripts/format_add_avalanche_header.sh deleted file mode 100755 index 5c58906bf8..0000000000 --- a/scripts/format_add_avalanche_header.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail -set -x - -script_dir=$(dirname "$0") - -sed_command="1{/The go-ethereum Authors/{r ${script_dir}/avalanche_header.txt - N - } -}" -sed -i '' -e "${sed_command}" "$@" \ No newline at end of file diff --git a/scripts/format_as_fork.sh b/scripts/format_as_fork.sh deleted file mode 100755 index 1be9ce6248..0000000000 --- a/scripts/format_as_fork.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail -set -x - -script_dir=$(dirname "$0") - -commit_msg_remove_header="format: remove avalanche header" -commit_msg_remove_upstream="format: remove upstream go-ethereum" -commit_msg_rename_packages_as_fork="format: rename packages as fork" - -make_commit() { - if git diff-index --cached --quiet HEAD --; then - echo "No changes to commit." - else - git commit -m "$1" - fi -} - -revert_by_message() { - hash=$(git log --grep="$1" --format="%H" -n 1) - git revert --no-edit "$hash" -} - -if git status --porcelain | grep -q '^ M'; then - echo "There are edited files in the repository. Please commit or stash them before running this script." - exit 1 -fi - -upstream_dirs=$(sed -e 's/"github.com\/ethereum\/go-ethereum\/\(.*\)"/\1/' "${script_dir}"/geth-allowed-packages.txt | xargs) -for dir in ${upstream_dirs}; do - if [ -d "${dir}" ]; then - git rm -r "${dir}" - fi -done -git clean -df -- "${upstream_dirs}" -make_commit "${commit_msg_remove_upstream}" - -sed_command='s!\([^/]\)github.com/ethereum/go-ethereum!\1github.com/ava-labs/subnet-evm!g' -find . \( -name '*.go' -o -name 'go.mod' -o -name 'build_test.sh' \) -exec sed -i '' -e "${sed_command}" {} \; -for dir in ${upstream_dirs}; do - sed_command="s!\"github.com/ava-labs/subnet-evm/${dir}\"!\"github.com/ethereum/go-ethereum/${dir}\"!g" - find . -name '*.go' -exec sed -i '' -e "${sed_command}" {} \; -done -go get github.com/ethereum/go-ethereum@"$1" -gofmt -w . -go mod tidy -git add -u . -make_commit "${commit_msg_rename_packages_as_fork}" - -revert_by_message "${commit_msg_remove_header}" \ No newline at end of file diff --git a/scripts/format_as_upstream.sh b/scripts/format_as_upstream.sh deleted file mode 100755 index c4ba234609..0000000000 --- a/scripts/format_as_upstream.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o nounset -set -o pipefail -set -x - -script_dir=$(dirname "$0") - -commit_msg_remove_header="format: remove avalanche header" -commit_msg_add_upstream="format: add upstream go-ethereum" -commit_msg_rename_packages_to_upstream="format: rename packages to upstream" - -make_commit() { - if git diff-index --cached --quiet HEAD --; then - echo "No changes to commit." - else - git commit -m "$1" - fi -} - -if git status --porcelain | grep -q '^ M'; then - echo "There are edited files in the repository. Please commit or stash them before running this script." - exit 1 -fi - -sed_command='/\/\/ (c) [0-9]*\(-[0-9]*\)\{0,1\}, Ava Labs, Inc.$/,+9d' -find . -name '*.go' -exec sed -i '' -e "${sed_command}" {} \; -git add -u . 
-make_commit "${commit_msg_remove_header}" - -upstream_tag=$(grep -o 'github.com/ethereum/go-ethereum v.*' go.mod | cut -f2 -d' ') -upstream_dirs=$(sed -e 's/"github.com\/ethereum\/go-ethereum\/\(.*\)"/\1/' "${script_dir}"/geth-allowed-packages.txt | xargs) -upstream_dirs_array=() -IFS=" " read -r -a upstream_dirs_array <<< "$upstream_dirs" - -git clean -f "${upstream_dirs_array[@]}" -git checkout "${upstream_tag}" -- "${upstream_dirs_array[@]}" -git add "${upstream_dirs_array[@]}" -make_commit "${commit_msg_add_upstream}" - -sed_command='s!\([^/]\)github.com/ava-labs/subnet-evm!\1github.com/ethereum/go-ethereum!g' -find . \( -name '*.go' -o -name 'go.mod' -o -name 'build_test.sh' \) -exec sed -i '' -e "${sed_command}" {} \; -gofmt -w . -go mod tidy -git add -u . -make_commit "${commit_msg_rename_packages_to_upstream}" \ No newline at end of file diff --git a/scripts/geth-allowed-packages.txt b/scripts/geth-allowed-packages.txt index c295d5d044..e39828ed57 100644 --- a/scripts/geth-allowed-packages.txt +++ b/scripts/geth-allowed-packages.txt @@ -10,7 +10,6 @@ "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" -"github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/leveldb" "github.com/ethereum/go-ethereum/ethdb/memorydb" diff --git a/sync/handlers/leafs_request.go b/sync/handlers/leafs_request.go index e199d55d58..634b9be4fb 100644 --- a/sync/handlers/leafs_request.go +++ b/sync/handlers/leafs_request.go @@ -92,8 +92,8 @@ func (lrh *LeafsRequestHandler) OnLeafsRequest(ctx context.Context, nodeID ids.N // TODO: We should know the state root that accounts correspond to, // as this information will be necessary to access storage tries when // the trie is path based. - // stateRoot := common.Hash{} - t, err := trie.New(trie.TrieID(leafsRequest.Root), lrh.trieDB) + stateRoot := common.Hash{} + t, err := trie.New(trie.StorageTrieID(stateRoot, leafsRequest.Account, leafsRequest.Root), lrh.trieDB) if err != nil { log.Debug("error opening trie when processing request, dropping request", "nodeID", nodeID, "requestID", requestID, "root", leafsRequest.Root, "err", err) lrh.stats.IncMissingRoot() @@ -332,14 +332,14 @@ func (rb *responseBuilder) generateRangeProof(start []byte, keys [][]byte) (*mem start = bytes.Repeat([]byte{0x00}, rb.keyLength) } - if err := rb.t.Prove(start, proof); err != nil { + if err := rb.t.Prove(start, 0, proof); err != nil { _ = proof.Close() // closing memdb does not error return nil, err } if len(keys) > 0 { // If there is a non-zero number of keys, set [end] for the range proof to the last key. 
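
// Illustrative sketch (not part of the diff): generateRangeProof above returns
// to the three-argument Prove, whose second parameter is the trie level to
// start the proof from (0 = root). Proving both endpoints of a key range into
// one memdb, mirroring the handler; proveRange is a hypothetical helper:
package main

import (
	"github.com/ava-labs/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
)

func proveRange(tr *trie.Trie, first, last []byte) (*memorydb.Database, error) {
	proofDB := memorydb.New()
	if err := tr.Prove(first, 0, proofDB); err != nil {
		return nil, err
	}
	if err := tr.Prove(last, 0, proofDB); err != nil {
		return nil, err
	}
	return proofDB, nil
}
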
end := keys[len(keys)-1] - if err := rb.t.Prove(end, proof); err != nil { + if err := rb.t.Prove(end, 0, proof); err != nil { _ = proof.Close() // closing memdb does not error return nil, err } @@ -422,11 +422,7 @@ func (rb *responseBuilder) fillFromTrie(ctx context.Context, end []byte) (bool, defer func() { rb.trieReadTime += time.Since(startTime) }() // create iterator to iterate the trie - nodeIt, err := rb.t.NodeIterator(rb.nextKey()) - if err != nil { - return false, err - } - it := trie.NewIterator(nodeIt) + it := trie.NewIterator(rb.t.NodeIterator(rb.nextKey())) more := false for it.Next() { // if we're at the end, break this loop diff --git a/sync/handlers/leafs_request_test.go b/sync/handlers/leafs_request_test.go index cd1758a561..c2b8c33aae 100644 --- a/sync/handlers/leafs_request_test.go +++ b/sync/handlers/leafs_request_test.go @@ -20,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/rlp" "github.com/stretchr/testify/assert" ) @@ -472,12 +473,15 @@ func TestLeafsRequestHandler_OnLeafsRequest(t *testing.T) { } // modify one entry of 1 in 4 segments if i%(segmentLen*4) == 0 { - acc, err := types.FullAccount(it.Account()) - if err != nil { + var acc snapshot.Account + if err := rlp.DecodeBytes(it.Account(), &acc); err != nil { t.Fatalf("could not parse snapshot account: %v", err) } acc.Nonce++ - bytes := types.SlimAccountRLP(*acc) + bytes, err := rlp.EncodeToBytes(acc) + if err != nil { + t.Fatalf("coult not encode snapshot account to bytes: %v", err) + } rawdb.WriteAccountSnapshot(memdb, it.Hash(), bytes) } i++ diff --git a/sync/statesync/sync_helpers.go b/sync/statesync/sync_helpers.go index 45cfc02b90..c11844cf55 100644 --- a/sync/statesync/sync_helpers.go +++ b/sync/statesync/sync_helpers.go @@ -5,6 +5,7 @@ package statesync import ( "github.com/ava-labs/subnet-evm/core/rawdb" + "github.com/ava-labs/subnet-evm/core/state/snapshot" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/trie" "github.com/ethereum/go-ethereum/common" @@ -14,18 +15,14 @@ import ( // writeAccountSnapshot stores the account represented by [acc] to the snapshot at [accHash], using // SlimAccountRLP format (omitting empty code/storage). func writeAccountSnapshot(db ethdb.KeyValueWriter, accHash common.Hash, acc types.StateAccount) { - slimAccount := types.SlimAccountRLP(acc) + slimAccount := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash) rawdb.WriteAccountSnapshot(db, accHash, slimAccount) } // writeAccountStorageSnapshotFromTrie iterates the trie at [storageTrie] and copies all entries // to the storage snapshot for [accountHash]. 
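
// Illustrative sketch (not part of the diff): the reverted test above treats a
// snapshot entry as a plain RLP-encoded snapshot.Account. A standalone version
// of that round trip; bumpNonce is a hypothetical helper and slimBytes is
// assumed to come from rawdb.ReadAccountSnapshot or a snapshot iterator:
package main

import (
	"github.com/ava-labs/subnet-evm/core/state/snapshot"
	"github.com/ethereum/go-ethereum/rlp"
)

func bumpNonce(slimBytes []byte) ([]byte, error) {
	var acc snapshot.Account
	if err := rlp.DecodeBytes(slimBytes, &acc); err != nil {
		return nil, err
	}
	acc.Nonce++
	return rlp.EncodeToBytes(acc) // stays in slim form, as in the test above
}
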
func writeAccountStorageSnapshotFromTrie(batch ethdb.Batch, batchSize int, accountHash common.Hash, storageTrie *trie.Trie) error { - nodeIt, err := storageTrie.NodeIterator(nil) - if err != nil { - return err - } - it := trie.NewIterator(nodeIt) + it := trie.NewIterator(storageTrie.NodeIterator(nil)) for it.Next() { rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(it.Key), it.Value) if batch.ValueSize() > batchSize { diff --git a/sync/statesync/sync_test.go b/sync/statesync/sync_test.go index 6f8e81f0be..7e7845e6e6 100644 --- a/sync/statesync/sync_test.go +++ b/sync/statesync/sync_test.go @@ -441,11 +441,7 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { if err != nil { t.Fatal(err) } - nodeIt, err := tr.NodeIterator(nil) - if err != nil { - t.Fatal(err) - } - it := trie.NewIterator(nodeIt) + it := trie.NewIterator(tr.NodeIterator(nil)) accountsWithStorage := 0 // keep track of storage tries we delete trie nodes from diff --git a/sync/statesync/test_sync.go b/sync/statesync/test_sync.go index a319e34c08..f606e7e9df 100644 --- a/sync/statesync/test_sync.go +++ b/sync/statesync/test_sync.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/subnet-evm/accounts/keystore" "github.com/ava-labs/subnet-evm/core/rawdb" + "github.com/ava-labs/subnet-evm/core/state/snapshot" "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/sync/syncutils" "github.com/ava-labs/subnet-evm/trie" @@ -47,7 +48,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database } // check snapshot consistency snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash) - expectedSnapshotVal := types.SlimAccountRLP(acc) + expectedSnapshotVal := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash) assert.Equal(t, expectedSnapshotVal, snapshotVal) // check code consistency diff --git a/sync/syncutils/iterators.go b/sync/syncutils/iterators.go index 45752ca72f..c546cccd37 100644 --- a/sync/syncutils/iterators.go +++ b/sync/syncutils/iterators.go @@ -5,7 +5,6 @@ package syncutils import ( "github.com/ava-labs/subnet-evm/core/state/snapshot" - "github.com/ava-labs/subnet-evm/core/types" "github.com/ethereum/go-ethereum/ethdb" ) @@ -27,7 +26,7 @@ func (it *AccountIterator) Next() bool { return false } for it.AccountIterator.Next() { - it.val, it.err = types.FullAccountRLP(it.Account()) + it.val, it.err = snapshot.FullAccountRLP(it.Account()) return it.err == nil } it.val = nil diff --git a/sync/syncutils/test_trie.go b/sync/syncutils/test_trie.go index 3714055f85..08c7516100 100644 --- a/sync/syncutils/test_trie.go +++ b/sync/syncutils/test_trie.go @@ -35,9 +35,8 @@ func GenerateTrie(t *testing.T, trieDB *trie.Database, numKeys int, keySize int) keys, values := FillTrie(t, numKeys, keySize, testTrie) // Commit the root to [trieDB] - root, nodes, err := testTrie.Commit(false) - assert.NoError(t, err) - err = trieDB.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := testTrie.Commit(false) + err := trieDB.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) assert.NoError(t, err) err = trieDB.Commit(root, false) assert.NoError(t, err) @@ -83,16 +82,8 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database, t.Fatalf("error creating trieB, root=%s, err=%v", root, err) } - nodeItA, err := trieA.NodeIterator(nil) - if err != nil { - t.Fatalf("error creating node iterator for trieA, root=%s, err=%v", root, err) - } - nodeItB, err := trieB.NodeIterator(nil) - if err != nil { - 
t.Fatalf("error creating node iterator for trieB, root=%s, err=%v", root, err) - } - itA := trie.NewIterator(nodeItA) - itB := trie.NewIterator(nodeItB) + itA := trie.NewIterator(trieA.NodeIterator(nil)) + itB := trie.NewIterator(trieB.NodeIterator(nil)) count := 0 for itA.Next() && itB.Next() { count++ @@ -116,10 +107,7 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *trie.Database, func CorruptTrie(t *testing.T, diskdb ethdb.Batcher, tr *trie.Trie, n int) { // Delete some trie nodes batch := diskdb.NewBatch() - nodeIt, err := tr.NodeIterator(nil) - if err != nil { - t.Fatal(err) - } + nodeIt := tr.NodeIterator(nil) count := 0 for nodeIt.Next(true) { count++ @@ -181,11 +169,8 @@ func FillAccounts( accounts[key] = &acc } - newRoot, nodes, err := tr.Commit(false) - if err != nil { - t.Fatalf("error committing trie: %v", err) - } - if err := trieDB.Update(newRoot, root, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + newRoot, nodes := tr.Commit(false) + if err := trieDB.Update(newRoot, root, trienode.NewWithNodeSet(nodes)); err != nil { t.Fatalf("error updating trieDB: %v", err) } if err := trieDB.Commit(newRoot, false); err != nil { diff --git a/tests/init.go b/tests/init.go index 3cbd4d3511..6d2d5bae9e 100644 --- a/tests/init.go +++ b/tests/init.go @@ -197,22 +197,6 @@ var Forks = map[string]*params.ChainConfig{ DurangoTimestamp: utils.NewUint64(0), }, }, - "Cancun": { - ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(0), - EIP150Block: big.NewInt(0), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(0), - MandatoryNetworkUpgrades: params.MandatoryNetworkUpgrades{ - SubnetEVMTimestamp: utils.NewUint64(0), - DurangoTimestamp: utils.NewUint64(0), - CancunTime: utils.NewUint64(0), - }, - }, } // AvailableForks returns the set of defined fork names diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 848038397c..9b44009172 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -111,19 +111,6 @@ type stTransaction struct { GasLimit []uint64 `json:"gasLimit"` Value []string `json:"value"` PrivateKey []byte `json:"secretKey"` - BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"` - BlobGasFeeCap *big.Int `json:"maxFeePerBlobGas,omitempty"` -} - -// nolint: unused -type stTransactionMarshaling struct { - GasPrice *math.HexOrDecimal256 - MaxFeePerGas *math.HexOrDecimal256 - MaxPriorityFeePerGas *math.HexOrDecimal256 - Nonce math.HexOrDecimal64 - GasLimit []math.HexOrDecimal64 - PrivateKey hexutil.Bytes - BlobGasFeeCap *math.HexOrDecimal256 } // GetChainConfig takes a fork definition and returns a chain config. @@ -213,18 +200,13 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config, snapshotter bo } post := t.json.Post[subtest.Fork][subtest.Index] // N.B: We need to do this in a two-step process, because the first Commit takes care - // of self-destructs, and we need to touch the coinbase _after_ it has potentially self-destructed. + // of suicides, and we need to touch the coinbase _after_ it has potentially suicided. 
if root != common.Hash(post.Root) { return snaps, statedb, fmt.Errorf("post state root mismatch: got %x, want %x", root, post.Root) } if logs := rlpHash(statedb.Logs()); logs != common.Hash(post.Logs) { return snaps, statedb, fmt.Errorf("post state logs hash mismatch: got %x, want %x", logs, post.Logs) } - // Re-init the post-state instance for further operation - statedb, err = state.New(root, statedb.Database(), snaps) - if err != nil { - return nil, nil, err - } return snaps, statedb, nil } @@ -285,12 +267,14 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh } // Add 0-value mining reward. This only makes a difference in the cases // where - // - the coinbase self-destructed, or + // - the coinbase suicided, or // - there are only 'bad' transactions, which aren't executed. In those cases, // the coinbase gets no txfee, so isn't created, and thus needs to be touched statedb.AddBalance(block.Coinbase(), new(big.Int)) // Commit block - root, _ := statedb.Commit(block.NumberU64(), config.IsEIP158(block.Number()), false) + statedb.Commit(config.IsEIP158(block.Number()), false) + // And _now_ get the state root + root := statedb.IntermediateRoot(config.IsEIP158(block.Number())) return snaps, statedb, root, err } @@ -306,7 +290,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo } } // Commit and re-open to start with a clean state. - root, _ := statedb.Commit(0, false, false) + root, _ := statedb.Commit(false, false) var snaps *snapshot.Tree if snapshotter { @@ -409,18 +393,16 @@ func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (*core.Mess } msg := &core.Message{ - From: from, - To: to, - Nonce: tx.Nonce, - Value: value, - GasLimit: gasLimit, - GasPrice: gasPrice, - GasFeeCap: tx.MaxFeePerGas, - GasTipCap: tx.MaxPriorityFeePerGas, - Data: data, - AccessList: accessList, - BlobHashes: tx.BlobVersionedHashes, - BlobGasFeeCap: tx.BlobGasFeeCap, + From: from, + To: to, + Nonce: tx.Nonce, + Value: value, + GasLimit: gasLimit, + GasPrice: gasPrice, + GasFeeCap: tx.MaxFeePerGas, + GasTipCap: tx.MaxPriorityFeePerGas, + Data: data, + AccessList: accessList, } return msg, nil } diff --git a/trie/committer.go b/trie/committer.go index 1ce9ccf33d..b06a8b2c53 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -141,15 +141,22 @@ func (c *committer) store(path []byte, n node) node { // The node is embedded in its parent, in other words, this node // will not be stored in the database independently, mark it as // deleted only if the node was existent in database before. - _, ok := c.tracer.accessList[string(path)] + prev, ok := c.tracer.accessList[string(path)] if ok { - c.nodes.AddNode(path, trienode.NewDeleted()) + c.nodes.AddNode(path, trienode.NewWithPrev(common.Hash{}, nil, prev)) } return n } // Collect the dirty node to nodeset for return. - nhash := common.BytesToHash(hash) - c.nodes.AddNode(path, trienode.New(nhash, nodeToBytes(n))) + var ( + nhash = common.BytesToHash(hash) + node = trienode.NewWithPrev( + nhash, + nodeToBytes(n), + c.tracer.accessList[string(path)], + ) + ) + c.nodes.AddNode(path, node) // Collect the corresponding leaf node if it's required. We don't check // full node since it's impossible to store value in fullNode. 
The key diff --git a/trie/database_test.go b/trie/database_test.go index a3621392f6..19394b55fe 100644 --- a/trie/database_test.go +++ b/trie/database_test.go @@ -29,7 +29,6 @@ package trie import ( "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ava-labs/subnet-evm/trie/triedb/hashdb" - "github.com/ava-labs/subnet-evm/trie/triedb/pathdb" "github.com/ethereum/go-ethereum/ethdb" ) @@ -37,9 +36,10 @@ import ( func newTestDatabase(diskdb ethdb.Database, scheme string) *Database { db := prepare(diskdb, nil) if scheme == rawdb.HashScheme { - db.backend = hashdb.New(diskdb, nil, mptResolver{}) - } else { - db.backend = pathdb.New(diskdb, &pathdb.Config{}) // disable clean/dirty cache + db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) } + //} else { + // db.backend = snap.New(diskdb, db.cleans, nil) + //} return db } diff --git a/trie/database.go b/trie/database_wrap.go similarity index 76% rename from trie/database.go rename to trie/database_wrap.go index 4be40dc49f..dab26df692 100644 --- a/trie/database.go +++ b/trie/database_wrap.go @@ -18,14 +18,15 @@ package trie import ( "errors" + "runtime" + "time" "github.com/ava-labs/subnet-evm/trie/triedb/hashdb" - "github.com/ava-labs/subnet-evm/trie/triedb/pathdb" "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" "github.com/ava-labs/subnet-evm/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" ) const ( @@ -34,13 +35,10 @@ const ( // Config defines all necessary options for database. type Config struct { - Cache int // Memory allowance (MB) to use for caching trie nodes in memory - Preimages bool // Flag whether the preimage of trie key is recorded - StatsPrefix string // Prefix for cache stats (disabled if empty) - PathDB *pathdb.Config // Configs for experimental path-based scheme, not used yet. - - // Testing hooks - OnCommit func(states *triestate.Set) // Hook invoked when commit is performed + Cache int // Memory allowance (MB) to use for caching trie nodes in memory + Journal string // Journal of clean cache to survive node restarts + Preimages bool // Flag whether the preimage of trie key is recorded + StatsPrefix string // Prefix for cache stats (disabled if empty) } // backend defines the methods needed to access/update trie nodes in different @@ -60,10 +58,8 @@ type backend interface { // Update performs a state transition by committing dirty nodes contained // in the given set in order to update state from the specified parent to // the specified root. - // - // The passed in maps(nodes, states) will be retained to avoid copying - // everything. Therefore, these maps must not be changed afterwards. - Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error + Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error + UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error // Commit writes all relevant trie nodes belonging to the specified state // to disk. Report specifies whether logs will be displayed in info level. 
@@ -86,6 +82,7 @@ type cache interface { type Database struct { config *Config // Configuration for trie database diskdb ethdb.Database // Persistent database to store the snapshot + cleans cache // Megabytes permitted using for read caches preimages *preimageStore // The store for caching preimages backend backend // The backend for managing trie nodes } @@ -93,6 +90,10 @@ type Database struct { // prepare initializes the database with provided configs, but the // database backend is still left as nil. func prepare(diskdb ethdb.Database, config *Config) *Database { + var cleans cache + if config != nil && config.Cache > 0 { + cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.Journal, config.StatsPrefix, cacheStatsUpdateFrequency) + } var preimages *preimageStore if config != nil && config.Preimages { preimages = newPreimageStore(diskdb) @@ -100,6 +101,7 @@ func prepare(diskdb ethdb.Database, config *Config) *Database { return &Database{ config: config, diskdb: diskdb, + cleans: cleans, preimages: preimages, } } @@ -114,53 +116,33 @@ func NewDatabase(diskdb ethdb.Database) *Database { // The path-based scheme is not activated yet, always initialized with legacy // hash-based scheme by default. func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { - var cleans cache - if config != nil && config.Cache != 0 { - cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.StatsPrefix, cacheStatsUpdateFrequency) - } db := prepare(diskdb, config) - db.backend = hashdb.New(diskdb, cleans, mptResolver{}) + db.backend = hashdb.New(diskdb, db.cleans, mptResolver{}) return db } // Reader returns a reader for accessing all trie nodes with provided state root. -// An error will be returned if the requested state is not available. -func (db *Database) Reader(blockRoot common.Hash) (Reader, error) { - switch b := db.backend.(type) { - case *hashdb.Database: - return b.Reader(blockRoot) - case *pathdb.Database: - return b.Reader(blockRoot) - } - return nil, errors.New("unknown backend") +// Nil is returned in case the state is not available. +func (db *Database) Reader(blockRoot common.Hash) Reader { + return db.backend.(*hashdb.Database).Reader(blockRoot) } // Update performs a state transition by committing dirty nodes contained in the // given set in order to update state from the specified parent to the specified // root. The held pre-images accumulated up to this point will be flushed in case // the size exceeds the threshold. -// -// The passed in maps(nodes, states) will be retained to avoid copying everything. -// Therefore, these maps must not be changed afterwards. 
-func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { - if db.config != nil && db.config.OnCommit != nil { - db.config.OnCommit(states) - } +func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { if db.preimages != nil { db.preimages.commit(false) } - return db.backend.Update(root, parent, block, nodes, states) + return db.backend.Update(root, parent, nodes) } -func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { +func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { if db.preimages != nil { db.preimages.commit(false) } - hdb, ok := db.backend.(*hashdb.Database) - if ok { - return hdb.UpdateAndReferenceRoot(root, parent, block, nodes, states) - } - return db.backend.Update(root, parent, block, nodes, states) + return db.backend.UpdateAndReferenceRoot(root, parent, nodes) } // Commit iterates over all the children of a particular node, writes them out @@ -202,14 +184,49 @@ func (db *Database) Scheme() string { // It is meant to be called when closing the blockchain object, so that all // resources held can be released correctly. func (db *Database) Close() error { - db.WritePreimages() + if db.preimages != nil { + db.preimages.commit(true) + } return db.backend.Close() } -// WritePreimages flushes all accumulated preimages to disk forcibly. -func (db *Database) WritePreimages() { - if db.preimages != nil { - db.preimages.commit(true) +// saveCache saves clean state cache to given directory path +// using specified CPU cores. +func (db *Database) saveCache(dir string, threads int) error { + if db.cleans == nil { + return nil + } + log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) + + start := time.Now() + err := db.cleans.SaveToFileConcurrent(dir, threads) + if err != nil { + log.Error("Failed to persist clean trie cache", "error", err) + return err + } + log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// SaveCache atomically saves fast cache data to the given dir using all +// available CPU cores. +func (db *Database) SaveCache(dir string) error { + return db.saveCache(dir, runtime.GOMAXPROCS(0)) +} + +// SaveCachePeriodically atomically saves fast cache data to the given dir with +// the specified interval. All dump operation will only use a single CPU core. +func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + db.saveCache(dir, 1) + case <-stopCh: + return + } } } diff --git a/trie/errors.go b/trie/errors.go index 307a5f8747..b6f90132b6 100644 --- a/trie/errors.go +++ b/trie/errors.go @@ -27,17 +27,11 @@ package trie import ( - "errors" "fmt" "github.com/ethereum/go-ethereum/common" ) -// ErrCommitted is returned when a already committed trie is requested for usage. -// The potential usages can be `Get`, `Update`, `Delete`, `NodeIterator`, `Prove` -// and so on. -var ErrCommitted = errors.New("trie is already committed") - // MissingNodeError is returned by the trie functions (Get, Update, Delete) // in the case where a trie node is not present in the local database. 
It contains // information necessary for retrieving the missing node. diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 6e14e9b29c..4e8f956d28 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -44,7 +44,7 @@ import ( func TestEmptyIterator(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - iter := trie.MustNodeIterator(nil) + iter := trie.NodeIterator(nil) seen := make(map[string]struct{}) for iter.Next(true) { @@ -72,12 +72,12 @@ func TestIterator(t *testing.T) { all[val.k] = val.v trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) found := make(map[string]string) - it := NewIterator(trie.MustNodeIterator(nil)) + it := NewIterator(trie.NodeIterator(nil)) for it.Next() { found[string(it.Key)] = string(it.Value) } @@ -94,10 +94,6 @@ type kv struct { t bool } -func (k *kv) cmp(other *kv) int { - return bytes.Compare(k.k, other.k) -} - func TestIteratorLargeData(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) vals := make(map[string]*kv) @@ -111,7 +107,7 @@ func TestIteratorLargeData(t *testing.T) { vals[string(value2.k)] = value2 } - it := NewIterator(trie.MustNodeIterator(nil)) + it := NewIterator(trie.NodeIterator(nil)) for it.Next() { vals[string(it.Key)].t = true } @@ -140,7 +136,7 @@ type iterationElement struct { // Tests that the node iterator indeed walks over the entire database contents. func TestNodeIteratorCoverage(t *testing.T) { testNodeIteratorCoverage(t, rawdb.HashScheme) - testNodeIteratorCoverage(t, rawdb.PathScheme) + //testNodeIteratorCoverage(t, rawdb.PathScheme) } func testNodeIteratorCoverage(t *testing.T, scheme string) { @@ -149,7 +145,7 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) { // Gather all the node hashes found by the iterator var elements = make(map[common.Hash]iterationElement) - for it := trie.MustNodeIterator(nil); it.Next(true); { + for it := trie.NodeIterator(nil); it.Next(true); { if it.Hash() != (common.Hash{}) { elements[it.Hash()] = iterationElement{ hash: it.Hash(), @@ -159,12 +155,8 @@ func testNodeIteratorCoverage(t *testing.T, scheme string) { } } // Cross check the hashes and the database itself - reader, err := nodeDb.Reader(trie.Hash()) - if err != nil { - t.Fatalf("state is not available %x", trie.Hash()) - } for _, element := range elements { - if blob, err := reader.Node(common.Hash{}, element.path, element.hash); err != nil { + if blob, err := nodeDb.Reader(trie.Hash()).Node(common.Hash{}, element.path, element.hash); err != nil { t.Errorf("failed to retrieve reported node %x: %v", element.hash, err) } else if !bytes.Equal(blob, element.blob) { t.Errorf("node blob is different, want %v got %v", element.blob, blob) @@ -224,19 +216,19 @@ func TestIteratorSeek(t *testing.T) { } // Seek to the middle. - it := NewIterator(trie.MustNodeIterator([]byte("fab"))) + it := NewIterator(trie.NodeIterator([]byte("fab"))) if err := checkIteratorOrder(testdata1[4:], it); err != nil { t.Fatal(err) } // Seek to a non-existent key. - it = NewIterator(trie.MustNodeIterator([]byte("barc"))) + it = NewIterator(trie.NodeIterator([]byte("barc"))) if err := checkIteratorOrder(testdata1[1:], it); err != nil { t.Fatal(err) } // Seek beyond the end. 
- it = NewIterator(trie.MustNodeIterator([]byte("z"))) + it = NewIterator(trie.NodeIterator([]byte("z"))) if err := checkIteratorOrder(nil, it); err != nil { t.Fatal(err) } @@ -264,8 +256,8 @@ func TestDifferenceIterator(t *testing.T) { for _, val := range testdata1 { triea.MustUpdate([]byte(val.k), []byte(val.v)) } - rootA, nodesA, _ := triea.Commit(false) - dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) + rootA, nodesA := triea.Commit(false) + dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)) triea, _ = New(TrieID(rootA), dba) dbb := NewDatabase(rawdb.NewMemoryDatabase()) @@ -273,12 +265,12 @@ func TestDifferenceIterator(t *testing.T) { for _, val := range testdata2 { trieb.MustUpdate([]byte(val.k), []byte(val.v)) } - rootB, nodesB, _ := trieb.Commit(false) - dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) + rootB, nodesB := trieb.Commit(false) + dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB)) trieb, _ = New(TrieID(rootB), dbb) found := make(map[string]string) - di, _ := NewDifferenceIterator(triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)) + di, _ := NewDifferenceIterator(triea.NodeIterator(nil), trieb.NodeIterator(nil)) it := NewIterator(di) for it.Next() { found[string(it.Key)] = string(it.Value) @@ -306,8 +298,8 @@ func TestUnionIterator(t *testing.T) { for _, val := range testdata1 { triea.MustUpdate([]byte(val.k), []byte(val.v)) } - rootA, nodesA, _ := triea.Commit(false) - dba.Update(rootA, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesA), nil) + rootA, nodesA := triea.Commit(false) + dba.Update(rootA, types.EmptyRootHash, trienode.NewWithNodeSet(nodesA)) triea, _ = New(TrieID(rootA), dba) dbb := NewDatabase(rawdb.NewMemoryDatabase()) @@ -315,11 +307,11 @@ func TestUnionIterator(t *testing.T) { for _, val := range testdata2 { trieb.MustUpdate([]byte(val.k), []byte(val.v)) } - rootB, nodesB, _ := trieb.Commit(false) - dbb.Update(rootB, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodesB), nil) + rootB, nodesB := trieb.Commit(false) + dbb.Update(rootB, types.EmptyRootHash, trienode.NewWithNodeSet(nodesB)) trieb, _ = New(TrieID(rootB), dbb) - di, _ := NewUnionIterator([]NodeIterator{triea.MustNodeIterator(nil), trieb.MustNodeIterator(nil)}) + di, _ := NewUnionIterator([]NodeIterator{triea.NodeIterator(nil), trieb.NodeIterator(nil)}) it := NewIterator(di) all := []struct{ k, v string }{ @@ -358,15 +350,15 @@ func TestIteratorNoDups(t *testing.T) { for _, val := range testdata1 { tr.MustUpdate([]byte(val.k), []byte(val.v)) } - checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil) + checkIteratorNoDups(t, tr.NodeIterator(nil), nil) } // This test checks that nodeIterator.Next can be retried after inserting missing trie nodes. 
func TestIteratorContinueAfterError(t *testing.T) { testIteratorContinueAfterError(t, false, rawdb.HashScheme) testIteratorContinueAfterError(t, true, rawdb.HashScheme) - testIteratorContinueAfterError(t, false, rawdb.PathScheme) - testIteratorContinueAfterError(t, true, rawdb.PathScheme) + // testIteratorContinueAfterError(t, false, rawdb.PathScheme) + // testIteratorContinueAfterError(t, true, rawdb.PathScheme) } func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { @@ -377,13 +369,13 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { for _, val := range testdata1 { tr.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := tr.Commit(false) - tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := tr.Commit(false) + tdb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) if !memonly { tdb.Commit(root, false) } tr, _ = New(TrieID(root), tdb) - wantNodeCount := checkIteratorNoDups(t, tr.MustNodeIterator(nil), nil) + wantNodeCount := checkIteratorNoDups(t, tr.NodeIterator(nil), nil) var ( paths [][]byte @@ -442,7 +434,7 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { } // Iterate until the error is hit. seen := make(map[string]bool) - it := tr.MustNodeIterator(nil) + it := tr.NodeIterator(nil) checkIteratorNoDups(t, it, seen) missing, ok := it.Error().(*MissingNodeError) if !ok || missing.NodeHash != rhash { @@ -471,8 +463,8 @@ func testIteratorContinueAfterError(t *testing.T, memonly bool, scheme string) { func TestIteratorContinueAfterSeekError(t *testing.T) { testIteratorContinueAfterSeekError(t, false, rawdb.HashScheme) testIteratorContinueAfterSeekError(t, true, rawdb.HashScheme) - testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) - testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) + // testIteratorContinueAfterSeekError(t, false, rawdb.PathScheme) + // testIteratorContinueAfterSeekError(t, true, rawdb.PathScheme) } func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme string) { @@ -487,14 +479,14 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin for _, val := range testdata1 { ctr.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := ctr.Commit(false) + root, nodes := ctr.Commit(false) for path, n := range nodes.Nodes { if n.Hash == barNodeHash { barNodePath = []byte(path) break } } - triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) if !memonly { triedb.Commit(root, false) } @@ -510,7 +502,7 @@ func testIteratorContinueAfterSeekError(t *testing.T, memonly bool, scheme strin } // Create a new iterator that seeks to "bars". Seeking can't proceed because // the node is missing. 
- it := tr.MustNodeIterator([]byte("bars")) + it := tr.NodeIterator([]byte("bars")) missing, ok := it.Error().(*MissingNodeError) if !ok { t.Fatal("want MissingNodeError, got", it.Error()) @@ -544,7 +536,7 @@ func checkIteratorNoDups(t *testing.T, it NodeIterator, seen map[string]bool) in func TestIteratorNodeBlob(t *testing.T) { testIteratorNodeBlob(t, rawdb.HashScheme) - testIteratorNodeBlob(t, rawdb.PathScheme) + //testIteratorNodeBlob(t, rawdb.PathScheme) } type loggingDb struct { @@ -614,12 +606,9 @@ func makeLargeTestTrie() (*Database, *StateTrie, *loggingDb) { val = crypto.Keccak256(val) trie.MustUpdate(key, val) } - root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) - triedb.Commit(root, false) - + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // Return the generated trie - trie, _ = NewStateTrie(TrieID(root), triedb) return triedb, trie, logDb } @@ -631,8 +620,8 @@ func TestNodeIteratorLargeTrie(t *testing.T) { // Do a seek operation trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885")) // master: 24 get operations - // this pr: 6 get operations - if have, want := logDb.getCount, uint64(6); have != want { + // this pr: 5 get operations + if have, want := logDb.getCount, uint64(5); have != want { t.Fatalf("Too many lookups during seek, have %d want %d", have, want) } } @@ -657,13 +646,13 @@ func testIteratorNodeBlob(t *testing.T, scheme string) { all[val.k] = val.v trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) triedb.Commit(root, false) var found = make(map[common.Hash][]byte) trie, _ = New(TrieID(root), triedb) - it := trie.MustNodeIterator(nil) + it := trie.NodeIterator(nil) for it.Next(true) { if it.Hash() == (common.Hash{}) { continue diff --git a/trie/proof.go b/trie/proof.go index be0e8bc5c9..a90d76bb15 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -43,11 +43,7 @@ import ( // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. -func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return ErrCommitted - } +func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { // Collect all nodes on the path to key. var ( prefix []byte @@ -95,6 +91,10 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { defer returnHasherToPool(hasher) for i, n := range nodes { + if fromLevel > 0 { + fromLevel-- + continue + } var hn node n, hn = hasher.proofHash(n) if hash, ok := hn.(hashNode); ok || i == 0 { @@ -117,8 +117,8 @@ func (t *Trie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { // If the trie does not contain a value for key, the returned proof contains all // nodes of the longest existing prefix of the key (at least the root node), ending // with the node that proves the absence of the key. 
-func (t *StateTrie) Prove(key []byte, proofDb ethdb.KeyValueWriter) error { - return t.trie.Prove(key, proofDb) +func (t *StateTrie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error { + return t.trie.Prove(key, fromLevel, proofDb) } // VerifyProof checks merkle proofs. The given proof must contain the value for diff --git a/trie/proof_test.go b/trie/proof_test.go index 42d6fda662..b62668810c 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -32,13 +32,13 @@ import ( "encoding/binary" "fmt" mrand "math/rand" + "sort" "testing" "github.com/ava-labs/subnet-evm/core/rawdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb/memorydb" - "golang.org/x/exp/slices" ) // Prng is a pseudo random number generator seeded by strong randomness. @@ -67,13 +67,13 @@ func makeProvers(trie *Trie) []func(key []byte) *memorydb.Database { // Create a direct trie based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { proof := memorydb.New() - trie.Prove(key, proof) + trie.Prove(key, 0, proof) return proof }) // Create a leaf iterator based Merkle prover provers = append(provers, func(key []byte) *memorydb.Database { proof := memorydb.New() - if it := NewIterator(trie.MustNodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) { + if it := NewIterator(trie.NodeIterator(key)); it.Next() && bytes.Equal(key, it.Key) { for _, p := range it.Prove() { proof.Put(crypto.Keccak256(p), p) } @@ -160,7 +160,7 @@ func TestMissingKeyProof(t *testing.T) { for i, key := range []string{"a", "j", "l", "z"} { proof := memorydb.New() - trie.Prove([]byte(key), proof) + trie.Prove([]byte(key), 0, proof) if proof.Len() != 1 { t.Errorf("test %d: proof should have one element", i) @@ -175,24 +175,30 @@ func TestMissingKeyProof(t *testing.T) { } } +type entrySlice []*kv + +func (p entrySlice) Len() int { return len(p) } +func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } +func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + // TestRangeProof tests normal range proof with both edge proofs // as the existent proof. The test cases are generated randomly. func TestRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -212,11 +218,11 @@ func TestRangeProof(t *testing.T) { // The test cases are generated randomly. 
func TestRangeProofWithNonExistentProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 @@ -240,10 +246,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { if bytes.Compare(last, entries[end-1].k) < 0 { continue } - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -261,10 +267,10 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { proof := memorydb.New() first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var k [][]byte @@ -284,21 +290,21 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { // - There exists a gap between the last element and the right edge proof func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) // Case 1 start, end := 100, 200 first := decreaseKey(common.CopyBytes(entries[start].k)) proof := memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } start = 105 // Gap created @@ -317,10 +323,10 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { start, end = 100, 200 last := increaseKey(common.CopyBytes(entries[end-1].k)) proof = memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } end = 195 // Capped slice @@ -341,17 +347,17 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { // non-existent one. func TestOneElementRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) // One element with existent edge proof, both edge proofs // point to the SAME key. 
start := 1000 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } _, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -363,10 +369,10 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 first := decreaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -378,10 +384,10 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 last := increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -393,10 +399,10 @@ func TestOneElementRangeProof(t *testing.T) { start = 1000 first, last = decreaseKey(common.CopyBytes(entries[start].k)), increaseKey(common.CopyBytes(entries[start].k)) proof = memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) @@ -412,10 +418,10 @@ func TestOneElementRangeProof(t *testing.T) { first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last = entry.k proof = memorydb.New() - if err := tinyTrie.Prove(first, proof); err != nil { + if err := tinyTrie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := tinyTrie.Prove(last, proof); err != nil { + if err := tinyTrie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof) @@ -428,11 +434,11 @@ func TestOneElementRangeProof(t *testing.T) { // The edge proofs can be nil. func TestAllElementsProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var k [][]byte var v [][]byte @@ -447,10 +453,10 @@ func TestAllElementsProof(t *testing.T) { // With edge proofs, it should still work. 
proof := memorydb.New() - if err := trie.Prove(entries[0].k, proof); err != nil { + if err := trie.Prove(entries[0].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[len(entries)-1].k, proof); err != nil { + if err := trie.Prove(entries[len(entries)-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof) @@ -462,10 +468,10 @@ func TestAllElementsProof(t *testing.T) { proof = memorydb.New() first := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof) @@ -478,21 +484,21 @@ func TestAllElementsProof(t *testing.T) { func TestSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - var entries []*kv + var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} trie.MustUpdate(value.k, value.v) entries = append(entries, value) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1} for _, pos := range cases { proof := memorydb.New() - if err := trie.Prove(common.Hash{}.Bytes(), proof); err != nil { + if err := trie.Prove(common.Hash{}.Bytes(), 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[pos].k, proof); err != nil { + if err := trie.Prove(entries[pos].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } k := make([][]byte, 0) @@ -513,22 +519,22 @@ func TestSingleSideRangeProof(t *testing.T) { func TestReverseSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - var entries []*kv + var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} trie.MustUpdate(value.k, value.v) entries = append(entries, value) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var cases = []int{0, 1, 50, 100, 1000, 2000, len(entries) - 1} for _, pos := range cases { proof := memorydb.New() - if err := trie.Prove(entries[pos].k, proof); err != nil { + if err := trie.Prove(entries[pos].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } last := common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - if err := trie.Prove(last.Bytes(), proof); err != nil { + if err := trie.Prove(last.Bytes(), 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } k := make([][]byte, 0) @@ -549,20 +555,20 @@ func TestReverseSingleSideRangeProof(t *testing.T) { // The prover is expected to detect the error. 
func TestBadRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) for i := 0; i < 500; i++ { start := mrand.Intn(len(entries)) end := mrand.Intn(len(entries)-start) + start + 1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -628,10 +634,10 @@ func TestGappedRangeProof(t *testing.T) { } first, last := 2, 8 proof := memorydb.New() - if err := trie.Prove(entries[first].k, proof); err != nil { + if err := trie.Prove(entries[first].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[last-1].k, proof); err != nil { + if err := trie.Prove(entries[last-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -652,11 +658,11 @@ func TestGappedRangeProof(t *testing.T) { // TestSameSideProofs tests the element is not in the range covered by proofs func TestSameSideProofs(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) pos := 1000 first := decreaseKey(common.CopyBytes(entries[pos].k)) @@ -664,10 +670,10 @@ func TestSameSideProofs(t *testing.T) { last := decreaseKey(common.CopyBytes(entries[pos].k)) proof := memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) @@ -680,10 +686,10 @@ func TestSameSideProofs(t *testing.T) { last = increaseKey(last) proof = memorydb.New() - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(last, proof); err != nil { + if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) @@ -694,13 +700,13 @@ func TestSameSideProofs(t *testing.T) { func TestHasRightElement(t *testing.T) { trie := NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - var entries []*kv + var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} trie.MustUpdate(value.k, value.v) entries = append(entries, value) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var cases = []struct { start int @@ -728,23 +734,23 @@ func TestHasRightElement(t *testing.T) { ) if c.start == -1 { firstKey, start = common.Hash{}.Bytes(), 0 - if err := trie.Prove(firstKey, proof); err != nil { + if err := trie.Prove(firstKey, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } else { firstKey = entries[c.start].k - if 
err := trie.Prove(entries[c.start].k, proof); err != nil { + if err := trie.Prove(entries[c.start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } if c.end == -1 { lastKey, end = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").Bytes(), len(entries) - if err := trie.Prove(lastKey, proof); err != nil { + if err := trie.Prove(lastKey, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } else { lastKey = entries[c.end-1].k - if err := trie.Prove(entries[c.end-1].k, proof); err != nil { + if err := trie.Prove(entries[c.end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } } @@ -768,11 +774,11 @@ func TestHasRightElement(t *testing.T) { // The first edge proof must be a non-existent proof. func TestEmptyRangeProof(t *testing.T) { trie, vals := randomTrie(4096) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var cases = []struct { pos int @@ -784,7 +790,7 @@ func TestEmptyRangeProof(t *testing.T) { for _, c := range cases { proof := memorydb.New() first := increaseKey(common.CopyBytes(entries[c.pos].k)) - if err := trie.Prove(first, proof); err != nil { + if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof) @@ -803,11 +809,11 @@ func TestEmptyRangeProof(t *testing.T) { func TestBloatedProof(t *testing.T) { // Use a small trie trie, kvs := nonRandomTrie(100) - var entries []*kv + var entries entrySlice for _, kv := range kvs { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var keys [][]byte var vals [][]byte @@ -815,7 +821,7 @@ func TestBloatedProof(t *testing.T) { // In the 'malicious' case, we add proofs for every single item // (but only one key/value pair used as leaf) for i, entry := range entries { - trie.Prove(entry.k, proof) + trie.Prove(entry.k, 0, proof) if i == 50 { keys = append(keys, entry.k) vals = append(vals, entry.v) @@ -824,8 +830,8 @@ func TestBloatedProof(t *testing.T) { // For reference, we use the same function, but _only_ prove the first // and last element want := memorydb.New() - trie.Prove(keys[0], want) - trie.Prove(keys[len(keys)-1], want) + trie.Prove(keys[0], 0, want) + trie.Prove(keys[len(keys)-1], 0, want) if _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof); err != nil { t.Fatalf("expected bloated proof to succeed, got %v", err) @@ -837,11 +843,11 @@ func TestBloatedProof(t *testing.T) { // noop technically, but practically should be rejected. 
func TestEmptyValueRangeProof(t *testing.T) { trie, values := randomTrie(512) - var entries []*kv + var entries entrySlice for _, kv := range values { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) // Create a new entry with a slightly modified key mid := len(entries) / 2 @@ -858,10 +864,10 @@ func TestEmptyValueRangeProof(t *testing.T) { start, end := 1, len(entries)-1 proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -881,11 +887,11 @@ func TestEmptyValueRangeProof(t *testing.T) { // practically should be rejected. func TestAllElementsEmptyValueRangeProof(t *testing.T) { trie, values := randomTrie(512) - var entries []*kv + var entries entrySlice for _, kv := range values { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) // Create a new entry with a slightly modified key mid := len(entries) / 2 @@ -953,7 +959,7 @@ func BenchmarkProve(b *testing.B) { for i := 0; i < b.N; i++ { kv := vals[keys[i%len(keys)]] proofs := memorydb.New() - if trie.Prove(kv.k, proofs); proofs.Len() == 0 { + if trie.Prove(kv.k, 0, proofs); proofs.Len() == 0 { b.Fatalf("zero length proof for %x", kv.k) } } @@ -967,7 +973,7 @@ func BenchmarkVerifyProof(b *testing.B) { for k := range vals { keys = append(keys, k) proof := memorydb.New() - trie.Prove([]byte(k), proof) + trie.Prove([]byte(k), 0, proof) proofs = append(proofs, proof) } @@ -987,19 +993,19 @@ func BenchmarkVerifyRangeProof5000(b *testing.B) { benchmarkVerifyRangeProof(b, func benchmarkVerifyRangeProof(b *testing.B, size int) { trie, vals := randomTrie(8192) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) start := 2 end := start + size proof := memorydb.New() - if err := trie.Prove(entries[start].k, proof); err != nil { + if err := trie.Prove(entries[start].k, 0, proof); err != nil { b.Fatalf("Failed to prove the first node %v", err) } - if err := trie.Prove(entries[end-1].k, proof); err != nil { + if err := trie.Prove(entries[end-1].k, 0, proof); err != nil { b.Fatalf("Failed to prove the last node %v", err) } var keys [][]byte @@ -1024,11 +1030,11 @@ func BenchmarkVerifyRangeNoProof1000(b *testing.B) { benchmarkVerifyRangeNoProof func benchmarkVerifyRangeNoProof(b *testing.B, size int) { trie, vals := randomTrie(size) - var entries []*kv + var entries entrySlice for _, kv := range vals { entries = append(entries, kv) } - slices.SortFunc(entries, (*kv).cmp) + sort.Sort(entries) var keys [][]byte var values [][]byte @@ -1098,10 +1104,10 @@ func TestRangeProofKeysWithSharedPrefix(t *testing.T) { proof := memorydb.New() start := common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000000000") end := common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") - if err := trie.Prove(start, proof); err != nil { + if err := trie.Prove(start, 0, proof); err != nil { t.Fatalf("failed to prove start: %v", err) } - if err := trie.Prove(end, proof); err != nil { + if err := trie.Prove(end, 0, proof); err != nil { t.Fatalf("failed to prove end: %v", err) } diff --git 
a/trie/secure_trie.go b/trie/secure_trie.go index ef29bb8404..21c4f83075 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -96,12 +96,7 @@ func (t *StateTrie) MustGet(key []byte) []byte { // If the specified storage slot is not in the trie, nil will be returned. // If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) GetStorage(_ common.Address, key []byte) ([]byte, error) { - enc, err := t.trie.Get(t.hashKey(key)) - if err != nil || len(enc) == 0 { - return nil, err - } - _, content, _, err := rlp.Split(enc) - return content, err + return t.trie.Get(t.hashKey(key)) } // GetAccount attempts to retrieve an account with provided account address. @@ -163,8 +158,7 @@ func (t *StateTrie) MustUpdate(key, value []byte) { // If a node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) UpdateStorage(_ common.Address, key, value []byte) error { hk := t.hashKey(key) - v, _ := rlp.EncodeToBytes(value) - err := t.trie.Update(hk, v) + err := t.trie.Update(hk, value) if err != nil { return err } @@ -186,10 +180,6 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun return nil } -func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error { - return nil -} - // MustDelete removes any existing value for key from the trie. This function // will omit any encountered error but just print out an error message. func (t *StateTrie) MustDelete(key []byte) { @@ -233,7 +223,7 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage -func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { +func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { // Write all the pre-images to the actual disk database if len(t.getSecKeyCache()) > 0 { if t.preimages != nil { @@ -264,18 +254,12 @@ func (t *StateTrie) Copy() *StateTrie { } } -// NodeIterator returns an iterator that returns nodes of the underlying trie. -// Iteration starts at the key after the given start key. -func (t *StateTrie) NodeIterator(start []byte) (NodeIterator, error) { +// NodeIterator returns an iterator that returns nodes of the underlying trie. Iteration +// starts at the key after the given start key. +func (t *StateTrie) NodeIterator(start []byte) NodeIterator { return t.trie.NodeIterator(start) } -// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered -// error but just print out an error message. -func (t *StateTrie) MustNodeIterator(start []byte) NodeIterator { - return t.trie.MustNodeIterator(start) -} - // hashKey returns the hash of key as an ephemeral buffer. // The caller must not hold onto the return value because it will become // invalid on the next call to hashKey or secKey. 
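
NOTE (illustrative aside, not part of the patch): the secure_trie.go hunks above restore the pre-geth-v1.12.2 trie API used throughout the test diffs in this series: Prove regains its fromLevel argument, and Trie.Commit and NodeIterator drop their error returns (the trie also stays usable after Commit, since the `committed` flag is removed). A minimal standalone sketch of the post-revert call shapes, assuming only the subnet-evm `trie` package and the calls already visible in the test diffs; the `main` wrapper is hypothetical:

	package main

	import (
		"fmt"

		"github.com/ava-labs/subnet-evm/core/rawdb"
		"github.com/ava-labs/subnet-evm/trie"
		"github.com/ethereum/go-ethereum/ethdb/memorydb"
	)

	func main() {
		// Build a small in-memory trie, as the tests above do.
		tr := trie.NewEmpty(trie.NewDatabase(rawdb.NewMemoryDatabase()))
		tr.MustUpdate([]byte("foo"), []byte("bar"))

		// Post-revert Prove takes a fromLevel argument (0 means the
		// proof starts at the root); pre-revert it was Prove(key, proofDb).
		proof := memorydb.New()
		if err := tr.Prove([]byte("foo"), 0, proof); err != nil {
			fmt.Println("prove failed:", err)
		}

		// Post-revert Commit returns (root, nodeset) with no error;
		// pre-revert it was root, nodes, err := tr.Commit(false).
		root, _ := tr.Commit(false)
		fmt.Println("root:", root)
	}

Pre-revert, the same two calls would read Prove(key, proofDb) and root, nodes, err := tr.Commit(false), which is exactly the signature change the hunks above and below walk back.
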
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index 99935378e0..ff17ed2ddf 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -70,8 +70,8 @@ func makeTestStateTrie() (*Database, *StateTrie, map[string][]byte) { trie.MustUpdate(key, val) } } - root, nodes, _ := trie.Commit(false) - if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + root, nodes := trie.Commit(false) + if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } // Re-create the trie based on the new state diff --git a/trie/sync_test.go b/trie/sync_test.go index 1fda202276..fcf4863a02 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -62,8 +62,8 @@ func makeTestTrie(scheme string) (ethdb.Database, *Database, *StateTrie, map[str trie.MustUpdate(key, val) } } - root, nodes, _ := trie.Commit(false) - if err := triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil); err != nil { + root, nodes := trie.Commit(false) + if err := triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)); err != nil { panic(fmt.Errorf("failed to commit db %v", err)) } if err := triedb.Commit(root, false); err != nil { diff --git a/trie/testutil/utils.go b/trie/testutil/utils.go deleted file mode 100644 index 88411efec5..0000000000 --- a/trie/testutil/utils.go +++ /dev/null @@ -1,71 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package testutil - -import ( - crand "crypto/rand" - "encoding/binary" - mrand "math/rand" - - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -// Prng is a pseudo random number generator seeded by strong randomness. -// The randomness is printed on startup in order to make failures reproducible. -var prng = initRand() - -func initRand() *mrand.Rand { - var seed [8]byte - crand.Read(seed[:]) - rnd := mrand.New(mrand.NewSource(int64(binary.LittleEndian.Uint64(seed[:])))) - return rnd -} - -// RandBytes generates a random byte slice with specified length. -func RandBytes(n int) []byte { - r := make([]byte, n) - prng.Read(r) - return r -} - -// RandomHash generates a random blob of data and returns it as a hash. 
-func RandomHash() common.Hash { - return common.BytesToHash(RandBytes(common.HashLength)) -} - -// RandomAddress generates a random blob of data and returns it as an address. -func RandomAddress() common.Address { - return common.BytesToAddress(RandBytes(common.AddressLength)) -} - -// RandomNode generates a random node. -func RandomNode() *trienode.Node { - val := RandBytes(100) - return trienode.New(crypto.Keccak256Hash(val), val) -} diff --git a/trie/tracer.go b/trie/tracer.go index 5786af4d3e..e847050805 100644 --- a/trie/tracer.go +++ b/trie/tracer.go @@ -17,6 +17,7 @@ package trie import ( + "github.com/ava-labs/subnet-evm/trie/trienode" "github.com/ethereum/go-ethereum/common" ) @@ -113,18 +114,16 @@ func (t *tracer) copy() *tracer { } } -// deletedNodes returns a list of node paths which are deleted from the trie. -func (t *tracer) deletedNodes() []string { - var paths []string +// markDeletions puts all tracked deletions into the provided nodeset. +func (t *tracer) markDeletions(set *trienode.NodeSet) { for path := range t.deletes { // It's possible a few deleted nodes were embedded // in their parent before, the deletions can be no // effect by deleting nothing, filter them out. - _, ok := t.accessList[path] + prev, ok := t.accessList[path] if !ok { continue } - paths = append(paths, path) + set.AddNode([]byte(path), trienode.NewWithPrev(common.Hash{}, nil, prev)) } - return paths } diff --git a/trie/tracer_test.go b/trie/tracer_test.go index 06e48578d7..d5a55e38ce 100644 --- a/trie/tracer_test.go +++ b/trie/tracer_test.go @@ -70,8 +70,8 @@ func testTrieTracer(t *testing.T, vals []struct{ k, v string }) { } insertSet := copySet(trie.tracer.inserts) // copy before commit deleteSet := copySet(trie.tracer.deletes) // copy before commit - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) seen := setKeys(iterNodes(db, root)) if !compareSet(insertSet, seen) { @@ -136,8 +136,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, val := range vals { trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -151,8 +151,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, val := range vals { trie.MustUpdate([]byte(val.k), randBytes(32)) } - root, nodes, _ = trie.Commit(false) - db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -169,8 +169,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { keys = append(keys, string(key)) trie.MustUpdate(key, randBytes(32)) } - root, nodes, _ = trie.Commit(false) - db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -184,8 +184,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, key := range keys 
{ trie.MustUpdate([]byte(key), nil) } - root, nodes, _ = trie.Commit(false) - db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -199,8 +199,8 @@ func testAccessList(t *testing.T, vals []struct{ k, v string }) { for _, val := range vals { trie.MustUpdate([]byte(val.k), nil) } - root, nodes, _ = trie.Commit(false) - db.Update(root, parent, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, nodes); err != nil { @@ -218,22 +218,22 @@ func TestAccessListLeak(t *testing.T) { for _, val := range standard { trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) var cases = []struct { op func(tr *Trie) }{ { func(tr *Trie) { - it := tr.MustNodeIterator(nil) + it := tr.NodeIterator(nil) for it.Next(true) { } }, }, { func(tr *Trie) { - it := NewIterator(tr.MustNodeIterator(nil)) + it := NewIterator(tr.NodeIterator(nil)) for it.Next() { } }, @@ -241,7 +241,7 @@ func TestAccessListLeak(t *testing.T) { { func(tr *Trie) { for _, val := range standard { - tr.Prove([]byte(val.k), rawdb.NewMemoryDatabase()) + tr.Prove([]byte(val.k), 0, rawdb.NewMemoryDatabase()) } }, }, @@ -268,8 +268,8 @@ func TestTinyTree(t *testing.T) { for _, val := range tiny { trie.MustUpdate([]byte(val.k), randBytes(32)) } - root, set, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(set), nil) + root, set := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(set)) parent := root trie, _ = New(TrieID(root), db) @@ -277,8 +277,8 @@ func TestTinyTree(t *testing.T) { for _, val := range tiny { trie.MustUpdate([]byte(val.k), []byte(val.v)) } - root, set, _ = trie.Commit(false) - db.Update(root, parent, 0, trienode.NewWithNodeSet(set), nil) + root, set = trie.Commit(false) + db.Update(root, parent, trienode.NewWithNodeSet(set)) trie, _ = New(TrieID(root), db) if err := verifyAccessList(orig, trie, set); err != nil { @@ -300,7 +300,7 @@ func compareSet(setA, setB map[string]struct{}) bool { func forNodes(tr *Trie) map[string][]byte { var ( - it = tr.MustNodeIterator(nil) + it = tr.NodeIterator(nil) nodes = make(map[string][]byte) ) for it.Next(true) { @@ -319,7 +319,7 @@ func iterNodes(db *Database, root common.Hash) map[string][]byte { func forHashedNodes(tr *Trie) map[string][]byte { var ( - it = tr.MustNodeIterator(nil) + it = tr.NodeIterator(nil) nodes = make(map[string][]byte) ) for it.Next(true) { diff --git a/trie/trie.go b/trie/trie.go index 168f2b9730..2104fd1a83 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -49,10 +49,6 @@ type Trie struct { root node owner common.Hash - // Flag whether the commit operation is already performed. If so the - // trie is not usable(latest states is invisible). - committed bool - // Keep track of the number leaves which have been inserted since the last // hashing operation. This number will not directly map to the number of // actually unhashed nodes. @@ -74,12 +70,11 @@ func (t *Trie) newFlag() nodeFlag { // Copy returns a copy of Trie. 
func (t *Trie) Copy() *Trie { return &Trie{ - root: t.root, - owner: t.owner, - committed: t.committed, - unhashed: t.unhashed, - reader: t.reader, - tracer: t.tracer.copy(), + root: t.root, + owner: t.owner, + unhashed: t.unhashed, + reader: t.reader, + tracer: t.tracer.copy(), } } @@ -89,7 +84,7 @@ func (t *Trie) Copy() *Trie { // zero hash or the sha3 hash of an empty string, then trie is initially // empty, otherwise, the root node must be present in database or returns // a MissingNodeError if not. -func New(id *ID, db *Database) (*Trie, error) { +func New(id *ID, db NodeReader) (*Trie, error) { reader, err := newTrieReader(id.StateRoot, id.Owner, db) if err != nil { return nil, err @@ -115,24 +110,10 @@ func NewEmpty(db *Database) *Trie { return tr } -// MustNodeIterator is a wrapper of NodeIterator and will omit any encountered -// error but just print out an error message. -func (t *Trie) MustNodeIterator(start []byte) NodeIterator { - it, err := t.NodeIterator(start) - if err != nil { - log.Error("Unhandled trie error in Trie.NodeIterator", "err", err) - } - return it -} - // NodeIterator returns an iterator that returns nodes of the trie. Iteration starts at // the key after the given start key. -func (t *Trie) NodeIterator(start []byte) (NodeIterator, error) { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return nil, ErrCommitted - } - return newNodeIterator(t, start), nil +func (t *Trie) NodeIterator(start []byte) NodeIterator { + return newNodeIterator(t, start) } // MustGet is a wrapper of Get and will omit any encountered error but just @@ -151,10 +132,6 @@ func (t *Trie) MustGet(key []byte) []byte { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) Get(key []byte) ([]byte, error) { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return nil, ErrCommitted - } value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0) if err == nil && didResolve { t.root = newroot @@ -214,10 +191,6 @@ func (t *Trie) MustGetNode(path []byte) ([]byte, int) { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) GetNode(path []byte) ([]byte, int, error) { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return nil, 0, ErrCommitted - } item, newroot, resolved, err := t.getNode(t.root, compactToHex(path), 0) if err != nil { return nil, resolved, err @@ -310,10 +283,6 @@ func (t *Trie) MustUpdate(key, value []byte) { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) Update(key, value []byte) error { - // Short circuit if the trie is already committed and not usable. - if t.committed { - return ErrCommitted - } return t.update(key, value) } @@ -428,10 +397,6 @@ func (t *Trie) MustDelete(key []byte) { // If the requested node is not present in trie, no error will be returned. // If the trie is corrupted, a MissingNodeError is returned. func (t *Trie) Delete(key []byte) error { - // Short circuit if the trie is already committed and not usable. 
- if t.committed { - return ErrCommitted - } t.unhashed++ k := keybytesToHex(key) _, n, err := t.delete(t.root, nil, k) @@ -617,25 +582,17 @@ func (t *Trie) Hash() common.Hash { // The returned nodeset can be nil if the trie is clean (nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage -func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) { +func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { defer t.tracer.reset() - defer func() { - t.committed = true - }() + + nodes := trienode.NewNodeSet(t.owner) + t.tracer.markDeletions(nodes) + // Trie is empty and can be classified into two types of situations: - // (a) The trie was empty and no update happens => return nil - // (b) The trie was non-empty and all nodes are dropped => return - // the node set includes all deleted nodes + // - The trie was empty and no update happens + // - The trie was non-empty and all nodes are dropped if t.root == nil { - paths := t.tracer.deletedNodes() - if len(paths) == 0 { - return types.EmptyRootHash, nil, nil // case (a) - } - nodes := trienode.NewNodeSet(t.owner) - for _, path := range paths { - nodes.AddNode([]byte(path), trienode.NewDeleted()) - } - return types.EmptyRootHash, nodes, nil // case (b) + return types.EmptyRootHash, nodes } // Derive the hash for all dirty nodes first. We hold the assumption // in the following procedure that all nodes are hashed. @@ -647,14 +604,10 @@ func (t *Trie) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet, error) // Replace the root node with the origin hash in order to // ensure all resolved nodes are dropped after the commit. t.root = hashedNode - return rootHash, nil, nil - } - nodes := trienode.NewNodeSet(t.owner) - for _, path := range t.tracer.deletedNodes() { - nodes.AddNode([]byte(path), trienode.NewDeleted()) + return rootHash, nil } t.root = newCommitter(nodes, t.tracer, collectLeaf).Commit(t.root) - return rootHash, nodes, nil + return rootHash, nodes } // hashRoot calculates the root hash of the given trie @@ -678,5 +631,4 @@ func (t *Trie) Reset() { t.owner = common.Hash{} t.unhashed = 0 t.tracer.reset() - t.committed = false } diff --git a/trie/trie_reader.go b/trie/trie_reader.go index 64ba0f14b1..1112f9d245 100644 --- a/trie/trie_reader.go +++ b/trie/trie_reader.go @@ -27,24 +27,26 @@ package trie import ( - "github.com/ava-labs/subnet-evm/core/types" + "fmt" + "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" ) // Reader wraps the Node method of a backing trie store. type Reader interface { - // Node retrieves the trie node blob with the provided trie identifier, node path and - // the corresponding node hash. No error will be returned if the node is not found. - // - // When looking up nodes in the account trie, 'owner' is the zero hash. For contract - // storage trie nodes, 'owner' is the hash of the account address that containing the - // storage. - // - // TODO(rjl493456442): remove the 'hash' parameter, it's redundant in PBSS. + // Node retrieves the RLP-encoded trie node blob with the provided trie + // identifier, node path and the corresponding node hash. No error will + // be returned if the node is not found. Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) } +// NodeReader wraps all the necessary functions for accessing trie node. 
+type NodeReader interface { + // Reader returns a reader for accessing all trie nodes with provided + // state root. Nil is returned in case the state is not available. + Reader(root common.Hash) Reader +} + // trieReader is a wrapper of the underlying node reader. It's not safe // for concurrent usage. type trieReader struct { @@ -54,16 +56,10 @@ type trieReader struct { } // newTrieReader initializes the trie reader with the given node reader. -func newTrieReader(stateRoot, owner common.Hash, db *Database) (*trieReader, error) { - if stateRoot == (common.Hash{}) || stateRoot == types.EmptyRootHash { - if stateRoot == (common.Hash{}) { - log.Error("Zero state root hash!") - } - return &trieReader{owner: owner}, nil - } - reader, err := db.Reader(stateRoot) - if err != nil { - return nil, &MissingNodeError{Owner: owner, NodeHash: stateRoot, err: err} +func newTrieReader(stateRoot, owner common.Hash, db NodeReader) (*trieReader, error) { + reader := db.Reader(stateRoot) + if reader == nil { + return nil, fmt.Errorf("state not found #%x", stateRoot) } return &trieReader{owner: owner, reader: reader}, nil } diff --git a/trie/trie_test.go b/trie/trie_test.go index 82db275e3d..f986f8128a 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -87,9 +87,9 @@ func TestMissingRoot(t *testing.T) { func TestMissingNode(t *testing.T) { testMissingNode(t, false, rawdb.HashScheme) - testMissingNode(t, false, rawdb.PathScheme) + //testMissingNode(t, false, rawdb.PathScheme) testMissingNode(t, true, rawdb.HashScheme) - testMissingNode(t, true, rawdb.PathScheme) + //testMissingNode(t, true, rawdb.PathScheme) } func testMissingNode(t *testing.T, memonly bool, scheme string) { @@ -99,8 +99,8 @@ func testMissingNode(t *testing.T, memonly bool, scheme string) { trie := NewEmpty(triedb) updateString(trie, "120000", "qwerqwerqwerqwerqwerqwerqwerqwer") updateString(trie, "123456", "asdfasdfasdfasdfasdfasdfasdfasdf") - root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) if !memonly { require.NoError(t, triedb.Commit(root, false)) @@ -188,7 +188,7 @@ func TestInsert(t *testing.T) { updateString(trie, "A", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") exp = common.HexToHash("d23786fb4a010da3ce639d66d5e904a11dbc02746d1ce25029e53290cabf28ab") - root, _, _ = trie.Commit(false) + root, _ = trie.Commit(false) if root != exp { t.Errorf("case 2: exp %x got %x", exp, root) } @@ -213,8 +213,8 @@ func TestGet(t *testing.T) { if i == 1 { return } - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) trie, _ = New(TrieID(root), db) } } @@ -285,8 +285,8 @@ func TestReplication(t *testing.T) { for _, val := range vals { updateString(trie, val.k, val.v) } - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // create a new trie on top of the database and check that lookups work. 
trie2, err := New(TrieID(root), db) @@ -298,14 +298,14 @@ func TestReplication(t *testing.T) { t.Errorf("trie2 doesn't have %q => %q", kv.k, kv.v) } } - hash, nodes, _ := trie2.Commit(false) + hash, nodes := trie2.Commit(false) if hash != root { t.Errorf("root failure. expected %x got %x", root, hash) } // recreate the trie after commit if nodes != nil { - db.Update(hash, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(hash, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) } trie2, err = New(TrieID(hash), db) if err != nil { @@ -433,44 +433,44 @@ func verifyAccessList(old *Trie, new *Trie, set *trienode.NodeSet) error { if !ok || n.IsDeleted() { return errors.New("expect new node") } - //if len(n.Prev) > 0 { - // return errors.New("unexpected origin value") - //} + if len(n.Prev) > 0 { + return errors.New("unexpected origin value") + } } // Check deletion set - for path := range deletes { + for path, blob := range deletes { n, ok := set.Nodes[path] if !ok || !n.IsDeleted() { return errors.New("expect deleted node") } - //if len(n.Prev) == 0 { - // return errors.New("expect origin value") - //} - //if !bytes.Equal(n.Prev, blob) { - // return errors.New("invalid origin value") - //} + if len(n.Prev) == 0 { + return errors.New("expect origin value") + } + if !bytes.Equal(n.Prev, blob) { + return errors.New("invalid origin value") + } } // Check update set - for path := range updates { + for path, blob := range updates { n, ok := set.Nodes[path] if !ok || n.IsDeleted() { return errors.New("expect updated node") } - //if len(n.Prev) == 0 { - // return errors.New("expect origin value") - //} - //if !bytes.Equal(n.Prev, blob) { - // return errors.New("invalid origin value") - //} + if len(n.Prev) == 0 { + return errors.New("expect origin value") + } + if !bytes.Equal(n.Prev, blob) { + return errors.New("invalid origin value") + } } return nil } func runRandTest(rt randTest) bool { var scheme = rawdb.HashScheme - if rand.Intn(2) == 0 { - scheme = rawdb.PathScheme - } + //if rand.Intn(2) == 0 { + // scheme = rawdb.PathScheme + //} var ( origin = types.EmptyRootHash triedb = newTestDatabase(rawdb.NewMemoryDatabase(), scheme) @@ -500,7 +500,7 @@ func runRandTest(rt randTest) bool { continue } proofDb := rawdb.NewMemoryDatabase() - err := tr.Prove(step.key, proofDb) + err := tr.Prove(step.key, 0, proofDb) if err != nil { rt[i].err = fmt.Errorf("failed for proving key %#x, %v", step.key, err) } @@ -511,9 +511,9 @@ func runRandTest(rt randTest) bool { case opHash: tr.Hash() case opCommit: - root, nodes, _ := tr.Commit(true) + root, nodes := tr.Commit(true) if nodes != nil { - triedb.Update(root, origin, 0, trienode.NewWithNodeSet(nodes), nil) + triedb.Update(root, origin, trienode.NewWithNodeSet(nodes)) } newtr, err := New(TrieID(root), triedb) if err != nil { @@ -531,7 +531,7 @@ func runRandTest(rt randTest) bool { origin = root case opItercheckhash: checktr := NewEmpty(triedb) - it := NewIterator(tr.MustNodeIterator(nil)) + it := NewIterator(tr.NodeIterator(nil)) for it.Next() { checktr.MustUpdate(it.Key, it.Value) } @@ -540,8 +540,8 @@ func runRandTest(rt randTest) bool { } case opNodeDiff: var ( - origIter = origTrie.MustNodeIterator(nil) - curIter = tr.MustNodeIterator(nil) + origIter = origTrie.NodeIterator(nil) + curIter = tr.NodeIterator(nil) origSeen = make(map[string]struct{}) curSeen = make(map[string]struct{}) ) @@ -727,7 +727,7 @@ func TestTinyTrie(t *testing.T) { t.Errorf("3: got %x, exp %x", root, exp) } checktr := 
NewEmpty(NewDatabase(rawdb.NewMemoryDatabase())) - it := NewIterator(trie.MustNodeIterator(nil)) + it := NewIterator(trie.NodeIterator(nil)) for it.Next() { checktr.MustUpdate(it.Key, it.Value) } @@ -751,7 +751,7 @@ func TestCommitAfterHash(t *testing.T) { if exp != root { t.Errorf("got %x, exp %x", root, exp) } - root, _, _ = trie.Commit(false) + root, _ = trie.Commit(false) if exp != root { t.Errorf("got %x, exp %x", root, exp) } @@ -854,8 +854,8 @@ func TestCommitSequence(t *testing.T) { trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } // Flush trie -> database - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // Flush memdb -> disk (sponge) db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { @@ -895,8 +895,8 @@ func TestCommitSequenceRandomBlobs(t *testing.T) { trie.MustUpdate(key, val) } // Flush trie -> database - root, nodes, _ := trie.Commit(false) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) // Flush memdb -> disk (sponge) db.Commit(root, false) if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) { @@ -934,9 +934,9 @@ func TestCommitSequenceStackTrie(t *testing.T) { stTrie.Update(key, val) } // Flush trie -> database - root, nodes, _ := trie.Commit(false) + root, nodes := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) db.Commit(root, false) // And flush stacktrie -> disk stRoot, err := stTrie.Commit() @@ -982,9 +982,9 @@ func TestCommitSequenceSmallRoot(t *testing.T) { trie.Update(key, []byte{0x1}) stTrie.Update(key, []byte{0x1}) // Flush trie -> database - root, nodes, _ := trie.Commit(false) + root, nodes := trie.Commit(false) // Flush memdb -> disk (sponge) - db.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + db.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) db.Commit(root, false) // And flush stacktrie -> disk stRoot, err := stTrie.Commit() @@ -1155,8 +1155,8 @@ func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [] trie.MustUpdate(crypto.Keccak256(addresses[i][:]), accounts[i]) } h := trie.Hash() - root, nodes, _ := trie.Commit(false) - triedb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil) + root, nodes := trie.Commit(false) + triedb.Update(root, types.EmptyRootHash, trienode.NewWithNodeSet(nodes)) b.StartTimer() triedb.Dereference(h) b.StopTimer() diff --git a/trie/triedb/hashdb/database.go b/trie/triedb/hashdb/database.go index e25e9c1ea4..cf9a6f2b17 100644 --- a/trie/triedb/hashdb/database.go +++ b/trie/triedb/hashdb/database.go @@ -28,7 +28,6 @@ package hashdb import ( "errors" - "fmt" "reflect" "sync" "time" @@ -37,7 +36,6 @@ import ( "github.com/ava-labs/subnet-evm/core/types" "github.com/ava-labs/subnet-evm/metrics" "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" @@ -45,35 +43,35 @@ import ( ) var ( - memcacheCleanHitMeter = 
metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil) - memcacheCleanMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil) - memcacheCleanReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil) - memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil) - - memcacheDirtyHitMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil) - memcacheDirtyMissMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil) - memcacheDirtyReadMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil) - memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil) - - memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("hashdb/memcache/dirty/size", nil) - memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("hashdb/memcache/dirty/childsize", nil) - memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("hashdb/memcache/dirty/nodes", nil) - - memcacheFlushMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/count", nil) - memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil) - memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/locktime", nil) - memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil) - memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil) - - memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil) - memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil) - memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil) - - memcacheCommitMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/count", nil) - memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil) - memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/locktime", nil) - memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil) - memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil) + memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) + memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) + memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) + memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) + + memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) + memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) + memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil) + memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) + + memcacheDirtySizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/size", nil) + memcacheDirtyChildSizeGauge = metrics.NewRegisteredGaugeFloat64("trie/memcache/dirty/childsize", nil) + memcacheDirtyNodesGauge = metrics.NewRegisteredGauge("trie/memcache/dirty/nodes", nil) + + memcacheFlushMeter = metrics.NewRegisteredMeter("trie/memcache/flush/count", nil) + memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) + memcacheFlushLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/locktime", nil) + memcacheFlushNodesMeter = 
metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) + memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) + + memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) + memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) + memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) + + memcacheCommitMeter = metrics.NewRegisteredMeter("trie/memcache/commit/count", nil) + memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) + memcacheCommitLockTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/locktime", nil) + memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) + memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) ) // ChildResolver defines the required method to decode the provided @@ -86,7 +84,6 @@ type cache interface { HasGet([]byte, []byte) ([]byte, bool) Del([]byte) Set([]byte, []byte) - SaveToFileConcurrent(string, int) error } // Database is an intermediate write layer between the trie data structures and @@ -260,7 +257,6 @@ func (db *Database) Reference(child common.Hash, parent common.Hash) { db.reference(child, parent) } -// reference is the private locked version of Reference. func (db *Database) reference(child common.Hash, parent common.Hash) { // If the node does not exist, it's a node pulled from disk, skip node, ok := db.dirties[child] @@ -307,7 +303,7 @@ func (db *Database) Dereference(root common.Hash) { memcacheDirtyNodesGauge.Update(int64(len(db.dirties))) memcacheGCTimeTimer.Update(time.Since(start)) - memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), @@ -464,7 +460,7 @@ func (db *Database) Cap(limit common.StorageSize) error { memcacheFlushMeter.Mark(1) memcacheFlushTimeTimer.Update(time.Since(start)) memcacheFlushLockTimeTimer.Update(lockTime + time.Since(lockStart)) - memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), @@ -515,7 +511,7 @@ func (db *Database) Commit(node common.Hash, report bool) error { memcacheCommitMeter.Mark(1) memcacheCommitTimeTimer.Update(time.Since(start)) memcacheCommitLockTimeTimer.Update(lockTime + time.Since(lockStart)) - memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize)) + memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) logger := log.Info @@ -613,7 +609,7 @@ func (db *Database) Initialized(genesisRoot common.Hash) bool { // Update inserts the dirty nodes in provided nodeset into database and link the // account trie with multiple storage tries if necessary. -func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { +func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { // Ensure the parent state is present and signal a warning if not. 
if parent != types.EmptyRootHash { if blob, _ := db.Node(parent); len(blob) == 0 { @@ -629,7 +625,7 @@ func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, n // UpdateAndReferenceRoot inserts the dirty nodes in provided nodeset into // database and links the account trie with multiple storage tries if necessary, // then adds a reference [from] root to the metaroot while holding the db's lock. -func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { +func (db *Database) UpdateAndReferenceRoot(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error { // Ensure the parent state is present and signal a warning if not. if parent != types.EmptyRootHash { if blob, _ := db.Node(parent); len(blob) == 0 { @@ -710,12 +706,8 @@ func (db *Database) Scheme() string { } // Reader retrieves a node reader belonging to the given state root. -// An error will be returned if the requested state is not available. -func (db *Database) Reader(root common.Hash) (*reader, error) { - if _, err := db.Node(root); err != nil { - return nil, fmt.Errorf("state %#x is not available, %v", root, err) - } - return &reader{db: db}, nil +func (db *Database) Reader(root common.Hash) *reader { + return &reader{db: db} } // reader is a state reader of Database which implements the Reader interface. diff --git a/trie/triedb/pathdb/database.go b/trie/triedb/pathdb/database.go deleted file mode 100644 index 17f27e3a25..0000000000 --- a/trie/triedb/pathdb/database.go +++ /dev/null @@ -1,373 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package pathdb - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/params" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" -) - -// maxDiffLayers is the maximum diff layers allowed in the layer tree. -const maxDiffLayers = 128 - -// layer is the interface implemented by all state layers which includes some -// public methods and some additional methods for internal usage. -type layer interface { - // Node retrieves the trie node with the node info. 
An error will be returned - // if the read operation exits abnormally. For example, if the layer is already - // stale, or the associated state is regarded as corrupted. Notably, no error - // will be returned if the requested node is not found in database. - Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) - - // rootHash returns the root hash for which this layer was made. - rootHash() common.Hash - - // stateID returns the associated state id of layer. - stateID() uint64 - - // parentLayer returns the subsequent layer of it, or nil if the disk was reached. - parentLayer() layer - - // update creates a new layer on top of the existing layer diff tree with - // the provided dirty trie nodes along with the state change set. - // - // Note, the maps are retained by the method to avoid copying everything. - update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer - - // journal commits an entire diff hierarchy to disk into a single journal entry. - // This is meant to be used during shutdown to persist the layer without - // flattening everything down (bad for reorgs). - journal(w io.Writer) error -} - -// Config contains the settings for database. -type Config struct { - StateLimit uint64 // Number of recent blocks to maintain state history for - CleanSize int // Maximum memory allowance (in bytes) for caching clean nodes - DirtySize int // Maximum memory allowance (in bytes) for caching dirty nodes - ReadOnly bool // Flag whether the database is opened in read only mode. -} - -var ( - // defaultCleanSize is the default memory allowance of clean cache. - defaultCleanSize = 16 * 1024 * 1024 - - // defaultBufferSize is the default memory allowance of node buffer - // that aggregates the writes from above until it's flushed into the - // disk. Do not increase the buffer size arbitrarily, otherwise the - // system pause time will increase when the database writes happen. - defaultBufferSize = 128 * 1024 * 1024 -) - -// Defaults contains default settings for Ethereum mainnet. -var Defaults = &Config{ - StateLimit: params.FullImmutabilityThreshold, - CleanSize: defaultCleanSize, - DirtySize: defaultBufferSize, -} - -// Database is a multiple-layered structure for maintaining in-memory trie nodes. -// It consists of one persistent base layer backed by a key-value store, on top -// of which arbitrarily many in-memory diff layers are stacked. The memory diffs -// can form a tree with branching, but the disk layer is singleton and common to -// all. If a reorg goes deeper than the disk layer, a batch of reverse diffs can -// be applied to rollback. The deepest reorg that can be handled depends on the -// amount of state histories tracked in the disk. -// -// At most one readable and writable database can be opened at the same time in -// the whole system which ensures that only one database writer can operate disk -// state. Unexpected open operations can cause the system to panic. -type Database struct { - // readOnly is the flag whether the mutation is allowed to be applied. - // It will be set automatically when the database is journaled during - // the shutdown to reject all following unexpected mutations. 
- readOnly bool // Indicator if database is opened in read only mode - bufferSize int // Memory allowance (in bytes) for caching dirty nodes - config *Config // Configuration for database - diskdb ethdb.Database // Persistent storage for matured trie nodes - tree *layerTree // The group for all known layers - lock sync.RWMutex // Lock to prevent mutations from happening at the same time - - // NOTE(freezer): This is disabled since we do not have a freezer. - // freezer *rawdb.ResettableFreezer // Freezer for storing trie histories, nil possible in tests -} - -// New attempts to load an already existing layer from a persistent key-value -// store (with a number of memory layers from a journal). If the journal is not -// matched with the base persistent layer, all the recorded diff layers are discarded. -func New(diskdb ethdb.Database, config *Config) *Database { - if config == nil { - config = Defaults - } - db := &Database{ - readOnly: config.ReadOnly, - bufferSize: config.DirtySize, - config: config, - diskdb: diskdb, - } - // Construct the layer tree by resolving the in-disk singleton state - // and in-memory layer journal. - db.tree = newLayerTree(db.loadLayers()) - - // NOTE(freezer): This is disabled since we do not have a freezer. - // Open the freezer for state history if the passed database contains an - // ancient store. Otherwise, all the relevant functionalities are disabled. - // - // Because the freezer can only be opened once at the same time, this - // mechanism also ensures that at most one **non-readOnly** database - // is opened at the same time to prevent accidental mutation. - //if ancient, err := diskdb.AncientDatadir(); err == nil && ancient != "" && !db.readOnly { - // freezer, err := rawdb.NewStateHistoryFreezer(ancient, false) - // if err != nil { - // log.Crit("Failed to open state history freezer", "err", err) - // } - // db.freezer = freezer - - // // Truncate the extra state histories above in freezer in case - // // it's not aligned with the disk layer. - // pruned, err := truncateFromHead(db.diskdb, freezer, db.tree.bottom().stateID()) - // if err != nil { - // log.Crit("Failed to truncate extra state histories", "err", err) - // } - // if pruned != 0 { - // log.Warn("Truncated extra state histories", "number", pruned) - // } - //} - log.Warn("Path-based state scheme is an experimental feature") - return db -} - -// Reader retrieves a layer belonging to the given state root. -func (db *Database) Reader(root common.Hash) (layer, error) { - l := db.tree.get(root) - if l == nil { - return nil, fmt.Errorf("state %#x is not available", root) - } - return l, nil -} - -// Update adds a new layer into the tree, if that can be linked to an existing -// old parent. It is disallowed to insert a disk layer (the origin of all). Apart -// from that this function will flatten the extra diff layers at bottom into disk -// to only keep 128 diff layers in memory by default. -// -// The passed in maps(nodes, states) will be retained to avoid copying everything. -// Therefore, these maps must not be changed afterwards. -func (db *Database) Update(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { - // Hold the lock to prevent concurrent mutations. - db.lock.Lock() - defer db.lock.Unlock() - - // Short circuit if the database is in read only mode. 
- if db.readOnly { - return errSnapshotReadOnly - } - if err := db.tree.add(root, parentRoot, block, nodes, states); err != nil { - return err - } - // Keep 128 diff layers in the memory, persistent layer is 129th. - // - head layer is paired with HEAD state - // - head-1 layer is paired with HEAD-1 state - // - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state - // - head-128 layer(disk layer) is paired with HEAD-128 state - return db.tree.cap(root, maxDiffLayers) -} - -// Commit traverses downwards the layer tree from a specified layer with the -// provided state root and all the layers below are flattened downwards. It -// can be used alone and mostly for test purposes. -func (db *Database) Commit(root common.Hash, report bool) error { - // Hold the lock to prevent concurrent mutations. - db.lock.Lock() - defer db.lock.Unlock() - - // Short circuit if the database is in read only mode. - if db.readOnly { - return errSnapshotReadOnly - } - return db.tree.cap(root, 0) -} - -// Reset rebuilds the database with the specified state as the base. -// -// - if target state is empty, clear the stored state and all layers on top -// - if target state is non-empty, ensure the stored state matches with it -// and clear all other layers on top. -func (db *Database) Reset(root common.Hash) error { - db.lock.Lock() - defer db.lock.Unlock() - - // Short circuit if the database is in read only mode. - if db.readOnly { - return errSnapshotReadOnly - } - batch := db.diskdb.NewBatch() - root = types.TrieRootHash(root) - if root == types.EmptyRootHash { - // Empty state is requested as the target, nuke out - // the root node and leave all others as dangling. - rawdb.DeleteAccountTrieNode(batch, nil) - } else { - // Ensure the requested state is existent before any - // action is applied. - _, hash := rawdb.ReadAccountTrieNode(db.diskdb, nil) - if hash != root { - return fmt.Errorf("state is mismatched, local: %x, target: %x", hash, root) - } - } - // Mark the disk layer as stale before applying any mutation. - db.tree.bottom().markStale() - - // Drop the stale state journal in persistent database and - // reset the persistent state id back to zero. - rawdb.DeleteTrieJournal(batch) - rawdb.WritePersistentStateID(batch, 0) - if err := batch.Write(); err != nil { - return err - } - // NOTE(freezer): This is disabled since we do not have a freezer. - // Clean up all state histories in freezer. Theoretically - // all root->id mappings should be removed as well. Since - // mappings can be huge and might take a while to clear - // them, just leave them in disk and wait for overwriting. - // if db.freezer != nil { - // if err := db.freezer.Reset(); err != nil { - // return err - // } - // } - // Re-construct a new disk layer backed by persistent state - // with **empty clean cache and node buffer**. - dl := newDiskLayer(root, 0, db, nil, newNodeBuffer(db.bufferSize, nil, 0)) - db.tree.reset(dl) - log.Info("Rebuilt trie database", "root", root) - return nil -} - -// Recover rollbacks the database to a specified historical point. -// The state is supported as the rollback destination only if it's -// canonical state and the corresponding trie histories are existent. -func (db *Database) Recover(root common.Hash, loader triestate.TrieLoader) error { - // NOTE(freezer): This is disabled since we do not have a freezer. - return errors.New("state rollback is non-supported") -} - -// Recoverable returns the indicator if the specified state is recoverable. 
-func (db *Database) Recoverable(root common.Hash) bool { - // Ensure the requested state is a known state. - root = types.TrieRootHash(root) - id := rawdb.ReadStateID(db.diskdb, root) - if id == nil { - return false - } - // Recoverable state must below the disk layer. The recoverable - // state only refers the state that is currently not available, - // but can be restored by applying state history. - dl := db.tree.bottom() - if *id >= dl.stateID() { - return false - } - return false - // NOTE(freezer): This is disabled since we do not have a freezer. - // Ensure the requested state is a canonical state and all state - // histories in range [id+1, disklayer.ID] are present and complete. - // parent := root - // return checkHistories(db.freezer, *id+1, dl.stateID()-*id, func(m *meta) error { - // if m.parent != parent { - // return errors.New("unexpected state history") - // } - // if len(m.incomplete) > 0 { - // return errors.New("incomplete state history") - // } - // parent = m.root - // return nil - // }) == nil -} - -// Close closes the trie database and the held freezer. -func (db *Database) Close() error { - db.lock.Lock() - defer db.lock.Unlock() - - db.readOnly = true - return nil - // NOTE(freezer): This is disabled since we do not have a freezer. - // if db.freezer == nil { - // return nil - // } - // return db.freezer.Close() -} - -// Size returns the current storage size of the memory cache in front of the -// persistent database layer. -func (db *Database) Size() (size common.StorageSize) { - db.tree.forEach(func(layer layer) { - if diff, ok := layer.(*diffLayer); ok { - size += common.StorageSize(diff.memory) - } - if disk, ok := layer.(*diskLayer); ok { - size += disk.size() - } - }) - return size -} - -// Initialized returns an indicator if the state data is already -// initialized in path-based scheme. -func (db *Database) Initialized(genesisRoot common.Hash) bool { - var inited bool - db.tree.forEach(func(layer layer) { - if layer.rootHash() != types.EmptyRootHash { - inited = true - } - }) - return inited -} - -// SetBufferSize sets the node buffer size to the provided value(in bytes). -func (db *Database) SetBufferSize(size int) error { - db.lock.Lock() - defer db.lock.Unlock() - - db.bufferSize = size - return db.tree.bottom().setBufferSize(db.bufferSize) -} - -// Scheme returns the node scheme used in the database. -func (db *Database) Scheme() string { - return rawdb.PathScheme -} diff --git a/trie/triedb/pathdb/database_test.go b/trie/triedb/pathdb/database_test.go deleted file mode 100644 index 2948e22140..0000000000 --- a/trie/triedb/pathdb/database_test.go +++ /dev/null @@ -1,563 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package pathdb - -import ( - "bytes" - "errors" - "fmt" - "math/big" - "math/rand" - "testing" - - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie/testutil" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rlp" - "github.com/stretchr/testify/require" -) - -func updateTrie(addrHash common.Hash, root common.Hash, dirties, cleans map[common.Hash][]byte) (common.Hash, *trienode.NodeSet) { - h, err := newTestHasher(addrHash, root, cleans) - if err != nil { - panic(fmt.Errorf("failed to create hasher, err: %w", err)) - } - for key, val := range dirties { - if len(val) == 0 { - h.Delete(key.Bytes()) - } else { - h.Update(key.Bytes(), val) - } - } - return h.Commit(false) -} - -func generateAccount(storageRoot common.Hash) types.StateAccount { - return types.StateAccount{ - Nonce: uint64(rand.Intn(100)), - Balance: big.NewInt(rand.Int63()), - CodeHash: testutil.RandBytes(32), - Root: storageRoot, - } -} - -const ( - createAccountOp int = iota - modifyAccountOp - deleteAccountOp - opLen -) - -type genctx struct { - accounts map[common.Hash][]byte - storages map[common.Hash]map[common.Hash][]byte - accountOrigin map[common.Address][]byte - storageOrigin map[common.Address]map[common.Hash][]byte - nodes *trienode.MergedNodeSet -} - -func newCtx() *genctx { - return &genctx{ - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - accountOrigin: make(map[common.Address][]byte), - storageOrigin: make(map[common.Address]map[common.Hash][]byte), - nodes: trienode.NewMergedNodeSet(), - } -} - -type tester struct { - db *Database - roots []common.Hash - preimages map[common.Hash]common.Address - accounts map[common.Hash][]byte - storages map[common.Hash]map[common.Hash][]byte - - // state snapshots - snapAccounts map[common.Hash]map[common.Hash][]byte - snapStorages map[common.Hash]map[common.Hash]map[common.Hash][]byte -} - -func newTester(t *testing.T) *tester { - var ( - // NOTE(freezer): This is disabled since we do not have a freezer. 
- // disk, _ = rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), t.TempDir(), "", false) - disk = rawdb.NewMemoryDatabase() - db = New(disk, &Config{CleanSize: 256 * 1024, DirtySize: 256 * 1024}) - obj = &tester{ - db: db, - preimages: make(map[common.Hash]common.Address), - accounts: make(map[common.Hash][]byte), - storages: make(map[common.Hash]map[common.Hash][]byte), - snapAccounts: make(map[common.Hash]map[common.Hash][]byte), - snapStorages: make(map[common.Hash]map[common.Hash]map[common.Hash][]byte), - } - ) - for i := 0; i < 2*128; i++ { - var parent = types.EmptyRootHash - if len(obj.roots) != 0 { - parent = obj.roots[len(obj.roots)-1] - } - root, nodes, states := obj.generate(parent) - if err := db.Update(root, parent, uint64(i), nodes, states); err != nil { - panic(fmt.Errorf("failed to update state changes, err: %w", err)) - } - obj.roots = append(obj.roots, root) - } - return obj -} - -func (t *tester) release() { - t.db.Close() - t.db.diskdb.Close() -} - -func (t *tester) randAccount() (common.Address, []byte) { - for addrHash, account := range t.accounts { - return t.preimages[addrHash], account - } - return common.Address{}, nil -} - -func (t *tester) generateStorage(ctx *genctx, addr common.Address) common.Hash { - var ( - addrHash = crypto.Keccak256Hash(addr.Bytes()) - storage = make(map[common.Hash][]byte) - origin = make(map[common.Hash][]byte) - ) - for i := 0; i < 10; i++ { - v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) - hash := testutil.RandomHash() - - storage[hash] = v - origin[hash] = nil - } - root, set := updateTrie(addrHash, types.EmptyRootHash, storage, nil) - - ctx.storages[addrHash] = storage - ctx.storageOrigin[addr] = origin - ctx.nodes.Merge(set) - return root -} - -func (t *tester) mutateStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash { - var ( - addrHash = crypto.Keccak256Hash(addr.Bytes()) - storage = make(map[common.Hash][]byte) - origin = make(map[common.Hash][]byte) - ) - for hash, val := range t.storages[addrHash] { - origin[hash] = val - storage[hash] = nil - - if len(origin) == 3 { - break - } - } - for i := 0; i < 3; i++ { - v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) - hash := testutil.RandomHash() - - storage[hash] = v - origin[hash] = nil - } - root, set := updateTrie(crypto.Keccak256Hash(addr.Bytes()), root, storage, t.storages[addrHash]) - - ctx.storages[addrHash] = storage - ctx.storageOrigin[addr] = origin - ctx.nodes.Merge(set) - return root -} - -func (t *tester) clearStorage(ctx *genctx, addr common.Address, root common.Hash) common.Hash { - var ( - addrHash = crypto.Keccak256Hash(addr.Bytes()) - storage = make(map[common.Hash][]byte) - origin = make(map[common.Hash][]byte) - ) - for hash, val := range t.storages[addrHash] { - origin[hash] = val - storage[hash] = nil - } - root, set := updateTrie(addrHash, root, storage, t.storages[addrHash]) - if root != types.EmptyRootHash { - panic("failed to clear storage trie") - } - ctx.storages[addrHash] = storage - ctx.storageOrigin[addr] = origin - ctx.nodes.Merge(set) - return root -} - -func (t *tester) generate(parent common.Hash) (common.Hash, *trienode.MergedNodeSet, *triestate.Set) { - var ( - ctx = newCtx() - dirties = make(map[common.Hash]struct{}) - ) - for i := 0; i < 20; i++ { - switch rand.Intn(opLen) { - case createAccountOp: - // account creation - addr := testutil.RandomAddress() - addrHash := crypto.Keccak256Hash(addr.Bytes()) - if _, ok := t.accounts[addrHash]; ok { - continue - } - if _, 
ok := dirties[addrHash]; ok { - continue - } - dirties[addrHash] = struct{}{} - - root := t.generateStorage(ctx, addr) - ctx.accounts[addrHash] = types.SlimAccountRLP(generateAccount(root)) - ctx.accountOrigin[addr] = nil - t.preimages[addrHash] = addr - - case modifyAccountOp: - // account mutation - addr, account := t.randAccount() - if addr == (common.Address{}) { - continue - } - addrHash := crypto.Keccak256Hash(addr.Bytes()) - if _, ok := dirties[addrHash]; ok { - continue - } - dirties[addrHash] = struct{}{} - - acct, _ := types.FullAccount(account) - stRoot := t.mutateStorage(ctx, addr, acct.Root) - newAccount := types.SlimAccountRLP(generateAccount(stRoot)) - - ctx.accounts[addrHash] = newAccount - ctx.accountOrigin[addr] = account - - case deleteAccountOp: - // account deletion - addr, account := t.randAccount() - if addr == (common.Address{}) { - continue - } - addrHash := crypto.Keccak256Hash(addr.Bytes()) - if _, ok := dirties[addrHash]; ok { - continue - } - dirties[addrHash] = struct{}{} - - acct, _ := types.FullAccount(account) - if acct.Root != types.EmptyRootHash { - t.clearStorage(ctx, addr, acct.Root) - } - ctx.accounts[addrHash] = nil - ctx.accountOrigin[addr] = account - } - } - root, set := updateTrie(common.Hash{}, parent, ctx.accounts, t.accounts) - ctx.nodes.Merge(set) - - // Save state snapshot before commit - t.snapAccounts[parent] = copyAccounts(t.accounts) - t.snapStorages[parent] = copyStorages(t.storages) - - // Commit all changes to live state set - for addrHash, account := range ctx.accounts { - if len(account) == 0 { - delete(t.accounts, addrHash) - } else { - t.accounts[addrHash] = account - } - } - for addrHash, slots := range ctx.storages { - if _, ok := t.storages[addrHash]; !ok { - t.storages[addrHash] = make(map[common.Hash][]byte) - } - for sHash, slot := range slots { - if len(slot) == 0 { - delete(t.storages[addrHash], sHash) - } else { - t.storages[addrHash][sHash] = slot - } - } - } - return root, ctx.nodes, triestate.New(ctx.accountOrigin, ctx.storageOrigin, nil) -} - -// lastRoot returns the latest root hash, or empty if nothing is cached. -func (t *tester) lastHash() common.Hash { - if len(t.roots) == 0 { - return common.Hash{} - } - return t.roots[len(t.roots)-1] -} - -func (t *tester) verifyState(root common.Hash) error { - reader, err := t.db.Reader(root) - if err != nil { - return err - } - _, err = reader.Node(common.Hash{}, nil, root) - if err != nil { - return errors.New("root node is not available") - } - for addrHash, account := range t.snapAccounts[root] { - blob, err := reader.Node(common.Hash{}, addrHash.Bytes(), crypto.Keccak256Hash(account)) - if err != nil || !bytes.Equal(blob, account) { - return fmt.Errorf("account is mismatched: %w", err) - } - } - for addrHash, slots := range t.snapStorages[root] { - for hash, slot := range slots { - blob, err := reader.Node(addrHash, hash.Bytes(), crypto.Keccak256Hash(slot)) - if err != nil || !bytes.Equal(blob, slot) { - return fmt.Errorf("slot is mismatched: %w", err) - } - } - } - return nil -} - -// bottomIndex returns the index of current disk layer. -func (t *tester) bottomIndex() int { - bottom := t.db.tree.bottom() - for i := 0; i < len(t.roots); i++ { - if t.roots[i] == bottom.rootHash() { - return i - } - } - return -1 -} - -func TestDatabaseRollback(t *testing.T) { - // Verify state histories - tester := newTester(t) - defer tester.release() - - // NOTE(freezer): This is disabled since we do not have a freezer. 
- // if err := tester.verifyHistory(); err != nil { - // t.Fatalf("Invalid state history, err: %v", err) - // } - // Revert database from top to bottom - for i := tester.bottomIndex(); i >= 0; i-- { - root := tester.roots[i] - parent := types.EmptyRootHash - if i > 0 { - parent = tester.roots[i-1] - } - loader := newHashLoader(tester.snapAccounts[root], tester.snapStorages[root]) - // NOTE(freezer): This is disabled since we do not have a freezer. - // if err := tester.db.Recover(parent, loader); err != nil { - // t.Fatalf("Failed to revert db, err: %v", err) - // } - require.ErrorContains(t, tester.db.Recover(parent, loader), "state rollback is non-supported") - tester.verifyState(parent) - } - // NOTE(freezer): This is disabled since we do not have a freezer. - // if tester.db.tree.len() != 1 { - // t.Fatal("Only disk layer is expected") - // } -} - -func TestDatabaseRecoverable(t *testing.T) { - var ( - tester = newTester(t) - index = tester.bottomIndex() - ) - defer tester.release() - - var cases = []struct { - root common.Hash - expect bool - }{ - // Unknown state should be unrecoverable - {common.Hash{0x1}, false}, - - // Initial state should be recoverable - {types.EmptyRootHash, true}, - - // Initial state should be recoverable - {common.Hash{}, true}, - - // Layers below current disk layer are recoverable - {tester.roots[index-1], true}, - - // Disklayer itself is not recoverable, since it's - // available for accessing. - {tester.roots[index], false}, - - // Layers above current disk layer are not recoverable - // since they are available for accessing. - {tester.roots[index+1], false}, - } - for i, c := range cases { - result := tester.db.Recoverable(c.root) - // NOTE(freezer): This is disabled since we do not have a freezer. - // originally was `result != c.expect` - if result != false { - t.Fatalf("case: %d, unexpected result, want %t, got %t", i, c.expect, result) - } - } -} - -func TestReset(t *testing.T) { - var ( - tester = newTester(t) - // index = tester.bottomIndex() - ) - defer tester.release() - - // Reset database to unknown target, should reject it - if err := tester.db.Reset(testutil.RandomHash()); err == nil { - t.Fatal("Failed to reject invalid reset") - } - // Reset database to state persisted in the disk - if err := tester.db.Reset(types.EmptyRootHash); err != nil { - t.Fatalf("Failed to reset database %v", err) - } - // Ensure journal is deleted from disk - if blob := rawdb.ReadTrieJournal(tester.db.diskdb); len(blob) != 0 { - t.Fatal("Failed to clean journal") - } - // NOTE(freezer): This is disabled since we do not have a freezer. 
- // Ensure all trie histories are removed - // for i := 0; i <= index; i++ { - // _, err := readHistory(tester.db.freezer, uint64(i+1)) - // if err == nil { - // t.Fatalf("Failed to clean state history, index %d", i+1) - // } - // } - // Verify layer tree structure, single disk layer is expected - if tester.db.tree.len() != 1 { - t.Fatalf("Extra layer kept %d", tester.db.tree.len()) - } - if tester.db.tree.bottom().rootHash() != types.EmptyRootHash { - t.Fatalf("Root hash is not matched exp %x got %x", types.EmptyRootHash, tester.db.tree.bottom().rootHash()) - } -} - -func TestCommit(t *testing.T) { - tester := newTester(t) - defer tester.release() - - if err := tester.db.Commit(tester.lastHash(), false); err != nil { - t.Fatalf("Failed to cap database, err: %v", err) - } - // Verify layer tree structure, single disk layer is expected - if tester.db.tree.len() != 1 { - t.Fatal("Layer tree structure is invalid") - } - if tester.db.tree.bottom().rootHash() != tester.lastHash() { - t.Fatal("Layer tree structure is invalid") - } - // Verify states - if err := tester.verifyState(tester.lastHash()); err != nil { - t.Fatalf("State is invalid, err: %v", err) - } - // NOTE(freezer): This is disabled since we do not have a freezer. - // Verify state histories - // if err := tester.verifyHistory(); err != nil { - // t.Fatalf("State history is invalid, err: %v", err) - // } -} - -func TestJournal(t *testing.T) { - tester := newTester(t) - defer tester.release() - - if err := tester.db.Journal(tester.lastHash()); err != nil { - t.Errorf("Failed to journal, err: %v", err) - } - tester.db.Close() - tester.db = New(tester.db.diskdb, nil) - - // Verify states including disk layer and all diff on top. - for i := 0; i < len(tester.roots); i++ { - if i >= tester.bottomIndex() { - if err := tester.verifyState(tester.roots[i]); err != nil { - t.Fatalf("Invalid state, err: %v", err) - } - continue - } - if err := tester.verifyState(tester.roots[i]); err == nil { - t.Fatal("Unexpected state") - } - } -} - -func TestCorruptedJournal(t *testing.T) { - tester := newTester(t) - defer tester.release() - - if err := tester.db.Journal(tester.lastHash()); err != nil { - t.Errorf("Failed to journal, err: %v", err) - } - tester.db.Close() - _, root := rawdb.ReadAccountTrieNode(tester.db.diskdb, nil) - - // Mutate the journal in disk, it should be regarded as invalid - blob := rawdb.ReadTrieJournal(tester.db.diskdb) - blob[0] = 1 - rawdb.WriteTrieJournal(tester.db.diskdb, blob) - - // Verify states, all not-yet-written states should be discarded - tester.db = New(tester.db.diskdb, nil) - for i := 0; i < len(tester.roots); i++ { - if tester.roots[i] == root { - if err := tester.verifyState(root); err != nil { - t.Fatalf("Disk state is corrupted, err: %v", err) - } - continue - } - if err := tester.verifyState(tester.roots[i]); err == nil { - t.Fatal("Unexpected state") - } - } -} - -// copyAccounts returns a deep-copied account set of the provided one. -func copyAccounts(set map[common.Hash][]byte) map[common.Hash][]byte { - copied := make(map[common.Hash][]byte, len(set)) - for key, val := range set { - copied[key] = common.CopyBytes(val) - } - return copied -} - -// copyStorages returns a deep-copied storage set of the provided one. 
-func copyStorages(set map[common.Hash]map[common.Hash][]byte) map[common.Hash]map[common.Hash][]byte { - copied := make(map[common.Hash]map[common.Hash][]byte, len(set)) - for addrHash, subset := range set { - copied[addrHash] = make(map[common.Hash][]byte, len(subset)) - for key, val := range subset { - copied[addrHash][key] = common.CopyBytes(val) - } - } - return copied -} diff --git a/trie/triedb/pathdb/difflayer.go b/trie/triedb/pathdb/difflayer.go deleted file mode 100644 index 52f08bf46a..0000000000 --- a/trie/triedb/pathdb/difflayer.go +++ /dev/null @@ -1,184 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package pathdb - -import ( - "fmt" - "sync" - - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" -) - -// diffLayer represents a collection of modifications made to the in-memory tries -// along with associated state changes after running a block on top. -// -// The goal of a diff layer is to act as a journal, tracking recent modifications -// made to the state, that have not yet graduated into a semi-immutable state. -type diffLayer struct { - // Immutables - root common.Hash // Root hash to which this layer diff belongs to - id uint64 // Corresponding state id - block uint64 // Associated block number - nodes map[common.Hash]map[string]*trienode.Node // Cached trie nodes indexed by owner and path - states *triestate.Set // Associated state change set for building history - memory uint64 // Approximate guess as to how much memory we use - - parent layer // Parent layer modified by this one, never nil, **can be changed** - lock sync.RWMutex // Lock used to protect parent -} - -// newDiffLayer creates a new diff layer on top of an existing layer. 
-func newDiffLayer(parent layer, root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { - var ( - size int64 - count int - ) - dl := &diffLayer{ - root: root, - id: id, - block: block, - nodes: nodes, - states: states, - parent: parent, - } - for _, subset := range nodes { - for path, n := range subset { - dl.memory += uint64(n.Size() + len(path)) - size += int64(len(n.Blob) + len(path)) - } - count += len(subset) - } - if states != nil { - dl.memory += uint64(states.Size()) - } - dirtyWriteMeter.Mark(size) - diffLayerNodesMeter.Mark(int64(count)) - diffLayerBytesMeter.Mark(int64(dl.memory)) - log.Debug("Created new diff layer", "id", id, "block", block, "nodes", count, "size", common.StorageSize(dl.memory)) - return dl -} - -// rootHash implements the layer interface, returning the root hash of -// corresponding state. -func (dl *diffLayer) rootHash() common.Hash { - return dl.root -} - -// stateID implements the layer interface, returning the state id of the layer. -func (dl *diffLayer) stateID() uint64 { - return dl.id -} - -// parentLayer implements the layer interface, returning the subsequent -// layer of the diff layer. -func (dl *diffLayer) parentLayer() layer { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.parent -} - -// node retrieves the node with provided node information. It's the internal -// version of Node function with additional accessed layer tracked. No error -// will be returned if node is not found. -func (dl *diffLayer) node(owner common.Hash, path []byte, hash common.Hash, depth int) ([]byte, error) { - // Hold the lock, ensure the parent won't be changed during the - // state accessing. - dl.lock.RLock() - defer dl.lock.RUnlock() - - // If the trie node is known locally, return it - subset, ok := dl.nodes[owner] - if ok { - n, ok := subset[string(path)] - if ok { - // If the trie node is not hash matched, or marked as removed, - // bubble up an error here. It shouldn't happen at all. - if n.Hash != hash { - dirtyFalseMeter.Mark(1) - log.Error("Unexpected trie node in diff layer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) - return nil, newUnexpectedNodeError("diff", hash, n.Hash, owner, path) - } - dirtyHitMeter.Mark(1) - dirtyNodeHitDepthHist.Update(int64(depth)) - dirtyReadMeter.Mark(int64(len(n.Blob))) - return n.Blob, nil - } - } - // Trie node unknown to this layer, resolve from parent - if diff, ok := dl.parent.(*diffLayer); ok { - return diff.node(owner, path, hash, depth+1) - } - // Failed to resolve through diff layers, fallback to disk layer - return dl.parent.Node(owner, path, hash) -} - -// Node implements the layer interface, retrieving the trie node blob with the -// provided node information. No error will be returned if the node is not found. -func (dl *diffLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { - return dl.node(owner, path, hash, 0) -} - -// update implements the layer interface, creating a new layer on top of the -// existing layer tree with the specified data items. -func (dl *diffLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { - return newDiffLayer(dl, root, id, block, nodes, states) -} - -// persist flushes the diff layer and all its parent layers to disk layer. 
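(`persist` itself follows after this aside.) The `node` lookup above is the heart of the layered design: each diff layer answers from its own node set and otherwise defers to its parent, bottoming out at the disk layer. A minimal, self-contained sketch of that pattern, using toy types (`toyDisk`, `toyDiff`) rather than the real ones:

```go
package main

import "fmt"

type toyLayer interface {
	node(path string) (string, bool)
}

type toyDisk struct{ kv map[string]string }

func (d *toyDisk) node(path string) (string, bool) {
	v, ok := d.kv[path]
	return v, ok
}

type toyDiff struct {
	parent toyLayer
	nodes  map[string]string
}

func (d *toyDiff) node(path string) (string, bool) {
	if v, ok := d.nodes[path]; ok {
		return v, true // hit in this diff layer
	}
	return d.parent.node(path) // defer to the parent, bottoming out at disk
}

func main() {
	disk := &toyDisk{kv: map[string]string{"a": "base"}}
	mid := &toyDiff{parent: disk, nodes: map[string]string{"b": "block1"}}
	top := &toyDiff{parent: mid, nodes: map[string]string{"a": "block2"}}
	fmt.Println(top.node("a")) // block2 true (shadowed by the top layer)
	fmt.Println(top.node("b")) // block1 true (resolved one layer down)
	fmt.Println(top.node("c")) // "" false (missing everywhere)
}
```

The real implementation additionally checks node hashes on every hit and recurses only through diff layers before switching to the disk layer's `Node` method, but the shadowing order is the same.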
-func (dl *diffLayer) persist(force bool) (layer, error) { - if parent, ok := dl.parentLayer().(*diffLayer); ok { - // Hold the lock to prevent any read operation until the new - // parent is linked correctly. - dl.lock.Lock() - - // The merging of diff layers starts at the bottom-most layer, - // therefore we recurse down here, flattening on the way up - // (diffToDisk). - result, err := parent.persist(force) - if err != nil { - dl.lock.Unlock() - return nil, err - } - dl.parent = result - dl.lock.Unlock() - } - return diffToDisk(dl, force) -} - -// diffToDisk merges a bottom-most diff into the persistent disk layer underneath -// it. The method will panic if called onto a non-bottom-most diff layer. -func diffToDisk(layer *diffLayer, force bool) (layer, error) { - disk, ok := layer.parentLayer().(*diskLayer) - if !ok { - panic(fmt.Sprintf("unknown layer type: %T", layer.parentLayer())) - } - return disk.commit(layer, force) -} diff --git a/trie/triedb/pathdb/difflayer_test.go b/trie/triedb/pathdb/difflayer_test.go deleted file mode 100644 index 5520a5779e..0000000000 --- a/trie/triedb/pathdb/difflayer_test.go +++ /dev/null @@ -1,180 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package pathdb - -import ( - "bytes" - "testing" - - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/trie/testutil" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ethereum/go-ethereum/common" -) - -func emptyLayer() *diskLayer { - return &diskLayer{ - db: New(rawdb.NewMemoryDatabase(), nil), - buffer: newNodeBuffer(defaultBufferSize, nil, 0), - } -} - -// goos: darwin -// goarch: arm64 -// pkg: github.com/ava-labs/subnet-evm/trie -// BenchmarkSearch128Layers -// BenchmarkSearch128Layers-8 243826 4755 ns/op -func BenchmarkSearch128Layers(b *testing.B) { benchmarkSearch(b, 0, 128) } - -// goos: darwin -// goarch: arm64 -// pkg: github.com/ava-labs/subnet-evm/trie -// BenchmarkSearch512Layers -// BenchmarkSearch512Layers-8 49686 24256 ns/op -func BenchmarkSearch512Layers(b *testing.B) { benchmarkSearch(b, 0, 512) } - -// goos: darwin -// goarch: arm64 -// pkg: github.com/ava-labs/subnet-evm/trie -// BenchmarkSearch1Layer -// BenchmarkSearch1Layer-8 14062725 88.40 ns/op -func BenchmarkSearch1Layer(b *testing.B) { benchmarkSearch(b, 127, 128) } - -func benchmarkSearch(b *testing.B, depth int, total int) { - var ( - npath []byte - nhash common.Hash - nblob []byte - ) - // First, we set up 128 diff layers, with 3K items each - fill := func(parent layer, index int) *diffLayer { - nodes := make(map[common.Hash]map[string]*trienode.Node) - nodes[common.Hash{}] = make(map[string]*trienode.Node) - for i := 0; i < 3000; i++ { - var ( - path = testutil.RandBytes(32) - node = testutil.RandomNode() - ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) - if npath == nil && depth == index { - npath = common.CopyBytes(path) - nblob = common.CopyBytes(node.Blob) - nhash = node.Hash - } - } - return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) - } - var layer layer - layer = emptyLayer() - for i := 0; i < total; i++ { - layer = fill(layer, i) - } - b.ResetTimer() - - var ( - have []byte - err error - ) - for i := 0; i < b.N; i++ { - have, err = layer.Node(common.Hash{}, npath, nhash) - if err != nil { - b.Fatal(err) - } - } - if !bytes.Equal(have, nblob) { - b.Fatalf("have %x want %x", have, nblob) - } -} - -// goos: darwin -// goarch: arm64 -// pkg: github.com/ava-labs/subnet-evm/trie -// BenchmarkPersist -// BenchmarkPersist-8 10 111252975 ns/op -func BenchmarkPersist(b *testing.B) { - // First, we set up 128 diff layers, with 3K items each - fill := func(parent layer) *diffLayer { - nodes := make(map[common.Hash]map[string]*trienode.Node) - nodes[common.Hash{}] = make(map[string]*trienode.Node) - for i := 0; i < 3000; i++ { - var ( - path = testutil.RandBytes(32) - node = testutil.RandomNode() - ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) - } - return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) - } - for i := 0; i < b.N; i++ { - b.StopTimer() - var layer layer - layer = emptyLayer() - for i := 1; i < 128; i++ { - layer = fill(layer) - } - b.StartTimer() - - dl, ok := layer.(*diffLayer) - if !ok { - break - } - dl.persist(false) - } -} - -// BenchmarkJournal benchmarks the performance for journaling the layers. 
-// -// BenchmarkJournal -// BenchmarkJournal-8 10 110969279 ns/op -func BenchmarkJournal(b *testing.B) { - b.SkipNow() - - // First, we set up 128 diff layers, with 3K items each - fill := func(parent layer) *diffLayer { - nodes := make(map[common.Hash]map[string]*trienode.Node) - nodes[common.Hash{}] = make(map[string]*trienode.Node) - for i := 0; i < 3000; i++ { - var ( - path = testutil.RandBytes(32) - node = testutil.RandomNode() - ) - nodes[common.Hash{}][string(path)] = trienode.New(node.Hash, node.Blob) - } - // TODO(rjl493456442) a non-nil state set is expected. - return newDiffLayer(parent, common.Hash{}, 0, 0, nodes, nil) - } - var layer layer - layer = emptyLayer() - for i := 0; i < 128; i++ { - layer = fill(layer) - } - b.ResetTimer() - - for i := 0; i < b.N; i++ { - layer.journal(new(bytes.Buffer)) - } -} diff --git a/trie/triedb/pathdb/disklayer.go b/trie/triedb/pathdb/disklayer.go deleted file mode 100644 index aa3287ccfd..0000000000 --- a/trie/triedb/pathdb/disklayer.go +++ /dev/null @@ -1,308 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package pathdb - -import ( - "errors" - "fmt" - "sync" - - "github.com/VictoriaMetrics/fastcache" - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "golang.org/x/crypto/sha3" -) - -// diskLayer is a low level persistent layer built on top of a key-value store. -type diskLayer struct { - root common.Hash // Immutable, root hash to which this layer was made for - id uint64 // Immutable, corresponding state id - db *Database // Path-based trie database - cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs - buffer *nodebuffer // Node buffer to aggregate writes - stale bool // Signals that the layer became stale (state progressed) - lock sync.RWMutex // Lock used to protect stale flag -} - -// newDiskLayer creates a new disk layer based on the passing arguments. -func newDiskLayer(root common.Hash, id uint64, db *Database, cleans *fastcache.Cache, buffer *nodebuffer) *diskLayer { - // Initialize a clean cache if the memory allowance is not zero - // or reuse the provided cache if it is not nil (inherited from - // the original disk layer). 
- if cleans == nil && db.config.CleanSize != 0 { - cleans = fastcache.New(db.config.CleanSize) - } - return &diskLayer{ - root: root, - id: id, - db: db, - cleans: cleans, - buffer: buffer, - } -} - -// root implements the layer interface, returning root hash of corresponding state. -func (dl *diskLayer) rootHash() common.Hash { - return dl.root -} - -// stateID implements the layer interface, returning the state id of disk layer. -func (dl *diskLayer) stateID() uint64 { - return dl.id -} - -// parent implements the layer interface, returning nil as there's no layer -// below the disk. -func (dl *diskLayer) parentLayer() layer { - return nil -} - -// isStale return whether this layer has become stale (was flattened across) or if -// it's still live. -func (dl *diskLayer) isStale() bool { - dl.lock.RLock() - defer dl.lock.RUnlock() - - return dl.stale -} - -// markStale sets the stale flag as true. -func (dl *diskLayer) markStale() { - dl.lock.Lock() - defer dl.lock.Unlock() - - if dl.stale { - panic("triedb disk layer is stale") // we've committed into the same base from two children, boom - } - dl.stale = true -} - -// Node implements the layer interface, retrieving the trie node with the -// provided node info. No error will be returned if the node is not found. -func (dl *diskLayer) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) { - dl.lock.RLock() - defer dl.lock.RUnlock() - - if dl.stale { - return nil, errSnapshotStale - } - // Try to retrieve the trie node from the not-yet-written - // node buffer first. Note the buffer is lock free since - // it's impossible to mutate the buffer before tagging the - // layer as stale. - n, err := dl.buffer.node(owner, path, hash) - if err != nil { - return nil, err - } - if n != nil { - dirtyHitMeter.Mark(1) - dirtyReadMeter.Mark(int64(len(n.Blob))) - return n.Blob, nil - } - dirtyMissMeter.Mark(1) - - // Try to retrieve the trie node from the clean memory cache - key := cacheKey(owner, path) - if dl.cleans != nil { - if blob := dl.cleans.Get(nil, key); len(blob) > 0 { - h := newHasher() - defer h.release() - - got := h.hash(blob) - if got == hash { - cleanHitMeter.Mark(1) - cleanReadMeter.Mark(int64(len(blob))) - return blob, nil - } - cleanFalseMeter.Mark(1) - log.Error("Unexpected trie node in clean cache", "owner", owner, "path", path, "expect", hash, "got", got) - } - cleanMissMeter.Mark(1) - } - // Try to retrieve the trie node from the disk. - var ( - nBlob []byte - nHash common.Hash - ) - if owner == (common.Hash{}) { - nBlob, nHash = rawdb.ReadAccountTrieNode(dl.db.diskdb, path) - } else { - nBlob, nHash = rawdb.ReadStorageTrieNode(dl.db.diskdb, owner, path) - } - if nHash != hash { - diskFalseMeter.Mark(1) - log.Error("Unexpected trie node in disk", "owner", owner, "path", path, "expect", hash, "got", nHash) - return nil, newUnexpectedNodeError("disk", hash, nHash, owner, path) - } - if dl.cleans != nil && len(nBlob) > 0 { - dl.cleans.Set(key, nBlob) - cleanWriteMeter.Mark(int64(len(nBlob))) - } - return nBlob, nil -} - -// update implements the layer interface, returning a new diff layer on top -// with the given state set. -func (dl *diskLayer) update(root common.Hash, id uint64, block uint64, nodes map[common.Hash]map[string]*trienode.Node, states *triestate.Set) *diffLayer { - return newDiffLayer(dl, root, id, block, nodes, states) -} - -// commit merges the given bottom-most diff layer into the node buffer -// and returns a newly constructed disk layer. 
Note the current disk -// layer must be tagged as stale first to prevent re-access. -func (dl *diskLayer) commit(bottom *diffLayer, force bool) (*diskLayer, error) { - dl.lock.Lock() - defer dl.lock.Unlock() - - // NOTE(freezer): This is disabled since we do not have a freezer. - // Construct and store the state history first. If crash happens - // after storing the state history but without flushing the - // corresponding states(journal), the stored state history will - // be truncated in the next restart. - // if dl.db.freezer != nil { - // err := writeHistory(dl.db.diskdb, dl.db.freezer, bottom, dl.db.config.StateLimit) - // if err != nil { - // return nil, err - // } - // } - // Mark the diskLayer as stale before applying any mutations on top. - dl.stale = true - - // Store the root->id lookup afterwards. All stored lookups are - // identified by the **unique** state root. It's impossible that - // in the same chain blocks are not adjacent but have the same - // root. - if dl.id == 0 { - rawdb.WriteStateID(dl.db.diskdb, dl.root, 0) - } - rawdb.WriteStateID(dl.db.diskdb, bottom.rootHash(), bottom.stateID()) - - // Construct a new disk layer by merging the nodes from the provided - // diff layer, and flush the content in disk layer if there are too - // many nodes cached. The clean cache is inherited from the original - // disk layer for reusing. - ndl := newDiskLayer(bottom.root, bottom.stateID(), dl.db, dl.cleans, dl.buffer.commit(bottom.nodes)) - err := ndl.buffer.flush(ndl.db.diskdb, ndl.cleans, ndl.id, force) - if err != nil { - return nil, err - } - return ndl, nil -} - -// nolint: unused -// revert applies the given state history and return a reverted disk layer. -func (dl *diskLayer) revert(h *history, loader triestate.TrieLoader) (*diskLayer, error) { - if h.meta.root != dl.rootHash() { - return nil, errUnexpectedHistory - } - // Reject if the provided state history is incomplete. It's due to - // a large construct SELF-DESTRUCT which can't be handled because - // of memory limitation. - if len(h.meta.incomplete) > 0 { - return nil, errors.New("incomplete state history") - } - if dl.id == 0 { - return nil, fmt.Errorf("%w: zero state id", errStateUnrecoverable) - } - // Apply the reverse state changes upon the current state. This must - // be done before holding the lock in order to access state in "this" - // layer. - nodes, err := triestate.Apply(h.meta.parent, h.meta.root, h.accounts, h.storages, loader) - if err != nil { - return nil, err - } - // Mark the diskLayer as stale before applying any mutations on top. - dl.lock.Lock() - defer dl.lock.Unlock() - - dl.stale = true - - // State change may be applied to node buffer, or the persistent - // state, depends on if node buffer is empty or not. If the node - // buffer is not empty, it means that the state transition that - // needs to be reverted is not yet flushed and cached in node - // buffer, otherwise, manipulate persistent state directly. - if !dl.buffer.empty() { - err := dl.buffer.revert(dl.db.diskdb, nodes) - if err != nil { - return nil, err - } - } else { - batch := dl.db.diskdb.NewBatch() - writeNodes(batch, nodes, dl.cleans) - rawdb.WritePersistentStateID(batch, dl.id-1) - if err := batch.Write(); err != nil { - log.Crit("Failed to write states", "err", err) - } - } - return newDiskLayer(h.meta.parent, dl.id-1, dl.db, dl.cleans, dl.buffer), nil -} - -// setBufferSize sets the node buffer size to the provided value. 
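(`setBufferSize` follows after this aside.) Earlier in this hunk, `diskLayer.Node` reads through three tiers: the not-yet-flushed node buffer, the clean cache, and finally persistent disk, promoting disk hits into the clean cache. A hedged sketch of that read path with plain maps and hypothetical names:

```go
package main

import "fmt"

// tiered mimics the disk-layer read path: dirty buffer, then clean cache,
// then disk, with disk hits promoted into the clean cache. Names are
// illustrative, not the real types.
type tiered struct {
	dirty, clean, disk map[string][]byte
}

func (t *tiered) node(key string) []byte {
	if v, ok := t.dirty[key]; ok {
		return v // aggregated but not yet flushed
	}
	if v, ok := t.clean[key]; ok {
		return v // served from the clean cache
	}
	v := t.disk[key]
	if len(v) > 0 {
		t.clean[key] = v // warm the cache for the next read
	}
	return v
}

func main() {
	t := &tiered{
		dirty: map[string][]byte{},
		clean: map[string][]byte{},
		disk:  map[string][]byte{"a": {0x01}},
	}
	fmt.Println(t.node("a"))  // [1], read from disk...
	fmt.Println(len(t.clean)) // 1: ...and promoted into the clean cache
}
```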
-func (dl *diskLayer) setBufferSize(size int) error { - dl.lock.RLock() - defer dl.lock.RUnlock() - - if dl.stale { - return errSnapshotStale - } - return dl.buffer.setSize(size, dl.db.diskdb, dl.cleans, dl.id) -} - -// size returns the approximate size of cached nodes in the disk layer. -func (dl *diskLayer) size() common.StorageSize { - dl.lock.RLock() - defer dl.lock.RUnlock() - - if dl.stale { - return 0 - } - return common.StorageSize(dl.buffer.size) -} - -// hasher is used to compute the sha256 hash of the provided data. -type hasher struct{ sha crypto.KeccakState } - -var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, -} - -func newHasher() *hasher { - return hasherPool.Get().(*hasher) -} - -func (h *hasher) hash(data []byte) common.Hash { - return crypto.HashData(h.sha, data) -} - -func (h *hasher) release() { - hasherPool.Put(h) -} diff --git a/trie/triedb/pathdb/errors.go b/trie/triedb/pathdb/errors.go deleted file mode 100644 index be6bf6c36e..0000000000 --- a/trie/triedb/pathdb/errors.go +++ /dev/null @@ -1,63 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package pathdb - -import ( - "errors" - "fmt" - - "github.com/ethereum/go-ethereum/common" -) - -var ( - // errSnapshotReadOnly is returned if the database is opened in read only mode - // and mutation is requested. - errSnapshotReadOnly = errors.New("read only") - - // errSnapshotStale is returned from data accessors if the underlying layer - // layer had been invalidated due to the chain progressing forward far enough - // to not maintain the layer's original state. - errSnapshotStale = errors.New("layer stale") - - // nolint: unused - // errUnexpectedHistory is returned if an unmatched state history is applied - // to the database for state rollback. - errUnexpectedHistory = errors.New("unexpected state history") - - // nolint: unused - // errStateUnrecoverable is returned if state is required to be reverted to - // a destination without associated state history available. - errStateUnrecoverable = errors.New("state is unrecoverable") - - // errUnexpectedNode is returned if the requested node with specified path is - // not hash matched with expectation. 
- errUnexpectedNode = errors.New("unexpected node") -) - -func newUnexpectedNodeError(loc string, expHash common.Hash, gotHash common.Hash, owner common.Hash, path []byte) error { - return fmt.Errorf("%w, loc: %s, node: (%x %v), %x!=%x", errUnexpectedNode, loc, owner, path, expHash, gotHash) -} diff --git a/trie/triedb/pathdb/history.go b/trie/triedb/pathdb/history.go deleted file mode 100644 index 83fc385185..0000000000 --- a/trie/triedb/pathdb/history.go +++ /dev/null @@ -1,496 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package pathdb - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "golang.org/x/exp/slices" -) - -// State history records the state changes involved in executing a block. The -// state can be reverted to the previous version by applying the associated -// history object (state reverse diff). State history objects are kept to -// guarantee that the system can perform state rollbacks in case of deep reorg. -// -// Each state transition will generate a state history object. Note that not -// every block has a corresponding state history object. If a block performs -// no state changes whatsoever, no state is created for it. Each state history -// will have a sequentially increasing number acting as its unique identifier. -// -// The state history is written to disk (ancient store) when the corresponding -// diff layer is merged into the disk layer. At the same time, system can prune -// the oldest histories according to config. -// -// Disk State -// ^ -// | -// +------------+ +---------+ +---------+ +---------+ -// | Init State |---->| State 1 |---->| ... |---->| State n | -// +------------+ +---------+ +---------+ +---------+ -// -// +-----------+ +------+ +-----------+ -// | History 1 |----> | ... |---->| History n | -// +-----------+ +------+ +-----------+ -// -// # Rollback -// -// If the system wants to roll back to a previous state n, it needs to ensure -// all history objects from n+1 up to the current disk layer are existent. The -// history objects are applied to the state in reverse order, starting from the -// current disk layer. 
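The rollback described above applies history objects newest-first until the target state is reached. A minimal sketch of that ordering, where the hypothetical `revertFn` stands in for the real history application (`triestate.Apply` plus the disk-layer revert):

```go
package main

import "fmt"

// rollback walks the state histories in reverse order, from the current
// state id down to (but not including) the target id.
func rollback(currentID, targetID uint64, revertFn func(id uint64) error) error {
	if targetID > currentID {
		return fmt.Errorf("target %d is ahead of current %d", targetID, currentID)
	}
	for id := currentID; id > targetID; id-- {
		if err := revertFn(id); err != nil {
			return fmt.Errorf("apply history %d: %w", id, err)
		}
	}
	return nil
}

func main() {
	_ = rollback(5, 2, func(id uint64) error {
		fmt.Println("reverting state history", id) // prints 5, 4, 3
		return nil
	})
}
```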
- -const ( - accountIndexSize = common.AddressLength + 13 // The length of encoded account index - slotIndexSize = common.HashLength + 5 // The length of encoded slot index - historyMetaSize = 9 + 2*common.HashLength // The length of fixed size part of meta object - - stateHistoryVersion = uint8(0) // initial version of state history structure. -) - -// Each state history entry is consisted of five elements: -// -// # metadata -// This object contains a few meta fields, such as the associated state root, -// block number, version tag and so on. This object may contain an extra -// accountHash list which means the storage changes belong to these accounts -// are not complete due to large contract destruction. The incomplete history -// can not be used for rollback and serving archive state request. -// -// # account index -// This object contains some index information of account. For example, offset -// and length indicate the location of the data belonging to the account. Besides, -// storageOffset and storageSlots indicate the storage modification location -// belonging to the account. -// -// The size of each account index is *fixed*, and all indexes are sorted -// lexicographically. Thus binary search can be performed to quickly locate a -// specific account. -// -// # account data -// Account data is a concatenated byte stream composed of all account data. -// The account data can be solved by the offset and length info indicated -// by corresponding account index. -// -// fixed size -// ^ ^ -// / \ -// +-----------------+-----------------+----------------+-----------------+ -// | Account index 1 | Account index 2 | ... | Account index N | -// +-----------------+-----------------+----------------+-----------------+ -// | -// | length -// offset |----------------+ -// v v -// +----------------+----------------+----------------+----------------+ -// | Account data 1 | Account data 2 | ... | Account data N | -// +----------------+----------------+----------------+----------------+ -// -// # storage index -// This object is similar with account index. It's also fixed size and contains -// the location info of storage slot data. -// -// # storage data -// Storage data is a concatenated byte stream composed of all storage slot data. -// The storage slot data can be solved by the location info indicated by -// corresponding account index and storage slot index. -// -// fixed size -// ^ ^ -// / \ -// +-----------------+-----------------+----------------+-----------------+ -// | Account index 1 | Account index 2 | ... | Account index N | -// +-----------------+-----------------+----------------+-----------------+ -// | -// | storage slots -// storage offset |-----------------------------------------------------+ -// v v -// +-----------------+-----------------+-----------------+ -// | storage index 1 | storage index 2 | storage index 3 | -// +-----------------+-----------------+-----------------+ -// | length -// offset |-------------+ -// v v -// +-------------+ -// | slot data 1 | -// +-------------+ - -// accountIndex describes the metadata belonging to an account. -type accountIndex struct { - address common.Address // The address of account - length uint8 // The length of account data, size limited by 255 - offset uint32 // The offset of item in account data table - storageOffset uint32 // The offset of storage index in storage index table - storageSlots uint32 // The number of mutated storage slots belonging to the account -} - -// encode packs account index into byte stream. 
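(The `encode` helper follows after this aside.) Because every account index record has the same width and the records are sorted lexicographically, a lookup is a plain binary search over the raw byte stream. A sketch under that assumption, with an illustrative 20+13-byte record mirroring `accountIndexSize` above:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// Illustrative record width: a 20-byte address followed by 13 metadata bytes.
const recSize = 20 + 13

// findAccount binary-searches a buffer of fixed-width, lexicographically
// sorted index records for the given address.
func findAccount(indexes []byte, addr [20]byte) (int, bool) {
	n := len(indexes) / recSize
	i := sort.Search(n, func(i int) bool {
		return bytes.Compare(indexes[i*recSize:i*recSize+20], addr[:]) >= 0
	})
	if i < n && bytes.Equal(indexes[i*recSize:i*recSize+20], addr[:]) {
		return i, true
	}
	return 0, false
}

func main() {
	var buf []byte
	for _, b := range []byte{0x01, 0x05, 0x09} { // addresses already sorted
		rec := make([]byte, recSize)
		rec[0] = b
		buf = append(buf, rec...)
	}
	var want [20]byte
	want[0] = 0x05
	fmt.Println(findAccount(buf, want)) // 1 true
}
```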
-func (i *accountIndex) encode() []byte { - var buf [accountIndexSize]byte - copy(buf[:], i.address.Bytes()) - buf[common.AddressLength] = i.length - binary.BigEndian.PutUint32(buf[common.AddressLength+1:], i.offset) - binary.BigEndian.PutUint32(buf[common.AddressLength+5:], i.storageOffset) - binary.BigEndian.PutUint32(buf[common.AddressLength+9:], i.storageSlots) - return buf[:] -} - -// decode unpacks account index from byte stream. -func (i *accountIndex) decode(blob []byte) { - i.address = common.BytesToAddress(blob[:common.AddressLength]) - i.length = blob[common.AddressLength] - i.offset = binary.BigEndian.Uint32(blob[common.AddressLength+1:]) - i.storageOffset = binary.BigEndian.Uint32(blob[common.AddressLength+5:]) - i.storageSlots = binary.BigEndian.Uint32(blob[common.AddressLength+9:]) -} - -// slotIndex describes the metadata belonging to a storage slot. -type slotIndex struct { - hash common.Hash // The hash of slot key - length uint8 // The length of storage slot, up to 32 bytes defined in protocol - offset uint32 // The offset of item in storage slot data table -} - -// encode packs slot index into byte stream. -func (i *slotIndex) encode() []byte { - var buf [slotIndexSize]byte - copy(buf[:common.HashLength], i.hash.Bytes()) - buf[common.HashLength] = i.length - binary.BigEndian.PutUint32(buf[common.HashLength+1:], i.offset) - return buf[:] -} - -// decode unpack slot index from the byte stream. -func (i *slotIndex) decode(blob []byte) { - i.hash = common.BytesToHash(blob[:common.HashLength]) - i.length = blob[common.HashLength] - i.offset = binary.BigEndian.Uint32(blob[common.HashLength+1:]) -} - -// meta describes the meta data of state history object. -type meta struct { - version uint8 // version tag of history object - parent common.Hash // prev-state root before the state transition - root common.Hash // post-state root after the state transition - block uint64 // associated block number - incomplete []common.Address // list of address whose storage set is incomplete -} - -// encode packs the meta object into byte stream. -func (m *meta) encode() []byte { - buf := make([]byte, historyMetaSize+len(m.incomplete)*common.AddressLength) - buf[0] = m.version - copy(buf[1:1+common.HashLength], m.parent.Bytes()) - copy(buf[1+common.HashLength:1+2*common.HashLength], m.root.Bytes()) - binary.BigEndian.PutUint64(buf[1+2*common.HashLength:historyMetaSize], m.block) - for i, h := range m.incomplete { - copy(buf[i*common.AddressLength+historyMetaSize:], h.Bytes()) - } - return buf[:] -} - -// decode unpacks the meta object from byte stream. 
-func (m *meta) decode(blob []byte) error { - if len(blob) < 1 { - return fmt.Errorf("no version tag") - } - switch blob[0] { - case stateHistoryVersion: - if len(blob) < historyMetaSize { - return fmt.Errorf("invalid state history meta, len: %d", len(blob)) - } - if (len(blob)-historyMetaSize)%common.AddressLength != 0 { - return fmt.Errorf("corrupted state history meta, len: %d", len(blob)) - } - m.version = blob[0] - m.parent = common.BytesToHash(blob[1 : 1+common.HashLength]) - m.root = common.BytesToHash(blob[1+common.HashLength : 1+2*common.HashLength]) - m.block = binary.BigEndian.Uint64(blob[1+2*common.HashLength : historyMetaSize]) - for pos := historyMetaSize; pos < len(blob); { - m.incomplete = append(m.incomplete, common.BytesToAddress(blob[pos:pos+common.AddressLength])) - pos += common.AddressLength - } - return nil - default: - return fmt.Errorf("unknown version %d", blob[0]) - } -} - -// history represents a set of state changes belong to a block along with -// the metadata including the state roots involved in the state transition. -// State history objects in disk are linked with each other by a unique id -// (8-bytes integer), the oldest state history object can be pruned on demand -// in order to control the storage size. -type history struct { - meta *meta // Meta data of history - accounts map[common.Address][]byte // Account data keyed by its address hash - accountList []common.Address // Sorted account hash list - storages map[common.Address]map[common.Hash][]byte // Storage data keyed by its address hash and slot hash - storageList map[common.Address][]common.Hash // Sorted slot hash list -} - -// newHistory constructs the state history object with provided state change set. -func newHistory(root common.Hash, parent common.Hash, block uint64, states *triestate.Set) *history { - var ( - accountList []common.Address - storageList = make(map[common.Address][]common.Hash) - incomplete []common.Address - ) - for addr := range states.Accounts { - accountList = append(accountList, addr) - } - slices.SortFunc(accountList, common.Address.Cmp) - - for addr, slots := range states.Storages { - slist := make([]common.Hash, 0, len(slots)) - for slotHash := range slots { - slist = append(slist, slotHash) - } - slices.SortFunc(slist, common.Hash.Cmp) - storageList[addr] = slist - } - for addr := range states.Incomplete { - incomplete = append(incomplete, addr) - } - slices.SortFunc(incomplete, common.Address.Cmp) - - return &history{ - meta: &meta{ - version: stateHistoryVersion, - parent: parent, - root: root, - block: block, - incomplete: incomplete, - }, - accounts: states.Accounts, - accountList: accountList, - storages: states.Storages, - storageList: storageList, - } -} - -// encode serializes the state history and returns four byte streams represent -// concatenated account/storage data, account/storage indexes respectively. 
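(`history.encode` follows after this aside.) A hedged round-trip of the fixed-size meta layout defined above: one version byte, two 32-byte roots, an 8-byte big-endian block number, then zero or more 20-byte incomplete addresses. The constants and the `encodeMeta` helper are local to the sketch, not the real API:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	hashLen  = 32
	addrLen  = 20
	metaSize = 9 + 2*hashLen // version (1) + parent + root + block (8) = 73
)

func encodeMeta(version byte, parent, root [hashLen]byte, block uint64, incomplete [][addrLen]byte) []byte {
	buf := make([]byte, metaSize+len(incomplete)*addrLen)
	buf[0] = version
	copy(buf[1:1+hashLen], parent[:])
	copy(buf[1+hashLen:1+2*hashLen], root[:])
	binary.BigEndian.PutUint64(buf[1+2*hashLen:metaSize], block)
	for i, a := range incomplete {
		copy(buf[metaSize+i*addrLen:], a[:])
	}
	return buf
}

func main() {
	var parent, root [hashLen]byte
	root[0] = 0xaa
	blob := encodeMeta(0, parent, root, 42, nil)
	fmt.Println(len(blob), binary.BigEndian.Uint64(blob[1+2*hashLen:metaSize])) // 73 42
}
```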
-func (h *history) encode() ([]byte, []byte, []byte, []byte) { - var ( - slotNumber uint32 // the number of processed slots - accountData []byte // the buffer for concatenated account data - storageData []byte // the buffer for concatenated storage data - accountIndexes []byte // the buffer for concatenated account index - storageIndexes []byte // the buffer for concatenated storage index - ) - for _, addr := range h.accountList { - accIndex := accountIndex{ - address: addr, - length: uint8(len(h.accounts[addr])), - offset: uint32(len(accountData)), - } - slots, exist := h.storages[addr] - if exist { - // Encode storage slots in order - for _, slotHash := range h.storageList[addr] { - sIndex := slotIndex{ - hash: slotHash, - length: uint8(len(slots[slotHash])), - offset: uint32(len(storageData)), - } - storageData = append(storageData, slots[slotHash]...) - storageIndexes = append(storageIndexes, sIndex.encode()...) - } - // Fill up the storage meta in account index - accIndex.storageOffset = slotNumber - accIndex.storageSlots = uint32(len(slots)) - slotNumber += uint32(len(slots)) - } - accountData = append(accountData, h.accounts[addr]...) - accountIndexes = append(accountIndexes, accIndex.encode()...) - } - return accountData, storageData, accountIndexes, storageIndexes -} - -// decoder wraps the byte streams for decoding with extra meta fields. -type decoder struct { - accountData []byte // the buffer for concatenated account data - storageData []byte // the buffer for concatenated storage data - accountIndexes []byte // the buffer for concatenated account index - storageIndexes []byte // the buffer for concatenated storage index - - lastAccount *common.Address // the address of last resolved account - lastAccountRead uint32 // the read-cursor position of account data - lastSlotIndexRead uint32 // the read-cursor position of storage slot index - lastSlotDataRead uint32 // the read-cursor position of storage slot data -} - -// verify validates the provided byte streams for decoding state history. A few -// checks will be performed to quickly detect data corruption. The byte stream -// is regarded as corrupted if: -// -// - account indexes buffer is empty(empty state set is invalid) -// - account indexes/storage indexer buffer is not aligned -// -// note, these situations are allowed: -// -// - empty account data: all accounts were not present -// - empty storage set: no slots are modified -func (r *decoder) verify() error { - if len(r.accountIndexes)%accountIndexSize != 0 || len(r.accountIndexes) == 0 { - return fmt.Errorf("invalid account index, len: %d", len(r.accountIndexes)) - } - if len(r.storageIndexes)%slotIndexSize != 0 { - return fmt.Errorf("invalid storage index, len: %d", len(r.storageIndexes)) - } - return nil -} - -// readAccount parses the account from the byte stream with specified position. -func (r *decoder) readAccount(pos int) (accountIndex, []byte, error) { - // Decode account index from the index byte stream. 
- var index accountIndex - if (pos+1)*accountIndexSize > len(r.accountIndexes) { - return accountIndex{}, nil, errors.New("account data buffer is corrupted") - } - index.decode(r.accountIndexes[pos*accountIndexSize : (pos+1)*accountIndexSize]) - - // Perform validation before parsing account data, ensure - // - account is sorted in order in byte stream - // - account data is strictly encoded with no gap inside - // - account data is not out-of-slice - if r.lastAccount != nil { // zero address is possible - if bytes.Compare(r.lastAccount.Bytes(), index.address.Bytes()) >= 0 { - return accountIndex{}, nil, errors.New("account is not in order") - } - } - if index.offset != r.lastAccountRead { - return accountIndex{}, nil, errors.New("account data buffer is gaped") - } - last := index.offset + uint32(index.length) - if uint32(len(r.accountData)) < last { - return accountIndex{}, nil, errors.New("account data buffer is corrupted") - } - data := r.accountData[index.offset:last] - - r.lastAccount = &index.address - r.lastAccountRead = last - - return index, data, nil -} - -// readStorage parses the storage slots from the byte stream with specified account. -func (r *decoder) readStorage(accIndex accountIndex) ([]common.Hash, map[common.Hash][]byte, error) { - var ( - last common.Hash - list []common.Hash - storage = make(map[common.Hash][]byte) - ) - for j := 0; j < int(accIndex.storageSlots); j++ { - var ( - index slotIndex - start = (accIndex.storageOffset + uint32(j)) * uint32(slotIndexSize) - end = (accIndex.storageOffset + uint32(j+1)) * uint32(slotIndexSize) - ) - // Perform validation before parsing storage slot data, ensure - // - slot index is not out-of-slice - // - slot data is not out-of-slice - // - slot is sorted in order in byte stream - // - slot indexes is strictly encoded with no gap inside - // - slot data is strictly encoded with no gap inside - if start != r.lastSlotIndexRead { - return nil, nil, errors.New("storage index buffer is gapped") - } - if uint32(len(r.storageIndexes)) < end { - return nil, nil, errors.New("storage index buffer is corrupted") - } - index.decode(r.storageIndexes[start:end]) - - if bytes.Compare(last.Bytes(), index.hash.Bytes()) >= 0 { - return nil, nil, errors.New("storage slot is not in order") - } - if index.offset != r.lastSlotDataRead { - return nil, nil, errors.New("storage data buffer is gapped") - } - sEnd := index.offset + uint32(index.length) - if uint32(len(r.storageData)) < sEnd { - return nil, nil, errors.New("storage data buffer is corrupted") - } - storage[index.hash] = r.storageData[r.lastSlotDataRead:sEnd] - list = append(list, index.hash) - - last = index.hash - r.lastSlotIndexRead = end - r.lastSlotDataRead = sEnd - } - return list, storage, nil -} - -// decode deserializes the account and storage data from the provided byte stream. 
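(`history.decode` follows after this aside.) `readAccount` and `readStorage` above both enforce the same invariant: each record must begin exactly where the previous one ended and must not overrun the data buffer. That check, distilled into a standalone sketch with a hypothetical `rec` type:

```go
package main

import (
	"errors"
	"fmt"
)

type rec struct{ offset, length uint32 }

// sliceStrict slices data according to recs, rejecting gaps, overlaps and
// out-of-bounds records, mirroring the cursor checks above.
func sliceStrict(data []byte, recs []rec) ([][]byte, error) {
	var (
		out    [][]byte
		cursor uint32
	)
	for _, r := range recs {
		if r.offset != cursor {
			return nil, errors.New("data buffer is gapped")
		}
		end := r.offset + r.length
		if uint32(len(data)) < end {
			return nil, errors.New("data buffer is corrupted")
		}
		out = append(out, data[r.offset:end])
		cursor = end
	}
	return out, nil
}

func main() {
	data := []byte("abcdef")
	vals, err := sliceStrict(data, []rec{{0, 2}, {2, 4}})
	fmt.Println(string(vals[0]), string(vals[1]), err) // ab cdef <nil>
	_, err = sliceStrict(data, []rec{{0, 2}, {3, 3}})
	fmt.Println(err) // data buffer is gapped
}
```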
-func (h *history) decode(accountData, storageData, accountIndexes, storageIndexes []byte) error { - var ( - accounts = make(map[common.Address][]byte) - storages = make(map[common.Address]map[common.Hash][]byte) - accountList []common.Address - storageList = make(map[common.Address][]common.Hash) - - r = &decoder{ - accountData: accountData, - storageData: storageData, - accountIndexes: accountIndexes, - storageIndexes: storageIndexes, - } - ) - if err := r.verify(); err != nil { - return err - } - for i := 0; i < len(accountIndexes)/accountIndexSize; i++ { - // Resolve account first - accIndex, accData, err := r.readAccount(i) - if err != nil { - return err - } - accounts[accIndex.address] = accData - accountList = append(accountList, accIndex.address) - - // Resolve storage slots - slotList, slotData, err := r.readStorage(accIndex) - if err != nil { - return err - } - if len(slotList) > 0 { - storageList[accIndex.address] = slotList - storages[accIndex.address] = slotData - } - } - h.accounts = accounts - h.accountList = accountList - h.storages = storages - h.storageList = storageList - return nil -} diff --git a/trie/triedb/pathdb/history_test.go b/trie/triedb/pathdb/history_test.go deleted file mode 100644 index abf40c2838..0000000000 --- a/trie/triedb/pathdb/history_test.go +++ /dev/null @@ -1,171 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see - -package pathdb - -import ( - "bytes" - "reflect" - "testing" - - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie/testutil" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" -) - -// randomStateSet generates a random state change set. 
-func randomStateSet(n int) *triestate.Set { - var ( - accounts = make(map[common.Address][]byte) - storages = make(map[common.Address]map[common.Hash][]byte) - ) - for i := 0; i < n; i++ { - addr := testutil.RandomAddress() - storages[addr] = make(map[common.Hash][]byte) - for j := 0; j < 3; j++ { - v, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(testutil.RandBytes(32))) - storages[addr][testutil.RandomHash()] = v - } - account := generateAccount(types.EmptyRootHash) - accounts[addr] = types.SlimAccountRLP(account) - } - return triestate.New(accounts, storages, nil) -} - -func makeHistory() *history { - return newHistory(testutil.RandomHash(), types.EmptyRootHash, 0, randomStateSet(3)) -} - -// nolint: unused -func makeHistories(n int) []*history { - var ( - parent = types.EmptyRootHash - result []*history - ) - for i := 0; i < n; i++ { - root := testutil.RandomHash() - h := newHistory(root, parent, uint64(i), randomStateSet(3)) - parent = root - result = append(result, h) - } - return result -} - -func TestEncodeDecodeHistory(t *testing.T) { - var ( - m meta - dec history - obj = makeHistory() - ) - // check if meta data can be correctly encode/decode - blob := obj.meta.encode() - if err := m.decode(blob); err != nil { - t.Fatalf("Failed to decode %v", err) - } - if !reflect.DeepEqual(&m, obj.meta) { - t.Fatal("meta is mismatched") - } - - // check if account/storage data can be correctly encode/decode - accountData, storageData, accountIndexes, storageIndexes := obj.encode() - if err := dec.decode(accountData, storageData, accountIndexes, storageIndexes); err != nil { - t.Fatalf("Failed to decode, err: %v", err) - } - if !compareSet(dec.accounts, obj.accounts) { - t.Fatal("account data is mismatched") - } - if !compareStorages(dec.storages, obj.storages) { - t.Fatal("storage data is mismatched") - } - if !compareList(dec.accountList, obj.accountList) { - t.Fatal("account list is mismatched") - } - if !compareStorageList(dec.storageList, obj.storageList) { - t.Fatal("storage list is mismatched") - } -} - -func compareSet[k comparable](a, b map[k][]byte) bool { - if len(a) != len(b) { - return false - } - for key, valA := range a { - valB, ok := b[key] - if !ok { - return false - } - if !bytes.Equal(valA, valB) { - return false - } - } - return true -} - -func compareList[k comparable](a, b []k) bool { - if len(a) != len(b) { - return false - } - for i := 0; i < len(a); i++ { - if a[i] != b[i] { - return false - } - } - return true -} - -func compareStorages(a, b map[common.Address]map[common.Hash][]byte) bool { - if len(a) != len(b) { - return false - } - for h, subA := range a { - subB, ok := b[h] - if !ok { - return false - } - if !compareSet(subA, subB) { - return false - } - } - return true -} - -func compareStorageList(a, b map[common.Address][]common.Hash) bool { - if len(a) != len(b) { - return false - } - for h, la := range a { - lb, ok := b[h] - if !ok { - return false - } - if !compareList(la, lb) { - return false - } - } - return true -} diff --git a/trie/triedb/pathdb/journal.go b/trie/triedb/pathdb/journal.go deleted file mode 100644 index d35f00bab6..0000000000 --- a/trie/triedb/pathdb/journal.go +++ /dev/null @@ -1,388 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. 
-// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package pathdb - -import ( - "bytes" - "errors" - "fmt" - "io" - - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" -) - -var ( - errMissJournal = errors.New("journal not found") - errMissVersion = errors.New("version not found") - errUnexpectedVersion = errors.New("unexpected journal version") - errMissDiskRoot = errors.New("disk layer root not found") - errUnmatchedJournal = errors.New("unmatched journal") -) - -const journalVersion uint64 = 0 - -// journalNode represents a trie node persisted in the journal. -type journalNode struct { - Path []byte // Path of the node in the trie - Blob []byte // RLP-encoded trie node blob, nil means the node is deleted -} - -// journalNodes represents a list trie nodes belong to a single account -// or the main account trie. -type journalNodes struct { - Owner common.Hash - Nodes []journalNode -} - -// journalAccounts represents a list accounts belong to the layer. -type journalAccounts struct { - Addresses []common.Address - Accounts [][]byte -} - -// journalStorage represents a list of storage slots belong to an account. -type journalStorage struct { - Incomplete bool - Account common.Address - Hashes []common.Hash - Slots [][]byte -} - -// loadJournal tries to parse the layer journal from the disk. -func (db *Database) loadJournal(diskRoot common.Hash) (layer, error) { - journal := rawdb.ReadTrieJournal(db.diskdb) - if len(journal) == 0 { - return nil, errMissJournal - } - r := rlp.NewStream(bytes.NewReader(journal), 0) - - // Firstly, resolve the first element as the journal version - version, err := r.Uint64() - if err != nil { - return nil, errMissVersion - } - if version != journalVersion { - return nil, fmt.Errorf("%w want %d got %d", errUnexpectedVersion, journalVersion, version) - } - // Secondly, resolve the disk layer root, ensure it's continuous - // with disk layer. Note now we can ensure it's the layer journal - // correct version, so we expect everything can be resolved properly. - var root common.Hash - if err := r.Decode(&root); err != nil { - return nil, errMissDiskRoot - } - // The journal is not matched with persistent state, discard them. - // It can happen that geth crashes without persisting the journal. 
- if !bytes.Equal(root.Bytes(), diskRoot.Bytes()) { - return nil, fmt.Errorf("%w want %x got %x", errUnmatchedJournal, root, diskRoot) - } - // Load the disk layer from the journal - base, err := db.loadDiskLayer(r) - if err != nil { - return nil, err - } - // Load all the diff layers from the journal - head, err := db.loadDiffLayer(base, r) - if err != nil { - return nil, err - } - log.Debug("Loaded layer journal", "diskroot", diskRoot, "diffhead", head.rootHash()) - return head, nil -} - -// loadLayers loads a pre-existing state layer backed by a key-value store. -func (db *Database) loadLayers() layer { - // Retrieve the root node of persistent state. - _, root := rawdb.ReadAccountTrieNode(db.diskdb, nil) - root = types.TrieRootHash(root) - - // Load the layers by resolving the journal - head, err := db.loadJournal(root) - if err == nil { - return head - } - // journal is not matched(or missing) with the persistent state, discard - // it. Display log for discarding journal, but try to avoid showing - // useless information when the db is created from scratch. - if !(root == types.EmptyRootHash && errors.Is(err, errMissJournal)) { - log.Info("Failed to load journal, discard it", "err", err) - } - // Return single layer with persistent state. - return newDiskLayer(root, rawdb.ReadPersistentStateID(db.diskdb), db, nil, newNodeBuffer(db.bufferSize, nil, 0)) -} - -// loadDiskLayer reads the binary blob from the layer journal, reconstructing -// a new disk layer on it. -func (db *Database) loadDiskLayer(r *rlp.Stream) (layer, error) { - // Resolve disk layer root - var root common.Hash - if err := r.Decode(&root); err != nil { - return nil, fmt.Errorf("load disk root: %v", err) - } - // Resolve the state id of disk layer, it can be different - // with the persistent id tracked in disk, the id distance - // is the number of transitions aggregated in disk layer. - var id uint64 - if err := r.Decode(&id); err != nil { - return nil, fmt.Errorf("load state id: %v", err) - } - stored := rawdb.ReadPersistentStateID(db.diskdb) - if stored > id { - return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id) - } - // Resolve nodes cached in node buffer - var encoded []journalNodes - if err := r.Decode(&encoded); err != nil { - return nil, fmt.Errorf("load disk nodes: %v", err) - } - nodes := make(map[common.Hash]map[string]*trienode.Node) - for _, entry := range encoded { - subset := make(map[string]*trienode.Node) - for _, n := range entry.Nodes { - if len(n.Blob) > 0 { - subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob) - } else { - subset[string(n.Path)] = trienode.NewDeleted() - } - } - nodes[entry.Owner] = subset - } - // Calculate the internal state transitions by id difference. - base := newDiskLayer(root, id, db, nil, newNodeBuffer(db.bufferSize, nodes, id-stored)) - return base, nil -} - -// loadDiffLayer reads the next sections of a layer journal, reconstructing a new -// diff and verifying that it can be linked to the requested parent. 
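(`loadDiffLayer` follows after this aside.) The journal framing that `loadJournal` above expects is simply a version number followed by the disk root, written and read through the go-ethereum `rlp` stream API this file already uses. A hedged round-trip of just that prefix:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

const journalVersion uint64 = 0

func main() {
	// Write: the version first, then the disk layer root.
	journal := new(bytes.Buffer)
	if err := rlp.Encode(journal, journalVersion); err != nil {
		panic(err)
	}
	root := common.HexToHash("0xdeadbeef")
	if err := rlp.Encode(journal, root); err != nil {
		panic(err)
	}

	// Read back in the same order, rejecting unknown versions.
	r := rlp.NewStream(bytes.NewReader(journal.Bytes()), 0)
	version, err := r.Uint64()
	if err != nil || version != journalVersion {
		panic(fmt.Sprintf("bad version: %d %v", version, err))
	}
	var got common.Hash
	if err := r.Decode(&got); err != nil {
		panic(err)
	}
	fmt.Println(got == root) // true
}
```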
-func (db *Database) loadDiffLayer(parent layer, r *rlp.Stream) (layer, error) { - // Read the next diff journal entry - var root common.Hash - if err := r.Decode(&root); err != nil { - // The first read may fail with EOF, marking the end of the journal - if err == io.EOF { - return parent, nil - } - return nil, fmt.Errorf("load diff root: %v", err) - } - var block uint64 - if err := r.Decode(&block); err != nil { - return nil, fmt.Errorf("load block number: %v", err) - } - // Read in-memory trie nodes from journal - var encoded []journalNodes - if err := r.Decode(&encoded); err != nil { - return nil, fmt.Errorf("load diff nodes: %v", err) - } - nodes := make(map[common.Hash]map[string]*trienode.Node) - for _, entry := range encoded { - subset := make(map[string]*trienode.Node) - for _, n := range entry.Nodes { - if len(n.Blob) > 0 { - subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob) - } else { - subset[string(n.Path)] = trienode.NewDeleted() - } - } - nodes[entry.Owner] = subset - } - // Read state changes from journal - var ( - jaccounts journalAccounts - jstorages []journalStorage - accounts = make(map[common.Address][]byte) - storages = make(map[common.Address]map[common.Hash][]byte) - incomplete = make(map[common.Address]struct{}) - ) - if err := r.Decode(&jaccounts); err != nil { - return nil, fmt.Errorf("load diff accounts: %v", err) - } - for i, addr := range jaccounts.Addresses { - accounts[addr] = jaccounts.Accounts[i] - } - if err := r.Decode(&jstorages); err != nil { - return nil, fmt.Errorf("load diff storages: %v", err) - } - for _, entry := range jstorages { - set := make(map[common.Hash][]byte) - for i, h := range entry.Hashes { - if len(entry.Slots[i]) > 0 { - set[h] = entry.Slots[i] - } else { - set[h] = nil - } - } - if entry.Incomplete { - incomplete[entry.Account] = struct{}{} - } - storages[entry.Account] = set - } - return db.loadDiffLayer(newDiffLayer(parent, root, parent.stateID()+1, block, nodes, triestate.New(accounts, storages, incomplete)), r) -} - -// journal implements the layer interface, marshaling the un-flushed trie nodes -// along with layer meta data into provided byte buffer. -func (dl *diskLayer) journal(w io.Writer) error { - dl.lock.RLock() - defer dl.lock.RUnlock() - - // Ensure the layer didn't get stale - if dl.stale { - return errSnapshotStale - } - // Step one, write the disk root into the journal. - if err := rlp.Encode(w, dl.root); err != nil { - return err - } - // Step two, write the corresponding state id into the journal - if err := rlp.Encode(w, dl.id); err != nil { - return err - } - // Step three, write all unwritten nodes into the journal - nodes := make([]journalNodes, 0, len(dl.buffer.nodes)) - for owner, subset := range dl.buffer.nodes { - entry := journalNodes{Owner: owner} - for path, node := range subset { - entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob}) - } - nodes = append(nodes, entry) - } - if err := rlp.Encode(w, nodes); err != nil { - return err - } - log.Debug("Journaled pathdb disk layer", "root", dl.root, "nodes", len(dl.buffer.nodes)) - return nil -} - -// journal implements the layer interface, writing the memory layer contents -// into a buffer to be stored in the database as the layer journal. 
-func (dl *diffLayer) journal(w io.Writer) error { - dl.lock.RLock() - defer dl.lock.RUnlock() - - // journal the parent first - if err := dl.parent.journal(w); err != nil { - return err - } - // Everything below was journaled, persist this layer too - if err := rlp.Encode(w, dl.root); err != nil { - return err - } - if err := rlp.Encode(w, dl.block); err != nil { - return err - } - // Write the accumulated trie nodes into buffer - nodes := make([]journalNodes, 0, len(dl.nodes)) - for owner, subset := range dl.nodes { - entry := journalNodes{Owner: owner} - for path, node := range subset { - entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob}) - } - nodes = append(nodes, entry) - } - if err := rlp.Encode(w, nodes); err != nil { - return err - } - // Write the accumulated state changes into buffer - var jacct journalAccounts - for addr, account := range dl.states.Accounts { - jacct.Addresses = append(jacct.Addresses, addr) - jacct.Accounts = append(jacct.Accounts, account) - } - if err := rlp.Encode(w, jacct); err != nil { - return err - } - storage := make([]journalStorage, 0, len(dl.states.Storages)) - for addr, slots := range dl.states.Storages { - entry := journalStorage{Account: addr} - if _, ok := dl.states.Incomplete[addr]; ok { - entry.Incomplete = true - } - for slotHash, slot := range slots { - entry.Hashes = append(entry.Hashes, slotHash) - entry.Slots = append(entry.Slots, slot) - } - storage = append(storage, entry) - } - if err := rlp.Encode(w, storage); err != nil { - return err - } - log.Debug("Journaled pathdb diff layer", "root", dl.root, "parent", dl.parent.rootHash(), "id", dl.stateID(), "block", dl.block, "nodes", len(dl.nodes)) - return nil -} - -// Journal commits an entire diff hierarchy to disk into a single journal entry. -// This is meant to be used during shutdown to persist the layer without -// flattening everything down (bad for reorgs). And this function will mark the -// database as read-only to prevent all following mutation to disk. -func (db *Database) Journal(root common.Hash) error { - // Retrieve the head layer to journal from. - l := db.tree.get(root) - if l == nil { - return fmt.Errorf("triedb layer [%#x] missing", root) - } - // Run the journaling - db.lock.Lock() - defer db.lock.Unlock() - - // Short circuit if the database is in read only mode. - if db.readOnly { - return errSnapshotReadOnly - } - // Firstly write out the metadata of journal - journal := new(bytes.Buffer) - if err := rlp.Encode(journal, journalVersion); err != nil { - return err - } - // The stored state in disk might be empty, convert the - // root to emptyRoot in this case. - _, diskroot := rawdb.ReadAccountTrieNode(db.diskdb, nil) - diskroot = types.TrieRootHash(diskroot) - - // Secondly write out the state root in disk, ensure all layers - // on top are continuous with disk. - if err := rlp.Encode(journal, diskroot); err != nil { - return err - } - // Finally write out the journal of each layer in reverse order. 
-	if err := l.journal(journal); err != nil {
-		return err
-	}
-	// Store the journal into the database and return
-	rawdb.WriteTrieJournal(db.diskdb, journal.Bytes())
-
-	// Set the db in read only mode to reject all following mutations
-	db.readOnly = true
-	log.Info("Stored journal in triedb", "disk", diskroot, "size", common.StorageSize(journal.Len()))
-	return nil
-}
diff --git a/trie/triedb/pathdb/layertree.go b/trie/triedb/pathdb/layertree.go
deleted file mode 100644
index 58b112c6bb..0000000000
--- a/trie/triedb/pathdb/layertree.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package pathdb
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/ava-labs/subnet-evm/core/types"
-	"github.com/ava-labs/subnet-evm/trie/trienode"
-	"github.com/ava-labs/subnet-evm/trie/triestate"
-	"github.com/ethereum/go-ethereum/common"
-)
-
-// layerTree is a group of state layers identified by the state root.
-// This structure defines a few basic operations for manipulating
-// state layers linked with each other in a tree structure. It's
-// thread-safe to use. However, callers need to ensure the thread-safety
-// of the referenced layer by themselves.
-type layerTree struct {
-	lock   sync.RWMutex
-	layers map[common.Hash]layer
-}
-
-// newLayerTree constructs the layerTree with the given head layer.
-func newLayerTree(head layer) *layerTree {
-	tree := new(layerTree)
-	tree.reset(head)
-	return tree
-}
-
-// reset initializes the layerTree by the given head layer.
-// All the ancestors will be iterated out and linked in the tree.
-func (tree *layerTree) reset(head layer) {
-	tree.lock.Lock()
-	defer tree.lock.Unlock()
-
-	var layers = make(map[common.Hash]layer)
-	for head != nil {
-		layers[head.rootHash()] = head
-		head = head.parentLayer()
-	}
-	tree.layers = layers
-}
-
-// get retrieves a layer belonging to the given state root.
-func (tree *layerTree) get(root common.Hash) layer {
-	tree.lock.RLock()
-	defer tree.lock.RUnlock()
-
-	return tree.layers[types.TrieRootHash(root)]
-}
-
-// forEach iterates the stored layers inside and applies the
-// given callback on them.
-func (tree *layerTree) forEach(onLayer func(layer)) {
-	tree.lock.RLock()
-	defer tree.lock.RUnlock()
-
-	for _, layer := range tree.layers {
-		onLayer(layer)
-	}
-}
-
-// len returns the number of layers cached.
-func (tree *layerTree) len() int { - tree.lock.RLock() - defer tree.lock.RUnlock() - - return len(tree.layers) -} - -// add inserts a new layer into the tree if it can be linked to an existing old parent. -func (tree *layerTree) add(root common.Hash, parentRoot common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error { - // Reject noop updates to avoid self-loops. This is a special case that can - // happen for clique networks and proof-of-stake networks where empty blocks - // don't modify the state (0 block subsidy). - // - // Although we could silently ignore this internally, it should be the caller's - // responsibility to avoid even attempting to insert such a layer. - root, parentRoot = types.TrieRootHash(root), types.TrieRootHash(parentRoot) - if root == parentRoot { - return errors.New("layer cycle") - } - parent := tree.get(parentRoot) - if parent == nil { - return fmt.Errorf("triedb parent [%#x] layer missing", parentRoot) - } - l := parent.update(root, parent.stateID()+1, block, nodes.Flatten(), states) - - tree.lock.Lock() - tree.layers[l.rootHash()] = l - tree.lock.Unlock() - return nil -} - -// cap traverses downwards the diff tree until the number of allowed diff layers -// are crossed. All diffs beyond the permitted number are flattened downwards. -func (tree *layerTree) cap(root common.Hash, layers int) error { - // Retrieve the head layer to cap from - root = types.TrieRootHash(root) - l := tree.get(root) - if l == nil { - return fmt.Errorf("triedb layer [%#x] missing", root) - } - diff, ok := l.(*diffLayer) - if !ok { - return fmt.Errorf("triedb layer [%#x] is disk layer", root) - } - tree.lock.Lock() - defer tree.lock.Unlock() - - // If full commit was requested, flatten the diffs and merge onto disk - if layers == 0 { - base, err := diff.persist(true) - if err != nil { - return err - } - // Replace the entire layer tree with the flat base - tree.layers = map[common.Hash]layer{base.rootHash(): base} - return nil - } - // Dive until we run out of layers or reach the persistent database - for i := 0; i < layers-1; i++ { - // If we still have diff layers below, continue down - if parent, ok := diff.parentLayer().(*diffLayer); ok { - diff = parent - } else { - // Diff stack too shallow, return without modifications - return nil - } - } - // We're out of layers, flatten anything below, stopping if it's the disk or if - // the memory limit is not yet exceeded. - switch parent := diff.parentLayer().(type) { - case *diskLayer: - return nil - - case *diffLayer: - // Hold the lock to prevent any read operations until the new - // parent is linked correctly. 
- diff.lock.Lock() - - base, err := parent.persist(false) - if err != nil { - diff.lock.Unlock() - return err - } - tree.layers[base.rootHash()] = base - diff.parent = base - - diff.lock.Unlock() - - default: - panic(fmt.Sprintf("unknown data layer in triedb: %T", parent)) - } - // Remove any layer that is stale or links into a stale layer - children := make(map[common.Hash][]common.Hash) - for root, layer := range tree.layers { - if dl, ok := layer.(*diffLayer); ok { - parent := dl.parentLayer().rootHash() - children[parent] = append(children[parent], root) - } - } - var remove func(root common.Hash) - remove = func(root common.Hash) { - delete(tree.layers, root) - for _, child := range children[root] { - remove(child) - } - delete(children, root) - } - for root, layer := range tree.layers { - if dl, ok := layer.(*diskLayer); ok && dl.isStale() { - remove(root) - } - } - return nil -} - -// bottom returns the bottom-most disk layer in this tree. -func (tree *layerTree) bottom() *diskLayer { - tree.lock.RLock() - defer tree.lock.RUnlock() - - if len(tree.layers) == 0 { - return nil // Shouldn't happen, empty tree - } - // pick a random one as the entry point - var current layer - for _, layer := range tree.layers { - current = layer - break - } - for current.parentLayer() != nil { - current = current.parentLayer() - } - return current.(*diskLayer) -} diff --git a/trie/triedb/pathdb/metrics.go b/trie/triedb/pathdb/metrics.go deleted file mode 100644 index 27dfe7fede..0000000000 --- a/trie/triedb/pathdb/metrics.go +++ /dev/null @@ -1,61 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2022 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. 
If not, see <http://www.gnu.org/licenses/>.
-
-package pathdb
-
-import "github.com/ava-labs/subnet-evm/metrics"
-
-// nolint: unused
-var (
-	cleanHitMeter   = metrics.NewRegisteredMeter("pathdb/clean/hit", nil)
-	cleanMissMeter  = metrics.NewRegisteredMeter("pathdb/clean/miss", nil)
-	cleanReadMeter  = metrics.NewRegisteredMeter("pathdb/clean/read", nil)
-	cleanWriteMeter = metrics.NewRegisteredMeter("pathdb/clean/write", nil)
-
-	dirtyHitMeter         = metrics.NewRegisteredMeter("pathdb/dirty/hit", nil)
-	dirtyMissMeter        = metrics.NewRegisteredMeter("pathdb/dirty/miss", nil)
-	dirtyReadMeter        = metrics.NewRegisteredMeter("pathdb/dirty/read", nil)
-	dirtyWriteMeter       = metrics.NewRegisteredMeter("pathdb/dirty/write", nil)
-	dirtyNodeHitDepthHist = metrics.NewRegisteredHistogram("pathdb/dirty/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
-
-	cleanFalseMeter = metrics.NewRegisteredMeter("pathdb/clean/false", nil)
-	dirtyFalseMeter = metrics.NewRegisteredMeter("pathdb/dirty/false", nil)
-	diskFalseMeter  = metrics.NewRegisteredMeter("pathdb/disk/false", nil)
-
-	commitTimeTimer  = metrics.NewRegisteredTimer("pathdb/commit/time", nil)
-	commitNodesMeter = metrics.NewRegisteredMeter("pathdb/commit/nodes", nil)
-	commitBytesMeter = metrics.NewRegisteredMeter("pathdb/commit/bytes", nil)
-
-	gcNodesMeter = metrics.NewRegisteredMeter("pathdb/gc/nodes", nil)
-	gcBytesMeter = metrics.NewRegisteredMeter("pathdb/gc/bytes", nil)
-
-	diffLayerBytesMeter = metrics.NewRegisteredMeter("pathdb/diff/bytes", nil)
-	diffLayerNodesMeter = metrics.NewRegisteredMeter("pathdb/diff/nodes", nil)
-
-	historyBuildTimeMeter  = metrics.NewRegisteredTimer("pathdb/history/time", nil)
-	historyDataBytesMeter  = metrics.NewRegisteredMeter("pathdb/history/bytes/data", nil)
-	historyIndexBytesMeter = metrics.NewRegisteredMeter("pathdb/history/bytes/index", nil)
-)
diff --git a/trie/triedb/pathdb/nodebuffer.go b/trie/triedb/pathdb/nodebuffer.go
deleted file mode 100644
index 9a0ce7fb02..0000000000
--- a/trie/triedb/pathdb/nodebuffer.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2022 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- -package pathdb - -import ( - "fmt" - "time" - - "github.com/VictoriaMetrics/fastcache" - "github.com/ava-labs/subnet-evm/core/rawdb" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" -) - -// nodebuffer is a collection of modified trie nodes to aggregate the disk -// write. The content of the nodebuffer must be checked before diving into -// disk (since it basically is not-yet-written data). -type nodebuffer struct { - layers uint64 // The number of diff layers aggregated inside - size uint64 // The size of aggregated writes - limit uint64 // The maximum memory allowance in bytes - nodes map[common.Hash]map[string]*trienode.Node // The dirty node set, mapped by owner and path -} - -// newNodeBuffer initializes the node buffer with the provided nodes. -func newNodeBuffer(limit int, nodes map[common.Hash]map[string]*trienode.Node, layers uint64) *nodebuffer { - if nodes == nil { - nodes = make(map[common.Hash]map[string]*trienode.Node) - } - var size uint64 - for _, subset := range nodes { - for path, n := range subset { - size += uint64(len(n.Blob) + len(path)) - } - } - return &nodebuffer{ - layers: layers, - nodes: nodes, - size: size, - limit: uint64(limit), - } -} - -// node retrieves the trie node with given node info. -func (b *nodebuffer) node(owner common.Hash, path []byte, hash common.Hash) (*trienode.Node, error) { - subset, ok := b.nodes[owner] - if !ok { - return nil, nil - } - n, ok := subset[string(path)] - if !ok { - return nil, nil - } - if n.Hash != hash { - dirtyFalseMeter.Mark(1) - log.Error("Unexpected trie node in node buffer", "owner", owner, "path", path, "expect", hash, "got", n.Hash) - return nil, newUnexpectedNodeError("dirty", hash, n.Hash, owner, path) - } - return n, nil -} - -// commit merges the dirty nodes into the nodebuffer. This operation won't take -// the ownership of the nodes map which belongs to the bottom-most diff layer. -// It will just hold the node references from the given map which are safe to -// copy. -func (b *nodebuffer) commit(nodes map[common.Hash]map[string]*trienode.Node) *nodebuffer { - var ( - delta int64 - overwrite int64 - overwriteSize int64 - ) - for owner, subset := range nodes { - current, exist := b.nodes[owner] - if !exist { - // Allocate a new map for the subset instead of claiming it directly - // from the passed map to avoid potential concurrent map read/write. - // The nodes belong to original diff layer are still accessible even - // after merging, thus the ownership of nodes map should still belong - // to original layer and any mutation on it should be prevented. - current = make(map[string]*trienode.Node) - for path, n := range subset { - current[path] = n - delta += int64(len(n.Blob) + len(path)) - } - b.nodes[owner] = current - continue - } - for path, n := range subset { - if orig, exist := current[path]; !exist { - delta += int64(len(n.Blob) + len(path)) - } else { - delta += int64(len(n.Blob) - len(orig.Blob)) - overwrite++ - overwriteSize += int64(len(orig.Blob) + len(path)) - } - current[path] = n - } - b.nodes[owner] = current - } - b.updateSize(delta) - b.layers++ - gcNodesMeter.Mark(overwrite) - gcBytesMeter.Mark(overwriteSize) - return b -} - -// nolint: unused -// revert is the reverse operation of commit. 
It also merges the provided nodes -// into the nodebuffer, the difference is that the provided node set should -// revert the changes made by the last state transition. -func (b *nodebuffer) revert(db ethdb.KeyValueReader, nodes map[common.Hash]map[string]*trienode.Node) error { - // Short circuit if no embedded state transition to revert. - if b.layers == 0 { - return errStateUnrecoverable - } - b.layers-- - - // Reset the entire buffer if only a single transition left. - if b.layers == 0 { - b.reset() - return nil - } - var delta int64 - for owner, subset := range nodes { - current, ok := b.nodes[owner] - if !ok { - panic(fmt.Sprintf("non-existent subset (%x)", owner)) - } - for path, n := range subset { - orig, ok := current[path] - if !ok { - // There is a special case in MPT that one child is removed from - // a fullNode which only has two children, and then a new child - // with different position is immediately inserted into the fullNode. - // In this case, the clean child of the fullNode will also be - // marked as dirty because of node collapse and expansion. - // - // In case of database rollback, don't panic if this "clean" - // node occurs which is not present in buffer. - var nhash common.Hash - if owner == (common.Hash{}) { - _, nhash = rawdb.ReadAccountTrieNode(db, []byte(path)) - } else { - _, nhash = rawdb.ReadStorageTrieNode(db, owner, []byte(path)) - } - // Ignore the clean node in the case described above. - if nhash == n.Hash { - continue - } - panic(fmt.Sprintf("non-existent node (%x %v) blob: %v", owner, path, crypto.Keccak256Hash(n.Blob).Hex())) - } - current[path] = n - delta += int64(len(n.Blob)) - int64(len(orig.Blob)) - } - } - b.updateSize(delta) - return nil -} - -// updateSize updates the total cache size by the given delta. -func (b *nodebuffer) updateSize(delta int64) { - size := int64(b.size) + delta - if size >= 0 { - b.size = uint64(size) - return - } - s := b.size - b.size = 0 - log.Error("Invalid pathdb buffer size", "prev", common.StorageSize(s), "delta", common.StorageSize(delta)) -} - -// reset cleans up the disk cache. -func (b *nodebuffer) reset() { - b.layers = 0 - b.size = 0 - b.nodes = make(map[common.Hash]map[string]*trienode.Node) -} - -// nolint: unused -// empty returns an indicator if nodebuffer contains any state transition inside. -func (b *nodebuffer) empty() bool { - return b.layers == 0 -} - -// setSize sets the buffer size to the provided number, and invokes a flush -// operation if the current memory usage exceeds the new limit. -func (b *nodebuffer) setSize(size int, db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64) error { - b.limit = uint64(size) - return b.flush(db, clean, id, false) -} - -// flush persists the in-memory dirty trie node into the disk if the configured -// memory threshold is reached. Note, all data must be written atomically. -func (b *nodebuffer) flush(db ethdb.KeyValueStore, clean *fastcache.Cache, id uint64, force bool) error { - if b.size <= b.limit && !force { - return nil - } - // Ensure the target state id is aligned with the internal counter. 
-	head := rawdb.ReadPersistentStateID(db)
-	if head+b.layers != id {
-		return fmt.Errorf("buffer layers (%d) cannot be applied on top of persisted state id (%d) to reach requested state id (%d)", b.layers, head, id)
-	}
-	var (
-		start = time.Now()
-		batch = db.NewBatchWithSize(int(b.size))
-	)
-	nodes := writeNodes(batch, b.nodes, clean)
-	rawdb.WritePersistentStateID(batch, id)
-
-	// Flush all mutations in a single batch
-	size := batch.ValueSize()
-	if err := batch.Write(); err != nil {
-		return err
-	}
-	commitBytesMeter.Mark(int64(size))
-	commitNodesMeter.Mark(int64(nodes))
-	commitTimeTimer.UpdateSince(start)
-	log.Debug("Persisted pathdb nodes", "nodes", len(b.nodes), "bytes", common.StorageSize(size), "elapsed", common.PrettyDuration(time.Since(start)))
-	b.reset()
-	return nil
-}
-
-// writeNodes writes the trie nodes into the provided database batch.
-// Note this function will also inject all the newly written nodes
-// into clean cache.
-func writeNodes(batch ethdb.Batch, nodes map[common.Hash]map[string]*trienode.Node, clean *fastcache.Cache) (total int) {
-	for owner, subset := range nodes {
-		for path, n := range subset {
-			if n.IsDeleted() {
-				if owner == (common.Hash{}) {
-					rawdb.DeleteAccountTrieNode(batch, []byte(path))
-				} else {
-					rawdb.DeleteStorageTrieNode(batch, owner, []byte(path))
-				}
-				if clean != nil {
-					clean.Del(cacheKey(owner, []byte(path)))
-				}
-			} else {
-				if owner == (common.Hash{}) {
-					rawdb.WriteAccountTrieNode(batch, []byte(path), n.Blob)
-				} else {
-					rawdb.WriteStorageTrieNode(batch, owner, []byte(path), n.Blob)
-				}
-				if clean != nil {
-					clean.Set(cacheKey(owner, []byte(path)), n.Blob)
-				}
-			}
-		}
-		total += len(subset)
-	}
-	return total
-}
-
-// cacheKey constructs the unique key of clean cache.
-func cacheKey(owner common.Hash, path []byte) []byte {
-	if owner == (common.Hash{}) {
-		return path
-	}
-	return append(owner.Bytes(), path...)
-}
diff --git a/trie/triedb/pathdb/testutils.go b/trie/triedb/pathdb/testutils.go
deleted file mode 100644
index 71b845d2ad..0000000000
--- a/trie/triedb/pathdb/testutils.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// (c) 2024, Ava Labs, Inc.
-//
-// This file is a derived work, based on the go-ethereum library whose original
-// notices appear below.
-//
-// It is distributed under a license compatible with the licensing terms of the
-// original code from which it is derived.
-//
-// Much love to the original authors for their work.
-// **********
-// Copyright 2023 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- -package pathdb - -import ( - "bytes" - "fmt" - - "github.com/ava-labs/subnet-evm/core/types" - "github.com/ava-labs/subnet-evm/trie/trienode" - "github.com/ava-labs/subnet-evm/trie/triestate" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "golang.org/x/exp/slices" -) - -// testHasher is a test utility for computing root hash of a batch of state -// elements. The hash algorithm is to sort all the elements in lexicographical -// order, concat the key and value in turn, and perform hash calculation on -// the concatenated bytes. Except the root hash, a nodeset will be returned -// once Commit is called, which contains all the changes made to hasher. -type testHasher struct { - owner common.Hash // owner identifier - root common.Hash // original root - dirties map[common.Hash][]byte // dirty states - cleans map[common.Hash][]byte // clean states -} - -// newTestHasher constructs a hasher object with provided states. -func newTestHasher(owner common.Hash, root common.Hash, cleans map[common.Hash][]byte) (*testHasher, error) { - if cleans == nil { - cleans = make(map[common.Hash][]byte) - } - if got, _ := hash(cleans); got != root { - return nil, fmt.Errorf("state root mismatched, want: %x, got: %x", root, got) - } - return &testHasher{ - owner: owner, - root: root, - dirties: make(map[common.Hash][]byte), - cleans: cleans, - }, nil -} - -// Get returns the value for key stored in the trie. -func (h *testHasher) Get(key []byte) ([]byte, error) { - hash := common.BytesToHash(key) - val, ok := h.dirties[hash] - if ok { - return val, nil - } - return h.cleans[hash], nil -} - -// Update associates key with value in the trie. -func (h *testHasher) Update(key, value []byte) error { - h.dirties[common.BytesToHash(key)] = common.CopyBytes(value) - return nil -} - -// Delete removes any existing value for key from the trie. -func (h *testHasher) Delete(key []byte) error { - h.dirties[common.BytesToHash(key)] = nil - return nil -} - -// Commit computes the new hash of the states and returns the set with all -// state changes. -func (h *testHasher) Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet) { - var ( - nodes = make(map[common.Hash][]byte) - set = trienode.NewNodeSet(h.owner) - ) - for hash, val := range h.cleans { - nodes[hash] = val - } - for hash, val := range h.dirties { - nodes[hash] = val - if bytes.Equal(val, h.cleans[hash]) { - continue - } - if len(val) == 0 { - set.AddNode(hash.Bytes(), trienode.NewDeleted()) - } else { - set.AddNode(hash.Bytes(), trienode.New(crypto.Keccak256Hash(val), val)) - } - } - root, blob := hash(nodes) - - // Include the dirty root node as well. - if root != types.EmptyRootHash && root != h.root { - set.AddNode(nil, trienode.New(root, blob)) - } - if root == types.EmptyRootHash && h.root != types.EmptyRootHash { - set.AddNode(nil, trienode.NewDeleted()) - } - return root, set -} - -// hash performs the hash computation upon the provided states. -func hash(states map[common.Hash][]byte) (common.Hash, []byte) { - var hs []common.Hash - for hash := range states { - hs = append(hs, hash) - } - slices.SortFunc(hs, common.Hash.Cmp) - - var input []byte - for _, hash := range hs { - if len(states[hash]) == 0 { - continue - } - input = append(input, hash.Bytes()...) - input = append(input, states[hash]...) 
- } - if len(input) == 0 { - return types.EmptyRootHash, nil - } - return crypto.Keccak256Hash(input), input -} - -type hashLoader struct { - accounts map[common.Hash][]byte - storages map[common.Hash]map[common.Hash][]byte -} - -func newHashLoader(accounts map[common.Hash][]byte, storages map[common.Hash]map[common.Hash][]byte) *hashLoader { - return &hashLoader{ - accounts: accounts, - storages: storages, - } -} - -// OpenTrie opens the main account trie. -func (l *hashLoader) OpenTrie(root common.Hash) (triestate.Trie, error) { - return newTestHasher(common.Hash{}, root, l.accounts) -} - -// OpenStorageTrie opens the storage trie of an account. -func (l *hashLoader) OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (triestate.Trie, error) { - return newTestHasher(addrHash, root, l.storages[addrHash]) -} diff --git a/trie/trienode/node.go b/trie/trienode/node.go index 98d5588b6d..8152eab6c0 100644 --- a/trie/trienode/node.go +++ b/trie/trienode/node.go @@ -25,8 +25,8 @@ import ( ) // Node is a wrapper which contains the encoded blob of the trie node and its -// node hash. It is general enough that can be used to represent trie node -// corresponding to different trie implementations. +// unique hash identifier. It is general enough that can be used to represent +// trie nodes corresponding to different trie implementations. type Node struct { Hash common.Hash // Node hash, empty for deleted node Blob []byte // Encoded node blob, nil for the deleted node @@ -42,13 +42,35 @@ func (n *Node) IsDeleted() bool { return n.Hash == (common.Hash{}) } +// WithPrev wraps the Node with the previous node value attached. +type WithPrev struct { + *Node + Prev []byte // Encoded original value, nil means it's non-existent +} + +// Unwrap returns the internal Node object. +func (n *WithPrev) Unwrap() *Node { + return n.Node +} + +// Size returns the total memory size used by this node. It overloads +// the function in Node by counting the size of previous value as well. +func (n *WithPrev) Size() int { + return n.Node.Size() + len(n.Prev) +} + // New constructs a node with provided node information. func New(hash common.Hash, blob []byte) *Node { return &Node{Hash: hash, Blob: blob} } -// NewDeleted constructs a node which is deleted. -func NewDeleted() *Node { return New(common.Hash{}, nil) } +// NewWithPrev constructs a node with provided node information. +func NewWithPrev(hash common.Hash, blob []byte, prev []byte) *WithPrev { + return &WithPrev{ + Node: New(hash, blob), + Prev: prev, + } +} // leaf represents a trie leaf node type leaf struct { @@ -61,7 +83,7 @@ type leaf struct { type NodeSet struct { Owner common.Hash Leaves []*leaf - Nodes map[string]*Node + Nodes map[string]*WithPrev updates int // the count of updated and inserted nodes deletes int // the count of deleted nodes } @@ -71,26 +93,26 @@ type NodeSet struct { func NewNodeSet(owner common.Hash) *NodeSet { return &NodeSet{ Owner: owner, - Nodes: make(map[string]*Node), + Nodes: make(map[string]*WithPrev), } } // ForEachWithOrder iterates the nodes with the order from bottom to top, // right to left, nodes with the longest path will be iterated first. 
func (set *NodeSet) ForEachWithOrder(callback func(path string, n *Node)) { - var paths []string + var paths sort.StringSlice for path := range set.Nodes { paths = append(paths, path) } - // Bottom-up, the longest path first - sort.Sort(sort.Reverse(sort.StringSlice(paths))) + // Bottom-up, longest path first + sort.Sort(sort.Reverse(paths)) for _, path := range paths { - callback(path, set.Nodes[path]) + callback(path, set.Nodes[path].Unwrap()) } } // AddNode adds the provided node into set. -func (set *NodeSet) AddNode(path []byte, n *Node) { +func (set *NodeSet) AddNode(path []byte, n *WithPrev) { if n.IsDeleted() { set.deletes += 1 } else { @@ -99,26 +121,6 @@ func (set *NodeSet) AddNode(path []byte, n *Node) { set.Nodes[string(path)] = n } -// Merge adds a set of nodes into the set. -func (set *NodeSet) Merge(owner common.Hash, nodes map[string]*Node) error { - if set.Owner != owner { - return fmt.Errorf("nodesets belong to different owner are not mergeable %x-%x", set.Owner, owner) - } - for path, node := range nodes { - prev, ok := set.Nodes[path] - if ok { - // overwrite happens, revoke the counter - if prev.IsDeleted() { - set.deletes -= 1 - } else { - set.updates -= 1 - } - } - set.AddNode([]byte(path), node) - } - return nil -} - // AddLeaf adds the provided leaf node into set. TODO(rjl493456442) how can // we get rid of it? func (set *NodeSet) AddLeaf(parent common.Hash, blob []byte) { @@ -148,11 +150,16 @@ func (set *NodeSet) Summary() string { for path, n := range set.Nodes { // Deletion if n.IsDeleted() { - fmt.Fprintf(out, " [-]: %x\n", path) + fmt.Fprintf(out, " [-]: %x prev: %x\n", path, n.Prev) continue } - // Insertion or update - fmt.Fprintf(out, " [+/*]: %x -> %v \n", path, n.Hash) + // Insertion + if len(n.Prev) == 0 { + fmt.Fprintf(out, " [+]: %x -> %v\n", path, n.Hash) + continue + } + // Update + fmt.Fprintf(out, " [*]: %x -> %v prev: %x\n", path, n.Hash, n.Prev) } } for _, n := range set.Leaves { @@ -181,19 +188,10 @@ func NewWithNodeSet(set *NodeSet) *MergedNodeSet { // Merge merges the provided dirty nodes of a trie into the set. The assumption // is held that no duplicated set belonging to the same trie will be merged twice. func (set *MergedNodeSet) Merge(other *NodeSet) error { - subset, present := set.Sets[other.Owner] + _, present := set.Sets[other.Owner] if present { - return subset.Merge(other.Owner, other.Nodes) + return fmt.Errorf("duplicate trie for owner %#x", other.Owner) } set.Sets[other.Owner] = other return nil } - -// Flatten returns a two-dimensional map for internal nodes. -func (set *MergedNodeSet) Flatten() map[common.Hash]map[string]*Node { - nodes := make(map[common.Hash]map[string]*Node) - for owner, set := range set.Sets { - nodes[owner] = set.Nodes - } - return nodes -} diff --git a/trie/triestate/state.go b/trie/triestate/state.go deleted file mode 100644 index 6504ac6518..0000000000 --- a/trie/triestate/state.go +++ /dev/null @@ -1,277 +0,0 @@ -// (c) 2024, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2023 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
-package triestate
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-
-	"github.com/ava-labs/subnet-evm/core/types"
-	"github.com/ava-labs/subnet-evm/trie/trienode"
-	"github.com/ethereum/go-ethereum/common"
-	"github.com/ethereum/go-ethereum/crypto"
-	"github.com/ethereum/go-ethereum/rlp"
-	"golang.org/x/crypto/sha3"
-)
-
-// Trie is an Ethereum state trie, can be implemented by Ethereum Merkle Patricia
-// tree or Verkle tree.
-type Trie interface {
-	// Get returns the value for key stored in the trie.
-	Get(key []byte) ([]byte, error)
-
-	// Update associates key with value in the trie.
-	Update(key, value []byte) error
-
-	// Delete removes any existing value for key from the trie.
-	Delete(key []byte) error
-
-	// Commit the trie and returns a set of dirty nodes generated along with
-	// the new root hash.
-	Commit(collectLeaf bool) (common.Hash, *trienode.NodeSet)
-}
-
-// TrieLoader wraps functions to load tries.
-type TrieLoader interface {
-	// OpenTrie opens the main account trie.
-	OpenTrie(root common.Hash) (Trie, error)
-
-	// OpenStorageTrie opens the storage trie of an account.
-	OpenStorageTrie(stateRoot common.Hash, addrHash, root common.Hash) (Trie, error)
-}
-
-// Set represents a collection of mutated states during a state transition.
-// The value refers to the original content of state before the transition
-// is made. Nil means that the state was not present previously.
-type Set struct {
-	Accounts   map[common.Address][]byte                 // Mutated account set, nil means the account was not present
-	Storages   map[common.Address]map[common.Hash][]byte // Mutated storage set, nil means the slot was not present
-	Incomplete map[common.Address]struct{}               // Indicator whether the storage is incomplete due to large deletion
-	size       common.StorageSize                        // Approximate size of set
-}
-
-// New constructs the state set with provided data.
-func New(accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, incomplete map[common.Address]struct{}) *Set {
-	return &Set{
-		Accounts:   accounts,
-		Storages:   storages,
-		Incomplete: incomplete,
-	}
-}
-
-// Size returns the approximate memory size occupied by the set.
-func (s *Set) Size() common.StorageSize {
-	if s.size != 0 {
-		return s.size
-	}
-	for _, account := range s.Accounts {
-		s.size += common.StorageSize(common.AddressLength + len(account))
-	}
-	for _, slots := range s.Storages {
-		for _, val := range slots {
-			s.size += common.StorageSize(common.HashLength + len(val))
-		}
-		s.size += common.StorageSize(common.AddressLength)
-	}
-	s.size += common.StorageSize(common.AddressLength * len(s.Incomplete))
-	return s.size
-}
-
-// context wraps all fields for executing state diffs.
-type context struct { - prevRoot common.Hash - postRoot common.Hash - accounts map[common.Address][]byte - storages map[common.Address]map[common.Hash][]byte - accountTrie Trie - nodes *trienode.MergedNodeSet -} - -// Apply traverses the provided state diffs, apply them in the associated -// post-state and return the generated dirty trie nodes. The state can be -// loaded via the provided trie loader. -func Apply(prevRoot common.Hash, postRoot common.Hash, accounts map[common.Address][]byte, storages map[common.Address]map[common.Hash][]byte, loader TrieLoader) (map[common.Hash]map[string]*trienode.Node, error) { - tr, err := loader.OpenTrie(postRoot) - if err != nil { - return nil, err - } - ctx := &context{ - prevRoot: prevRoot, - postRoot: postRoot, - accounts: accounts, - storages: storages, - accountTrie: tr, - nodes: trienode.NewMergedNodeSet(), - } - for addr, account := range accounts { - var err error - if len(account) == 0 { - err = deleteAccount(ctx, loader, addr) - } else { - err = updateAccount(ctx, loader, addr) - } - if err != nil { - return nil, fmt.Errorf("failed to revert state, err: %w", err) - } - } - root, result := tr.Commit(false) - if root != prevRoot { - return nil, fmt.Errorf("failed to revert state, want %#x, got %#x", prevRoot, root) - } - if err := ctx.nodes.Merge(result); err != nil { - return nil, err - } - return ctx.nodes.Flatten(), nil -} - -// updateAccount the account was present in prev-state, and may or may not -// existent in post-state. Apply the reverse diff and verify if the storage -// root matches the one in prev-state account. -func updateAccount(ctx *context, loader TrieLoader, addr common.Address) error { - // The account was present in prev-state, decode it from the - // 'slim-rlp' format bytes. - h := newHasher() - defer h.release() - - addrHash := h.hash(addr.Bytes()) - prev, err := types.FullAccount(ctx.accounts[addr]) - if err != nil { - return err - } - // The account may or may not existent in post-state, try to - // load it and decode if it's found. - blob, err := ctx.accountTrie.Get(addrHash.Bytes()) - if err != nil { - return err - } - post := types.NewEmptyStateAccount() - if len(blob) != 0 { - if err := rlp.DecodeBytes(blob, &post); err != nil { - return err - } - } - // Apply all storage changes into the post-state storage trie. - st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root) - if err != nil { - return err - } - for key, val := range ctx.storages[addr] { - var err error - if len(val) == 0 { - err = st.Delete(key.Bytes()) - } else { - err = st.Update(key.Bytes(), val) - } - if err != nil { - return err - } - } - root, result := st.Commit(false) - if root != prev.Root { - return errors.New("failed to reset storage trie") - } - // The returned set can be nil if storage trie is not changed - // at all. - if result != nil { - if err := ctx.nodes.Merge(result); err != nil { - return err - } - } - // Write the prev-state account into the main trie - full, err := rlp.EncodeToBytes(prev) - if err != nil { - return err - } - return ctx.accountTrie.Update(addrHash.Bytes(), full) -} - -// deleteAccount the account was not present in prev-state, and is expected -// to be existent in post-state. Apply the reverse diff and verify if the -// account and storage is wiped out correctly. -func deleteAccount(ctx *context, loader TrieLoader, addr common.Address) error { - // The account must be existent in post-state, load the account. 
- h := newHasher() - defer h.release() - - addrHash := h.hash(addr.Bytes()) - blob, err := ctx.accountTrie.Get(addrHash.Bytes()) - if err != nil { - return err - } - if len(blob) == 0 { - return fmt.Errorf("account is non-existent %#x", addrHash) - } - var post types.StateAccount - if err := rlp.DecodeBytes(blob, &post); err != nil { - return err - } - st, err := loader.OpenStorageTrie(ctx.postRoot, addrHash, post.Root) - if err != nil { - return err - } - for key, val := range ctx.storages[addr] { - if len(val) != 0 { - return errors.New("expect storage deletion") - } - if err := st.Delete(key.Bytes()); err != nil { - return err - } - } - root, result := st.Commit(false) - if root != types.EmptyRootHash { - return errors.New("failed to clear storage trie") - } - // The returned set can be nil if storage trie is not changed - // at all. - if result != nil { - if err := ctx.nodes.Merge(result); err != nil { - return err - } - } - // Delete the post-state account from the main trie. - return ctx.accountTrie.Delete(addrHash.Bytes()) -} - -// hasher is used to compute the sha256 hash of the provided data. -type hasher struct{ sha crypto.KeccakState } - -var hasherPool = sync.Pool{ - New: func() interface{} { return &hasher{sha: sha3.NewLegacyKeccak256().(crypto.KeccakState)} }, -} - -func newHasher() *hasher { - return hasherPool.Get().(*hasher) -} - -func (h *hasher) hash(data []byte) common.Hash { - return crypto.HashData(h.sha, data) -} - -func (h *hasher) release() { - hasherPool.Put(h) -} diff --git a/utils/metered_cache.go b/utils/metered_cache.go index d554dcb13a..17c86bdaa2 100644 --- a/utils/metered_cache.go +++ b/utils/metered_cache.go @@ -5,11 +5,15 @@ package utils import ( "fmt" + "os" + "path/filepath" "sync/atomic" "time" "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/subnet-evm/metrics" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" ) // MeteredCache wraps *fastcache.Cache and periodically pulls stats from it. @@ -31,15 +35,37 @@ type MeteredCache struct { updateFrequency uint64 } +func dirSize(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return nil + }) + return size, err +} + // NewMeteredCache returns a new MeteredCache that will update stats to the // provided namespace once per each [updateFrequency] operations. // Note: if [updateFrequency] is passed as 0, it will be treated as 1. 
-func NewMeteredCache(size int, namespace string, updateFrequency uint64) *MeteredCache { +func NewMeteredCache(size int, journal string, namespace string, updateFrequency uint64) *MeteredCache { + var cache *fastcache.Cache + if journal == "" { + cache = fastcache.New(size) + } else { + dirSize, err := dirSize(journal) + log.Info("attempting to load cache from disk", "path", journal, "dirSize", common.StorageSize(dirSize), "err", err) + cache = fastcache.LoadFromFileOrNew(journal, size) + } if updateFrequency == 0 { updateFrequency = 1 // avoid division by zero } mc := &MeteredCache{ - Cache: fastcache.New(size), + Cache: cache, namespace: namespace, updateFrequency: updateFrequency, } diff --git a/warp/aggregator/mock_signature_getter.go b/warp/aggregator/mock_signature_getter.go index 537e3ae2e1..f00bb920fa 100644 --- a/warp/aggregator/mock_signature_getter.go +++ b/warp/aggregator/mock_signature_getter.go @@ -8,8 +8,8 @@ import ( context "context" reflect "reflect" - ids "github.com/ava-labs/avalanchego/ids" bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + ids "github.com/ava-labs/avalanchego/ids" warp "github.com/ava-labs/avalanchego/vms/platformvm/warp" gomock "go.uber.org/mock/gomock" )
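Note: with the journal argument restored to NewMeteredCache, each call site now chooses between a fresh in-memory cache (empty journal path) and reloading a previously persisted cache via fastcache.LoadFromFileOrNew. A minimal sketch of a caller against the restored signature; the function name, cache size, and namespace below are illustrative placeholders, not taken from this patch:

    package example

    import "github.com/ava-labs/subnet-evm/utils"

    // newCleanCache sketches post-revert construction. An empty journal path
    // builds a fresh in-memory fastcache; a non-empty path makes
    // NewMeteredCache attempt fastcache.LoadFromFileOrNew first.
    func newCleanCache(journalPath string) *utils.MeteredCache {
    	return utils.NewMeteredCache(
    		256*1024*1024,  // cache size in bytes (placeholder)
    		journalPath,    // journal path; "" keeps the in-memory behavior
    		"pathdb/clean", // metrics namespace (placeholder)
    		4096,           // pull stats from fastcache once per 4096 cache ops
    	)
    }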