From 4f3022bcee6fc5f345044f0130daad6ca965623b Mon Sep 17 00:00:00 2001
From: Guillaume Ballet <3272758+gballet@users.noreply.github.com>
Date: Thu, 30 Nov 2023 14:45:58 +0100
Subject: [PATCH] Refactor transition post genesis (#311)

* rewrite per-block conversion pointer management

* remove unused method

* fix: a branch that can verge at genesis or post genesis (#314)

* fix: import cycle in conversion refactor (#315)

* fix shadowfork panic in OpenStorageTrie
---
 consensus/beacon/consensus.go |   7 +
 core/blockchain.go            |   6 +
 core/chain_makers.go          |   7 +
 core/genesis.go               |  21 +-
 .../conversion.go}            | 224 ++++++++++++++++--
 core/state/database.go        | 206 ++++++++++------
 core/state_processor.go       | 200 +---------------
 light/trie.go                 |  30 ++-
 miner/worker.go               |   3 -
 9 files changed, 389 insertions(+), 315 deletions(-)
 rename core/{overlay_transition.go => overlay/conversion.go} (53%)

diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index 5eeddc47f07e..dd94b8da7790 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -25,6 +25,7 @@ import (
 	"github.com/ethereum/go-ethereum/consensus"
 	"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
 	"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
+	"github.com/ethereum/go-ethereum/core/overlay"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/params"
@@ -363,6 +364,12 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
 		state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeKeccakLeafKey)
 		state.Witness().TouchAddressOnWriteAndComputeGas(w.Address[:], uint256.Int{}, utils.CodeSizeLeafKey)
 	}
+
+	if chain.Config().IsPrague(header.Number, header.Time) {
+		fmt.Println("at block", header.Number, "performing transition?", state.Database().InTransition())
+		parent := chain.GetHeaderByHash(header.ParentHash)
+		overlay.OverlayVerkleTransition(state, parent.Root)
+	}
 }
 
 // FinalizeAndAssemble implements consensus.Engine, setting the final state and
diff --git a/core/blockchain.go b/core/blockchain.go
index ea521835a740..6dcd0e81fab3 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -313,6 +313,12 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
 
 	// Declare the end of the verkle transition if need be
 	if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsPrague {
+		// TODO this only works when resuming a chain that has already gone
+		// through the conversion. All pointers should be saved to the DB
+		// for it to be able to recover if interrupted during the transition,
+		// but that's left for a later PR since there's not really a need
+		// right now.
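(The TODO above defers crash-recovery of the conversion pointers to a later PR. For readers wondering what persisting them could look like, here is a minimal sketch; the helper names, database key, and RLP layout are hypothetical and not part of this patch.)

```go
package overlay

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

// transitionMarker mirrors the TransitionState fields that would need to
// survive a restart. Hypothetical sketch, not code from this patch.
type transitionMarker struct {
	CurrentAccountAddress common.Address
	CurrentSlotHash       common.Hash
	CurrentPreimageOffset uint64 // RLP has no signed integers, hence uint64
	StorageProcessed      bool
}

var transitionMarkerKey = []byte("verkle-transition-marker")

// writeTransitionMarker persists the conversion pointers after each step.
func writeTransitionMarker(db ethdb.KeyValueWriter, m *transitionMarker) {
	data, err := rlp.EncodeToBytes(m)
	if err != nil {
		log.Crit("Failed to encode transition marker", "err", err)
	}
	if err := db.Put(transitionMarkerKey, data); err != nil {
		log.Crit("Failed to store transition marker", "err", err)
	}
}

// readTransitionMarker recovers the pointers at startup; a missing entry
// means the conversion either never started or already finished.
func readTransitionMarker(db ethdb.KeyValueReader) *transitionMarker {
	data, _ := db.Get(transitionMarkerKey)
	if len(data) == 0 {
		return nil
	}
	var m transitionMarker
	if err := rlp.DecodeBytes(data, &m); err != nil {
		return nil
	}
	return &m
}
```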
+		bc.stateCache.InitTransitionStatus(true, true)
 		bc.stateCache.EndVerkleTransition()
 	}
 
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5b9dc0c6ff08..3909500d91fa 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -435,8 +435,14 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
 	}
 	var snaps *snapshot.Tree
 	triedb := state.NewDatabaseWithConfig(db, nil)
+	triedb.StartVerkleTransition(common.Hash{}, common.Hash{}, config, config.PragueTime, common.Hash{})
 	triedb.EndVerkleTransition()
+	statedb, err := state.New(parent.Root(), triedb, snaps)
+	if err != nil {
+		panic(fmt.Sprintf("could not find state for block %d: err=%v, parent root=%x", parent.NumberU64(), err, parent.Root()))
+	}
+	statedb.Database().SaveTransitionState(parent.Root())
 	for i := 0; i < n; i++ {
 		statedb, err := state.New(parent.Root(), triedb, snaps)
 		if err != nil {
 			panic(fmt.Sprintf("could not find state for block %d: err=%v, parent root=%x", i, err, parent.Root()))
diff --git a/core/genesis.go b/core/genesis.go
index c8a4bc5952d9..0aad87a10d70 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -126,6 +126,7 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c
 	// all the derived states will be discarded to not pollute disk.
 	db := state.NewDatabase(rawdb.NewMemoryDatabase())
 	if cfg.IsPrague(big.NewInt(int64(0)), timestamp) {
+		db.StartVerkleTransition(common.Hash{}, common.Hash{}, cfg, &timestamp, common.Hash{})
 		db.EndVerkleTransition()
 	}
 	statedb, err := state.New(types.EmptyRootHash, db, nil)
@@ -146,15 +147,17 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c
 // flush is very similar with deriveHash, but the main difference is
 // all the generated states will be persisted into the given database.
 // Also, the genesis state specification will be flushed as well.
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig) error {
-	statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
-	if err != nil {
-		return err
+func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig, timestamp *uint64) error {
+	database := state.NewDatabaseWithNodeDB(db, triedb)
+	// End the verkle conversion at genesis if the fork block is 0
+	if timestamp != nil && cfg.IsPrague(big.NewInt(int64(0)), *timestamp) {
+		database.StartVerkleTransition(common.Hash{}, common.Hash{}, cfg, timestamp, common.Hash{})
+		database.EndVerkleTransition()
 	}
-	// End the verkle conversion at genesis if the fork block is 0
-	if triedb.IsVerkle() {
-		statedb.Database().EndVerkleTransition()
+	statedb, err := state.New(types.EmptyRootHash, database, nil)
+	if err != nil {
+		return err
 	}
 
 	for addr, account := range *ga {
@@ -221,7 +224,7 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm
 			return errors.New("not found")
 		}
 	}
-	return alloc.flush(db, triedb, blockhash, config)
+	return alloc.flush(db, triedb, blockhash, config, nil)
 }
 
 // GenesisAccount is an account in the state of the genesis block.
@@ -536,7 +539,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
 	// All the checks has passed, flush the states derived from the genesis
 	// specification as well as the specification itself into the provided
 	// database.
-	if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config); err != nil {
+	if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config, &g.Timestamp); err != nil {
 		return nil, err
 	}
 	rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
diff --git a/core/overlay_transition.go b/core/overlay/conversion.go
similarity index 53%
rename from core/overlay_transition.go
rename to core/overlay/conversion.go
index 24bb7d5e6c02..e76aa5900173 100644
--- a/core/overlay_transition.go
+++ b/core/overlay/conversion.go
@@ -14,14 +14,17 @@
 // You should have received a copy of the GNU Lesser General Public License
 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
 
-package core
+package overlay
 
 import (
 	"bufio"
 	"bytes"
+	"encoding/binary"
 	"fmt"
 	"io"
 	"os"
+	"runtime"
+	"sync"
 	"time"
 
 	"github.com/ethereum/go-ethereum/common"
@@ -32,8 +35,187 @@ import (
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
+	"github.com/ethereum/go-ethereum/trie/utils"
+	"github.com/gballet/go-verkle"
+	"github.com/holiman/uint256"
 )
 
+var zeroTreeIndex uint256.Int
+
+// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees.
+// It assumes that the walk of the base tree is done in address-order, so it exploits that fact to
+// collect the key-values in a way that is efficient.
+type keyValueMigrator struct {
+	// leafData contains the values for the future leaf for a particular VKT branch.
+	leafData []migratedKeyValue
+
+	// When prepare() is called, it will start a background routine that will process the leafData
+	// saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background
+	// routine signals that it is done by closing processingReady.
+	processingReady chan struct{}
+	newLeaves       []verkle.LeafNode
+	prepareErr      error
+}
+
+func newKeyValueMigrator() *keyValueMigrator {
+	// We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls
+	// in different goroutines when we never called GetConfig() before, causing a race considering the way
+	// that `config` is designed in go-verkle.
+	// TODO: jsign has a fix for this in the PR where we move to a file-less precomp, since it allows safe
+	// concurrent calls to GetConfig(). When that gets merged, we can remove this line.
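(The comment above describes a classic lazy-initialization race. For readers unfamiliar with the pattern, this is the usual `sync.Once` shape of the fix; the snippet is a generic, self-contained illustration and does not reproduce go-verkle's actual internals.)

```go
package main

import (
	"fmt"
	"sync"
)

// config stands in for go-verkle's lazily-built precomputed tables.
type config struct{ tables []byte }

var (
	cfg     *config
	cfgOnce sync.Once
)

// GetConfig is safe to call concurrently: sync.Once runs the initializer
// exactly once and publishes the result to every caller. The racy variant
// would be `if cfg == nil { cfg = build() }`, which is what the warm-up
// call in newKeyValueMigrator defends against.
func GetConfig() *config {
	cfgOnce.Do(func() {
		cfg = &config{tables: make([]byte, 1<<20)} // expensive precomputation
	})
	return cfg
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = GetConfig()
		}()
	}
	wg.Wait()
	fmt.Println("initialized once, race-free")
}
```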
+	_ = verkle.GetConfig()
+	return &keyValueMigrator{
+		processingReady: make(chan struct{}),
+		leafData:        make([]migratedKeyValue, 0, 10_000),
+	}
+}
+
+type migratedKeyValue struct {
+	branchKey    branchKey
+	leafNodeData verkle.BatchNewLeafNodeData
+}
+type branchKey struct {
+	addr      common.Address
+	treeIndex uint256.Int
+}
+
+func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey {
+	var sk branchKey
+	copy(sk.addr[:], addr)
+	sk.treeIndex = *treeIndex
+	return sk
+}
+
+func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) {
+	treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(slotNumber)
+	leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
+	leafNodeData.Values[subIndex] = slotValue
+}
+
+func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) {
+	leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
+
+	var version [verkle.LeafValueSize]byte
+	leafNodeData.Values[utils.VersionLeafKey] = version[:]
+
+	var balance [verkle.LeafValueSize]byte
+	for i, b := range acc.Balance.Bytes() {
+		balance[len(acc.Balance.Bytes())-1-i] = b
+	}
+	leafNodeData.Values[utils.BalanceLeafKey] = balance[:]
+
+	var nonce [verkle.LeafValueSize]byte
+	binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce)
+	leafNodeData.Values[utils.NonceLeafKey] = nonce[:]
+
+	leafNodeData.Values[utils.CodeKeccakLeafKey] = acc.CodeHash[:]
+}
+
+func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) {
+	leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
+
+	// Save the code size.
+	var codeSizeBytes [verkle.LeafValueSize]byte
+	binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize)
+	leafNodeData.Values[utils.CodeSizeLeafKey] = codeSizeBytes[:]
+
+	// The first 128 chunks are stored in the account header leaf.
+	for i := 0; i < 128 && i < len(chunks)/32; i++ {
+		leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)]
+	}
+
+	// Potential further chunks have their own leaf nodes.
+	for i := 128; i < len(chunks)/32; {
+		treeIndex, _ := utils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i)))
+		leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
+
+		j := i
+		for ; (j-i) < 256 && j < len(chunks)/32; j++ {
+			leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)]
+		}
+		i = j
+	}
+}
+
+func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData {
+	// Remember that the keyValueMigrator receives actions ordered by (address, subtreeIndex).
+	// This means that we can assume that the last element of leafData is the one that we
+	// are looking for, or that we need to create a new one.
+	if len(kvm.leafData) == 0 || kvm.leafData[len(kvm.leafData)-1].branchKey != bk {
+		kvm.leafData = append(kvm.leafData, migratedKeyValue{
+			branchKey: bk,
+			leafNodeData: verkle.BatchNewLeafNodeData{
+				Stem:   nil, // It will be calculated in the prepare() phase, since it's CPU heavy.
+				Values: make(map[byte][]byte),
+			},
+		})
+	}
+	return &kvm.leafData[len(kvm.leafData)-1].leafNodeData
+}
+
+func (kvm *keyValueMigrator) prepare() {
+	// We fire a background routine to process the leafData and save the result in newLeaves.
+	// The background routine signals that it is done by closing processingReady.
+	go func() {
+		// Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine.
+		// This fills each leafNodeData.Stem with the correct value.
+		var wg sync.WaitGroup
+		batchNum := runtime.NumCPU()
+		batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum
+		for i := 0; i < len(kvm.leafData); i += batchSize {
+			start := i
+			end := i + batchSize
+			if end > len(kvm.leafData) {
+				end = len(kvm.leafData)
+			}
+			wg.Add(1)
+
+			batch := kvm.leafData[start:end]
+			go func() {
+				defer wg.Done()
+				var currAddr common.Address
+				var currPoint *verkle.Point
+				for i := range batch {
+					if batch[i].branchKey.addr != currAddr || currAddr == (common.Address{}) {
+						currAddr = batch[i].branchKey.addr
+						currPoint = utils.EvaluateAddressPoint(currAddr[:])
+					}
+					stem := utils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0)
+					stem = stem[:verkle.StemSize]
+					batch[i].leafNodeData.Stem = stem
+				}
+			}()
+		}
+		wg.Wait()
+
+		// Step 2: Now that we have all stems (i.e. tree keys) calculated, we can create the new leaves.
+		nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData))
+		for i := range kvm.leafData {
+			nodeValues[i] = kvm.leafData[i].leafNodeData
+		}
+
+		// Create all leaves in batch mode so we can optimize cryptography operations.
+		kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues)
+		close(kvm.processingReady)
+	}()
+}
+
+func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error {
+	now := time.Now()
+	<-kvm.processingReady
+	if kvm.prepareErr != nil {
+		return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr)
+	}
+	log.Info("Prepared key values from base tree", "duration", time.Since(now))
+
+	// Insert into the tree.
+	if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil {
+		return fmt.Errorf("failed to insert migrated leaves: %w", err)
+	}
+
+	return nil
+}
+
 // OverlayVerkleTransition contains the overlay conversion logic
 func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 	migrdb := statedb.Database()
@@ -47,7 +229,7 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 		mpt             = tt.Base()
 		vkt             = tt.Overlay()
 		hasPreimagesBin = false
-		preimageSeek    = migrdb.GetCurrentPreimageOffset(root)
+		preimageSeek    = migrdb.GetCurrentPreimageOffset()
 		fpreimages      *bufio.Reader
 	)
 
@@ -65,7 +247,7 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 		hasPreimagesBin = true
 	}
 
-	accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), migrdb.GetCurrentAccountHash(root))
+	accIt, err := statedb.Snaps().AccountIterator(mpt.Hash(), migrdb.GetCurrentAccountHash())
 	if err != nil {
 		return err
 	}
 	defer accIt.Release()
 	accIt.Next()
 
 	// If we're about to start with the migration process, we have to read the first account hash preimage.
-	if migrdb.GetCurrentAccountAddress(root) == nil {
+	if migrdb.GetCurrentAccountAddress() == nil {
 		var addr common.Address
 		if hasPreimagesBin {
 			if _, err := io.ReadFull(fpreimages, addr[:]); err != nil {
@@ -85,8 +267,8 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 				return fmt.Errorf("addr len is zero is not 32: %d", len(addr))
 			}
 		}
-		migrdb.SetCurrentAccountAddress(addr, root)
-		if migrdb.GetCurrentAccountHash(root) != accIt.Hash() {
+		migrdb.SetCurrentAccountAddress(addr)
+		if migrdb.GetCurrentAccountHash() != accIt.Hash() {
 			return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash())
 		}
 		preimageSeek += int64(len(addr))
@@ -108,7 +290,7 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 			log.Error("Invalid account encountered during traversal", "error", err)
 			return err
 		}
-		vkt.SetStorageRootConversion(*migrdb.GetCurrentAccountAddress(root), acc.Root)
+		vkt.SetStorageRootConversion(*migrdb.GetCurrentAccountAddress(), acc.Root)
 
 		// Start with processing the storage, because once the account is
 		// converted, the `stateRoot` field loses its meaning. Which means
 		// that it opens the door to a situation in which the storage isn't
 		// to during normal block execution. A mitigation strategy has been
 		// introduced with the `*StorageRootConversion` fields in VerkleDB.
 		if acc.HasStorage() {
-			stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), migrdb.GetCurrentSlotHash(root))
+			stIt, err := statedb.Snaps().StorageIterator(mpt.Hash(), accIt.Hash(), migrdb.GetCurrentSlotHash())
 			if err != nil {
 				return err
 			}
 			// processing the storage for that account where we left off.
 			// If the entire storage was processed, then the iterator was
 			// created in vain, but it's ok as this will not happen often.
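(The loop that follows caps the number of migrated leaves per block and persists a cursor so the next block resumes where this one stopped. Stripped of the tree-specific details, the bookkeeping pattern is roughly the sketch below; all names and the tiny budget are illustrative only.)

```go
// Sketch of the per-block work cap used by the conversion loop below:
// process at most maxMovedCount items per call, remember where we stopped,
// and record when the whole sequence has been drained.
package main

import "fmt"

const maxMovedCount = 3 // the real code uses a much larger budget

type cursorDB struct {
	next      int  // persisted position, like CurrentSlotHash/CurrentAccountAddress
	processed bool // like StorageProcessed: the sequence is fully drained
}

func convertStep(db *cursorDB, items []string) {
	count := 0
	for ; !db.processed && count < maxMovedCount; count++ {
		fmt.Println("migrating", items[db.next])
		// advance the "iterator"; mark the end when we run out of items
		db.next++
		db.processed = db.next == len(items)
	}
}

func main() {
	db := &cursorDB{}
	items := []string{"a", "b", "c", "d", "e"}
	for block := 1; !db.processed; block++ {
		convertStep(db, items) // one capped step per block
		fmt.Println("block", block, "done, resume at", db.next)
	}
}
```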
-			for ; !migrdb.GetStorageProcessed(root) && count < maxMovedCount; count++ {
+			for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ {
 				var (
 					value     []byte   // slot value after RLP decoding
 					safeValue [32]byte // 32-byte aligned value
@@ -160,12 +342,12 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 				}
 				preimageSeek += int64(len(slotnr))
 
-				mkv.addStorageSlot(migrdb.GetCurrentAccountAddress(root).Bytes(), slotnr, safeValue[:])
+				mkv.addStorageSlot(migrdb.GetCurrentAccountAddress().Bytes(), slotnr, safeValue[:])
 
 				// advance the storage iterator
-				migrdb.SetStorageProcessed(!stIt.Next(), root)
-				if !migrdb.GetStorageProcessed(root) {
-					migrdb.SetCurrentSlotHash(stIt.Hash(), root)
+				migrdb.SetStorageProcessed(!stIt.Next())
+				if !migrdb.GetStorageProcessed() {
+					migrdb.SetCurrentSlotHash(stIt.Hash())
 				}
 			}
 			stIt.Release()
@@ -178,20 +360,20 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 		if count < maxMovedCount {
 			count++ // count increase for the account itself
 
-			mkv.addAccount(migrdb.GetCurrentAccountAddress(root).Bytes(), acc)
-			vkt.ClearStrorageRootConversion(*migrdb.GetCurrentAccountAddress(root))
+			mkv.addAccount(migrdb.GetCurrentAccountAddress().Bytes(), acc)
+			vkt.ClearStrorageRootConversion(*migrdb.GetCurrentAccountAddress())
 
 			// Store the account code if present
 			if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) {
 				code := rawdb.ReadCode(statedb.Database().DiskDB(), common.BytesToHash(acc.CodeHash))
 				chunks := trie.ChunkifyCode(code)
 
-				mkv.addAccountCode(migrdb.GetCurrentAccountAddress(root).Bytes(), uint64(len(code)), chunks)
+				mkv.addAccountCode(migrdb.GetCurrentAccountAddress().Bytes(), uint64(len(code)), chunks)
 			}
 
 			// reset storage iterator marker for next account
-			migrdb.SetStorageProcessed(false, root)
-			migrdb.SetCurrentSlotHash(common.Hash{}, root)
+			migrdb.SetStorageProcessed(false)
+			migrdb.SetCurrentSlotHash(common.Hash{})
 
 			// Move to the next account, if available - or end
 			// the transition otherwise.
@@ -212,7 +394,7 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 					return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash())
 				}
 				preimageSeek += int64(len(addr))
-				migrdb.SetCurrentAccountAddress(addr, root)
+				migrdb.SetCurrentAccountAddress(addr)
 			} else {
 				// case when the account iterator has
 				// reached the end but count < maxCount
@@ -221,9 +403,9 @@ func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash) error {
 			}
 		}
 	}
-	migrdb.SetCurrentPreimageOffset(preimageSeek, root)
+	migrdb.SetCurrentPreimageOffset(preimageSeek)
 
-	log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash(root))
+	log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash())
 
 	// Take all the collected key-values and prepare the new leaf values.
 	// This fires a background routine that will start doing the work that
diff --git a/core/state/database.go b/core/state/database.go
index cd9f8dcf43f1..fcc41ba95927 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -26,6 +26,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
 	"github.com/ethereum/go-ethereum/trie"
 	"github.com/ethereum/go-ethereum/trie/trienode"
@@ -74,27 +75,33 @@ type Database interface {
 
 	Transitioned() bool
 
-	SetCurrentSlotHash(common.Hash, common.Hash)
+	InitTransitionStatus(bool, bool)
 
-	GetCurrentAccountAddress(common.Hash) *common.Address
+	SetCurrentSlotHash(common.Hash)
 
-	SetCurrentAccountAddress(common.Address, common.Hash)
+	GetCurrentAccountAddress() *common.Address
 
-	GetCurrentAccountHash(common.Hash) common.Hash
+	SetCurrentAccountAddress(common.Address)
 
-	GetCurrentSlotHash(common.Hash) common.Hash
+	GetCurrentAccountHash() common.Hash
 
-	SetStorageProcessed(bool, common.Hash)
+	GetCurrentSlotHash() common.Hash
 
-	GetStorageProcessed(common.Hash) bool
+	SetStorageProcessed(bool)
 
-	GetCurrentPreimageOffset(common.Hash) int64
+	GetStorageProcessed() bool
 
-	SetCurrentPreimageOffset(int64, common.Hash)
+	GetCurrentPreimageOffset() int64
+
+	SetCurrentPreimageOffset(int64)
 
 	AddRootTranslation(originalRoot, translatedRoot common.Hash)
 
 	SetLastMerkleRoot(common.Hash)
+
+	SaveTransitionState(common.Hash)
+
+	LoadTransitionState(common.Hash)
 }
 
 // Trie is a Ethereum Merkle Patricia trie.
@@ -182,40 +189,31 @@ func NewDatabase(db ethdb.Database) Database {
 // large memory cache.
 func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
 	return &cachingDB{
-		disk:                  db,
-		codeSizeCache:         lru.NewCache[common.Hash, int](codeSizeCacheSize),
-		codeCache:             lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
-		triedb:                trie.NewDatabaseWithConfig(db, config),
-		addrToPoint:           utils.NewPointCache(),
-		StorageProcessed:      map[common.Hash]bool{},
-		CurrentAccountAddress: map[common.Hash]*common.Address{},
-		CurrentSlotHash:       map[common.Hash]common.Hash{},
-		CurrentPreimageOffset: map[common.Hash]int64{},
+		disk:          db,
+		codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+		codeCache:     lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+		triedb:        trie.NewDatabaseWithConfig(db, config),
+		addrToPoint:   utils.NewPointCache(),
 	}
 }
 
 // NewDatabaseWithNodeDB creates a state database with an already initialized node database.
 func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
 	return &cachingDB{
-		disk:                  db,
-		codeSizeCache:         lru.NewCache[common.Hash, int](codeSizeCacheSize),
-		codeCache:             lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
-		triedb:                triedb,
-		addrToPoint:           utils.NewPointCache(),
-		ended:                 triedb.IsVerkle(),
-		StorageProcessed:      map[common.Hash]bool{},
-		CurrentAccountAddress: map[common.Hash]*common.Address{},
-		CurrentSlotHash:       map[common.Hash]common.Hash{},
-		CurrentPreimageOffset: map[common.Hash]int64{},
+		disk:          db,
+		codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+		codeCache:     lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+		triedb:        triedb,
+		addrToPoint:   utils.NewPointCache(),
 	}
 }
 
 func (db *cachingDB) InTransition() bool {
-	return db.started && !db.ended
+	return db.CurrentTransitionState != nil && db.CurrentTransitionState.started && !db.CurrentTransitionState.ended
 }
 
 func (db *cachingDB) Transitioned() bool {
-	return db.ended
+	return db.CurrentTransitionState != nil && db.CurrentTransitionState.ended
 }
 
 // Fork implements the fork
@@ -227,29 +225,35 @@ func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.H
 |  |  |  Y  \  ___/ \  ___/|  |_\  ___/|  |_> |  Y  \/ __ \|  |  |  |  |  Y  \/ __ \_\___ \ \     /|  |  |  \\___  /\___  |____|  |___|  /\___  \___  |____/\___  |   __/|___|  (____  |___|  |  |___|  (____  /_____/ \/\_/ |__|___|  /_____//_____/ |__|`)
-	db.started = true
-	db.ended = false
+	db.CurrentTransitionState = &TransitionState{
+		started: true,
+		// initialize so that the first storage-less accounts are processed
+		StorageProcessed: true,
+	}
 	// db.AddTranslation(originalRoot, translatedRoot)
 	db.baseRoot = originalRoot
-	// initialize so that the first storage-less accounts are processed
-	db.StorageProcessed[root] = true
 	// Reinitialize values in case of a reorg
-	db.CurrentAccountAddress[root] = &(common.Address{})
-	db.CurrentSlotHash[root] = common.Hash{}
-	db.CurrentPreimageOffset[root] = 0
 	if pragueTime != nil {
 		chainConfig.PragueTime = pragueTime
 	}
 }
 
 func (db *cachingDB) ReorgThroughVerkleTransition() {
-	db.ended, db.started = false, false
+	log.Warn("trying to reorg through the transition, which makes no sense at this point")
+}
+
+func (db *cachingDB) InitTransitionStatus(started, ended bool) {
+	db.CurrentTransitionState = &TransitionState{
+		ended:   ended,
+		started: started,
+		// TODO add other fields when we handle mid-transition interrupts
+	}
 }
 
 func (db *cachingDB) EndVerkleTransition() {
-	if !db.started {
-		db.started = true
+	if !db.CurrentTransitionState.started {
+		db.CurrentTransitionState.started = true
 	}
 
 	fmt.Println(`
@@ -259,7 +263,35 @@ func (db *cachingDB) EndVerkleTransition() {
 |  |  |  Y  \  ___/ \  ___/|  |_\  ___/|  |_> |  Y  \/ __ \|  |  |  |  |  Y  \/ __ \_\___ \  |  |__/ __ \|  |  / /_/  \  ___// /_/ |  |____|  |___|  /\___  \___  |____/\___  |   __/|___|  (____  |___|  |  |___|  (____  /_____/  |____(____  |___|  \____ |\___  \____ |  |__|`)
-	db.ended = true
+	db.CurrentTransitionState.ended = true
+}
+
+type TransitionState struct {
+	CurrentAccountAddress *common.Address // address of the last translated account
+	CurrentSlotHash       common.Hash     // hash of the last translated storage slot
+	CurrentPreimageOffset int64           // next byte to read from the preimage file
+	started, ended        bool
+
+	// Mark whether the storage for an account has been processed. This is useful if the
+	// maximum number of leaves of the conversion is reached before the whole storage is
+	// processed.
+	StorageProcessed bool
+}
+
+func (ts *TransitionState) Copy() *TransitionState {
+	ret := &TransitionState{
+		started:               ts.started,
+		ended:                 ts.ended,
+		CurrentSlotHash:       ts.CurrentSlotHash,
+		CurrentPreimageOffset: ts.CurrentPreimageOffset,
+		// carry over the storage-progress marker as well, so that a reloaded
+		// state resumes mid-account instead of re-walking its storage
+		StorageProcessed: ts.StorageProcessed,
+	}
+
+	if ts.CurrentAccountAddress != nil {
+		ret.CurrentAccountAddress = &common.Address{}
+		copy(ret.CurrentAccountAddress[:], ts.CurrentAccountAddress[:])
+	}
+
+	return ret
 }
 
 type cachingDB struct {
@@ -268,22 +300,16 @@ type cachingDB struct {
 	codeCache     *lru.SizeConstrainedCache[common.Hash, []byte]
 	triedb        *trie.Database
 
-	// Verkle specific fields
+	// Transition-specific fields
 	// TODO ensure that this info is in the DB
-	started, ended bool
-	LastMerkleRoot common.Hash // root hash of the read-only base tree
+	LastMerkleRoot         common.Hash // root hash of the read-only base tree
+	CurrentTransitionState *TransitionState
+	TransitionStatePerRoot map[common.Hash]*TransitionState
 
 	addrToPoint *utils.PointCache
 
-	baseRoot              common.Hash                     // hash of the read-only base tree
-	CurrentAccountAddress map[common.Hash]*common.Address // addresss of the last translated account
-	CurrentSlotHash       map[common.Hash]common.Hash     // hash of the last translated storage slot
-	CurrentPreimageOffset map[common.Hash]int64           // next byte to read from the preimage file
+	baseRoot common.Hash // hash of the read-only base tree
 
-	// Mark whether the storage for an account has been processed. This is useful if the
-	// maximum number of leaves of the conversion is reached before the whole storage is
-	// processed.
-	StorageProcessed map[common.Hash]bool
 }
 
 func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) {
@@ -297,14 +323,14 @@ func (db *cachingDB) openVKTrie(root common.Hash) (Trie, error) {
 	payload, err := db.DiskDB().Get(trie.FlatDBVerkleNodeKeyPrefix)
 	if err != nil {
-		return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.ended), nil
+		return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.CurrentTransitionState.ended), nil
 	}
 
 	r, err := verkle.ParseNode(payload, 0)
 	if err != nil {
 		panic(err)
 	}
-	return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.ended), err
+	return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.CurrentTransitionState.ended), err
 }
 
 // OpenTrie opens the main account trie at a specific root hash.
@@ -316,7 +342,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
 
 	// TODO separate both cases when I can be certain that it won't
 	// find a Verkle trie where is expects a Transitoion trie.
-	if db.started || db.ended {
+	if db.CurrentTransitionState != nil && (db.CurrentTransitionState.started || db.CurrentTransitionState.ended) {
 		// NOTE this is a kaustinen-only change, it will break replay
 		vkt, err := db.openVKTrie(root)
 		if err != nil {
 			return nil, err
 		}
 
 		// If the verkle conversion has ended, return a single
 		// verkle trie.
-		if db.ended {
+		if db.CurrentTransitionState.ended {
 			return vkt, nil
 		}
 
@@ -358,7 +384,7 @@ func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Add
 // OpenStorageTrie opens the storage trie of an account
 func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
 	// TODO this should only return a verkle tree
-	if db.ended {
+	if db.Transitioned() {
 		mpt, err := db.openStorageMPTrie(types.EmptyRootHash, address, common.Hash{}, self)
 		if err != nil {
 			return nil, err
 		}
@@ -374,7 +400,7 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
 			panic("unexpected trie type")
 		}
 	}
-	if db.started {
+	if db.InTransition() {
 		mpt, err := db.openStorageMPTrie(db.LastMerkleRoot, address, root, nil)
 		if err != nil {
 			return nil, err
 		}
@@ -463,44 +489,44 @@ func (db *cachingDB) GetTreeKeyHeader(addr []byte) *verkle.Point {
 	return db.addrToPoint.GetTreeKeyHeader(addr)
 }
 
-func (db *cachingDB) SetCurrentAccountAddress(addr common.Address, root common.Hash) {
-	db.CurrentAccountAddress[root] = &addr
+func (db *cachingDB) SetCurrentAccountAddress(addr common.Address) {
+	db.CurrentTransitionState.CurrentAccountAddress = &addr
 }
 
-func (db *cachingDB) GetCurrentAccountHash(root common.Hash) common.Hash {
+func (db *cachingDB) GetCurrentAccountHash() common.Hash {
 	var addrHash common.Hash
-	if db.CurrentAccountAddress[root] != nil {
-		addrHash = crypto.Keccak256Hash(db.CurrentAccountAddress[root][:])
+	if db.CurrentTransitionState.CurrentAccountAddress != nil {
+		addrHash = crypto.Keccak256Hash(db.CurrentTransitionState.CurrentAccountAddress[:])
 	}
 	return addrHash
 }
 
-func (db *cachingDB) GetCurrentAccountAddress(root common.Hash) *common.Address {
-	return db.CurrentAccountAddress[root]
+func (db *cachingDB) GetCurrentAccountAddress() *common.Address {
	return db.CurrentTransitionState.CurrentAccountAddress
 }
 
-func (db *cachingDB) GetCurrentPreimageOffset(root common.Hash) int64 {
-	return db.CurrentPreimageOffset[root]
+func (db *cachingDB) GetCurrentPreimageOffset() int64 {
+	return db.CurrentTransitionState.CurrentPreimageOffset
 }
 
-func (db *cachingDB) SetCurrentPreimageOffset(offset int64, root common.Hash) {
-	db.CurrentPreimageOffset[root] = offset
+func (db *cachingDB) SetCurrentPreimageOffset(offset int64) {
+	db.CurrentTransitionState.CurrentPreimageOffset = offset
 }
 
-func (db *cachingDB) SetCurrentSlotHash(hash common.Hash, root common.Hash) {
-	db.CurrentSlotHash[root] = hash
+func (db *cachingDB) SetCurrentSlotHash(hash common.Hash) {
+	db.CurrentTransitionState.CurrentSlotHash = hash
 }
 
-func (db *cachingDB) GetCurrentSlotHash(root common.Hash) common.Hash {
-	return db.CurrentSlotHash[root]
+func (db *cachingDB) GetCurrentSlotHash() common.Hash {
+	return db.CurrentTransitionState.CurrentSlotHash
 }
 
-func (db *cachingDB) SetStorageProcessed(processed bool, root common.Hash) {
-	db.StorageProcessed[root] = processed
+func (db *cachingDB) SetStorageProcessed(processed bool) {
+	db.CurrentTransitionState.StorageProcessed = processed
 }
 
-func (db *cachingDB) GetStorageProcessed(root common.Hash) bool {
-	return db.StorageProcessed[root]
+func (db *cachingDB) GetStorageProcessed() bool {
+	return db.CurrentTransitionState.StorageProcessed
 }
 
 func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash) {
@@ -509,3 +535,29 @@ func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash
 }
 
 func (db *cachingDB) SetLastMerkleRoot(merkleRoot common.Hash) {
 	db.LastMerkleRoot = merkleRoot
 }
+
+func (db *cachingDB) SaveTransitionState(root common.Hash) {
+	if db.TransitionStatePerRoot == nil {
+		db.TransitionStatePerRoot = make(map[common.Hash]*TransitionState)
+	}
+
+	db.TransitionStatePerRoot[root] = db.CurrentTransitionState
+}
+
+func (db *cachingDB) LoadTransitionState(root common.Hash) {
+	if db.TransitionStatePerRoot == nil {
+		db.TransitionStatePerRoot = make(map[common.Hash]*TransitionState)
+	}
+
+	ts, ok := db.TransitionStatePerRoot[root]
+	if !ok || ts == nil {
+		// Start with a fresh state
+		ts = &TransitionState{ended: db.triedb.IsVerkle()}
+	}
+
+	db.CurrentTransitionState = ts.Copy()
+
+	if db.CurrentTransitionState != nil {
+		fmt.Println("address", db.CurrentTransitionState.CurrentAccountAddress)
+	}
+}
diff --git a/core/state_processor.go b/core/state_processor.go
index 0842336f7a02..9ccdf51e3ef8 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -21,9 +21,6 @@ import (
 	"errors"
 	"fmt"
 	"math/big"
-	"runtime"
-	"sync"
-	"time"
 
 	"github.com/ethereum/go-ethereum/common"
 	"github.com/ethereum/go-ethereum/consensus"
@@ -34,11 +31,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/params"
-	"github.com/ethereum/go-ethereum/trie"
-	"github.com/ethereum/go-ethereum/trie/utils"
-	tutils "github.com/ethereum/go-ethereum/trie/utils"
-	"github.com/ethereum/go-verkle"
-	"github.com/holiman/uint256"
 )
 
 // StateProcessor is a basic Processor, which takes care of transitioning
@@ -115,10 +107,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
 	}
 
 	// Perform the overlay transition, if relevant
-	parent := p.bc.GetHeaderByHash(header.ParentHash)
-	if err := OverlayVerkleTransition(statedb, parent.Root); err != nil {
-		return nil, nil, 0, fmt.Errorf("error performing verkle overlay transition: %w", err)
-	}
+	//parent := p.bc.GetHeaderByHash(header.ParentHash)
+	//if err := OverlayVerkleTransition(statedb, parent.Root); err != nil {
+	//	return nil, nil, 0, fmt.Errorf("error performing verkle overlay transition: %w", err)
+	//}
 
 	// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
 	p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals)
@@ -194,190 +186,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
 	return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
 }
 
-var zeroTreeIndex uint256.Int
-
-// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees.
-// It assumes that the walk of the base tree is done in address-order, so it exploit that fact to
-// collect the key-values in a way that is efficient.
-type keyValueMigrator struct {
-	// leafData contains the values for the future leaf for a particular VKT branch.
-	leafData []migratedKeyValue
-
-	// When prepare() is called, it will start a background routine that will process the leafData
-	// saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background
-	// routine signals that it is done by closing processingReady.
-	processingReady chan struct{}
-	newLeaves       []verkle.LeafNode
-	prepareErr      error
-}
-
-func newKeyValueMigrator() *keyValueMigrator {
-	// We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls
-	// in different goroutines when we never called GetConfig() before, causing a race considering the way
-	// that `config` is designed in go-verkle.
-	// TODO: jsign as a fix for this in the PR where we move to a file-less precomp, since it allows safe
-	// concurrent calls to GetConfig(). When that gets merged, we can remove this line.
-	_ = verkle.GetConfig()
-	return &keyValueMigrator{
-		processingReady: make(chan struct{}),
-		leafData:        make([]migratedKeyValue, 0, 10_000),
-	}
-}
-
-type migratedKeyValue struct {
-	branchKey    branchKey
-	leafNodeData verkle.BatchNewLeafNodeData
-}
-type branchKey struct {
-	addr      common.Address
-	treeIndex uint256.Int
-}
-
-func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey {
-	var sk branchKey
-	copy(sk.addr[:], addr)
-	sk.treeIndex = *treeIndex
-	return sk
-}
-
-func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) {
-	treeIndex, subIndex := tutils.GetTreeKeyStorageSlotTreeIndexes(slotNumber)
-	leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
-	leafNodeData.Values[subIndex] = slotValue
-}
-
-func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) {
-	leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
-
-	var version [verkle.LeafValueSize]byte
-	leafNodeData.Values[tutils.VersionLeafKey] = version[:]
-
-	var balance [verkle.LeafValueSize]byte
-	for i, b := range acc.Balance.Bytes() {
-		balance[len(acc.Balance.Bytes())-1-i] = b
-	}
-	leafNodeData.Values[tutils.BalanceLeafKey] = balance[:]
-
-	var nonce [verkle.LeafValueSize]byte
-	binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce)
-	leafNodeData.Values[tutils.NonceLeafKey] = nonce[:]
-
-	leafNodeData.Values[tutils.CodeKeccakLeafKey] = acc.CodeHash[:]
-}
-
-func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) {
-	leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
-
-	// Save the code size.
-	var codeSizeBytes [verkle.LeafValueSize]byte
-	binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize)
-	leafNodeData.Values[tutils.CodeSizeLeafKey] = codeSizeBytes[:]
-
-	// The first 128 chunks are stored in the account header leaf.
-	for i := 0; i < 128 && i < len(chunks)/32; i++ {
-		leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)]
-	}
-
-	// Potential further chunks, have their own leaf nodes.
-	for i := 128; i < len(chunks)/32; {
-		treeIndex, _ := tutils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i)))
-		leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
-
-		j := i
-		for ; (j-i) < 256 && j < len(chunks)/32; j++ {
-			leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)]
-		}
-		i = j
-	}
-}
-
-func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData {
-	// Remember that keyValueMigration receives actions ordered by (address, subtreeIndex).
-	// This means that we can assume that the last element of leafData is the one that we
-	// are looking for, or that we need to create a new one.
-	if len(kvm.leafData) == 0 || kvm.leafData[len(kvm.leafData)-1].branchKey != bk {
-		kvm.leafData = append(kvm.leafData, migratedKeyValue{
-			branchKey: bk,
-			leafNodeData: verkle.BatchNewLeafNodeData{
-				Stem:   nil, // It will be calculated in the prepare() phase, since it's CPU heavy.
-				Values: make(map[byte][]byte),
-			},
-		})
-	}
-	return &kvm.leafData[len(kvm.leafData)-1].leafNodeData
-}
-
-func (kvm *keyValueMigrator) prepare() {
-	// We fire a background routine to process the leafData and save the result in newLeaves.
-	// The background routine signals that it is done by closing processingReady.
-	go func() {
-		// Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine.
-		// This fills each leafNodeData.Stem with the correct value.
-		var wg sync.WaitGroup
-		batchNum := runtime.NumCPU()
-		batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum
-		for i := 0; i < len(kvm.leafData); i += batchSize {
-			start := i
-			end := i + batchSize
-			if end > len(kvm.leafData) {
-				end = len(kvm.leafData)
-			}
-			wg.Add(1)
-
-			batch := kvm.leafData[start:end]
-			go func() {
-				defer wg.Done()
-				var currAddr common.Address
-				var currPoint *verkle.Point
-				for i := range batch {
-					if batch[i].branchKey.addr != currAddr || currAddr == (common.Address{}) {
-						currAddr = batch[i].branchKey.addr
-						currPoint = tutils.EvaluateAddressPoint(currAddr[:])
-					}
-					stem := tutils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0)
-					stem = stem[:verkle.StemSize]
-					batch[i].leafNodeData.Stem = stem
-				}
-			}()
-		}
-		wg.Wait()
-
-		// Step 2: Now that we have all stems (i.e: tree keys) calculated, we can create the new leaves.
-		nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData))
-		for i := range kvm.leafData {
-			nodeValues[i] = kvm.leafData[i].leafNodeData
-		}
-
-		// Create all leaves in batch mode so we can optimize cryptography operations.
-		kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues)
-		close(kvm.processingReady)
-	}()
-}
-
-func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error {
-	now := time.Now()
-	<-kvm.processingReady
-	if kvm.prepareErr != nil {
-		return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr)
-	}
-	log.Info("Prepared key values from base tree", "duration", time.Since(now))
-
-	// Insert into the tree.
-	if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil {
-		return fmt.Errorf("failed to insert migrated leaves: %w", err)
-	}
-
-	return nil
-}
-
-func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash, chain consensus.ChainHeaderReader) {
-	ancestor := chain.GetHeader(prevHash, prevNumber)
-	for i := prevNumber; i > 0 && i >= prevNumber-256; i-- {
-		ProcessParentBlockHash(statedb, i, ancestor.Hash())
-		ancestor = chain.GetHeader(ancestor.ParentHash, ancestor.Number.Uint64()-1)
-	}
-}
-
 func ProcessParentBlockHash(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash) {
 	var key common.Hash
 	binary.BigEndian.PutUint64(key[24:], prevNumber)
diff --git a/light/trie.go b/light/trie.go
index 6d0c654ff111..df300c8c6ed2 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -121,39 +121,43 @@ func (db *odrDatabase) Transitioned() bool {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) SetCurrentSlotHash(common.Hash, common.Hash) {
+func (db *odrDatabase) InitTransitionStatus(bool, bool) {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) GetCurrentAccountAddress(common.Hash) *common.Address {
+func (db *odrDatabase) SetCurrentSlotHash(common.Hash) {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) SetCurrentAccountAddress(common.Address, common.Hash) {
+func (db *odrDatabase) GetCurrentAccountAddress() *common.Address {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) GetCurrentAccountHash(common.Hash) common.Hash {
+func (db *odrDatabase) SetCurrentAccountAddress(common.Address) {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) GetCurrentSlotHash(common.Hash) common.Hash {
+func (db *odrDatabase) GetCurrentAccountHash() common.Hash {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) SetStorageProcessed(bool, common.Hash) {
+func (db *odrDatabase) GetCurrentSlotHash() common.Hash {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) GetStorageProcessed(common.Hash) bool {
+func (db *odrDatabase) SetStorageProcessed(bool) {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) GetCurrentPreimageOffset(common.Hash) int64 {
+func (db *odrDatabase) GetStorageProcessed() bool {
 	panic("not implemented") // TODO: Implement
 }
 
-func (db *odrDatabase) SetCurrentPreimageOffset(int64, common.Hash) {
+func (db *odrDatabase) GetCurrentPreimageOffset() int64 {
+	panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) SetCurrentPreimageOffset(int64) {
 	panic("not implemented") // TODO: Implement
 }
 
@@ -165,6 +169,14 @@ func (db *odrDatabase) SetLastMerkleRoot(common.Hash) {
 	panic("not implemented") // TODO: Implement
 }
 
+func (db *odrDatabase) SaveTransitionState(common.Hash) {
+	panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) LoadTransitionState(common.Hash) {
+	panic("not implemented") // TODO: Implement
+}
+
 type odrTrie struct {
 	db *odrDatabase
 	id *TrieID
diff --git a/miner/worker.go b/miner/worker.go
index 1d1b2fda07a2..9ac258c77591 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -904,9 +904,6 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
 	if err != nil {
 		return nil, err
 	}
-	if w.chain.Config().IsPrague(header.Number, header.Time) {
-		core.OverlayVerkleTransition(state, parent.Root)
-	}
 	// Run the consensus preparation with the default or customized consensus engine.
 	if err := w.engine.Prepare(w.chain, header); err != nil {
 		log.Error("Failed to prepare header for sealing", "err", err)
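(Taken together, the patch replaces the per-root pointer maps with a single TransitionState that is snapshotted per root via SaveTransitionState and restored via LoadTransitionState. Below is a condensed, self-contained sketch of that intended per-block lifecycle; the harness, simplified types, and string roots are hypothetical, not code from this patch.)

```go
// Hypothetical harness showing how the per-root TransitionState snapshots
// introduced in this patch are meant to be used around block processing.
package main

import "fmt"

type TransitionState struct {
	started, ended bool
	accountsDone   int
}

func (ts *TransitionState) Copy() *TransitionState { c := *ts; return &c }

type db struct {
	current *TransitionState
	perRoot map[string]*TransitionState
}

// LoadTransitionState restores the snapshot taken at `root`, or starts fresh.
func (d *db) LoadTransitionState(root string) {
	ts, ok := d.perRoot[root]
	if !ok {
		ts = &TransitionState{started: true}
	}
	d.current = ts.Copy() // copy, so a reorg back to `root` can reload it unchanged
}

// SaveTransitionState snapshots the pointers reached while executing a block.
func (d *db) SaveTransitionState(root string) {
	d.perRoot[root] = d.current
}

func main() {
	d := &db{perRoot: map[string]*TransitionState{}}
	parent := "genesis"
	for _, root := range []string{"block1", "block2"} {
		d.LoadTransitionState(parent) // resume from the parent's pointers
		d.current.accountsDone += 2   // one capped conversion step
		d.SaveTransitionState(root)   // snapshot under the new root
		parent = root
	}
	fmt.Println("converted accounts:", d.perRoot["block2"].accountsDone)
}
```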