diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index bf01c6f91857..4184290e86c6 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -175,6 +175,17 @@ func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) {
v := ctx.Uint64(utils.OverridePrague.Name)
cfg.Eth.OverridePrague = &v
}
+ if ctx.IsSet(utils.OverrideProofInBlock.Name) {
+ v := ctx.Bool(utils.OverrideProofInBlock.Name)
+ cfg.Eth.OverrideProofInBlock = &v
+ }
+ if ctx.IsSet(utils.OverrideOverlayStride.Name) {
+ v := ctx.Uint64(utils.OverrideOverlayStride.Name)
+ cfg.Eth.OverrideOverlayStride = &v
+ }
+ if ctx.IsSet(utils.ClearVerkleCosts.Name) {
+ params.ClearVerkleWitnessCosts()
+ }
backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Configure log filter RPC API.
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 38fb755b4b5a..5cb1580df3d1 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -67,8 +67,11 @@ var (
utils.NoUSBFlag,
utils.USBFlag,
utils.SmartCardDaemonPathFlag,
+ utils.OverrideOverlayStride,
utils.OverrideCancun,
utils.OverridePrague,
+ utils.OverrideProofInBlock,
+ utils.ClearVerkleCosts,
utils.EnablePersonal,
utils.TxPoolLocalsFlag,
utils.TxPoolNoLocalsFlag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index b927d0f94f83..18c7c396e4b5 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -263,6 +263,12 @@ var (
Value: 2048,
Category: flags.EthCategory,
}
+ OverrideOverlayStride = &cli.Uint64Flag{
+ Name: "override.overlay-stride",
+ Usage: "Manually specify the stride of the overlay transition, overriding the bundled setting",
+ Value: 10000,
+ Category: flags.EthCategory,
+ }
OverrideCancun = &cli.Uint64Flag{
Name: "override.cancun",
Usage: "Manually specify the Cancun fork timestamp, overriding the bundled setting",
@@ -273,6 +279,17 @@ var (
Usage: "Manually specify the Verkle fork timestamp, overriding the bundled setting",
Category: flags.EthCategory,
}
+ OverrideProofInBlock = &cli.BoolFlag{
+ Name: "override.blockproof",
+ Usage: "Manually specify the proof-in-block setting",
+ Value: true,
+ Category: flags.EthCategory,
+ }
+ ClearVerkleCosts = &cli.BoolFlag{
+ Name: "clear.verkle.costs",
+ Usage: "Clear verkle costs (for shadow forks)",
+ Category: flags.EthCategory,
+ }
// Light server and client settings
LightServeFlag = &cli.IntFlag{
Name: "light.serve",
diff --git a/consensus/beacon/consensus.go b/consensus/beacon/consensus.go
index ad8894cf4db0..e40c180aa421 100644
--- a/consensus/beacon/consensus.go
+++ b/consensus/beacon/consensus.go
@@ -25,8 +25,10 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc/eip1559"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
+ "github.com/ethereum/go-ethereum/core/overlay"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/trie"
@@ -358,6 +360,15 @@ func (beacon *Beacon) Finalize(chain consensus.ChainHeaderReader, header *types.
// The returned gas is not charged
state.Witness().TouchFullAccount(w.Address[:], true)
}
+
+ if chain.Config().IsPrague(header.Number, header.Time) {
+ // uncomment when debugging
+ // fmt.Println("at block", header.Number, "performing transition?", state.Database().InTransition())
+ parent := chain.GetHeaderByHash(header.ParentHash)
+ if err := overlay.OverlayVerkleTransition(state, parent.Root, chain.Config().OverlayStride); err != nil {
+ log.Error("error performing the transition", "err", err)
+ }
+ }
}
// FinalizeAndAssemble implements consensus.Engine, setting the final state and
@@ -382,51 +393,72 @@ func (beacon *Beacon) FinalizeAndAssemble(chain consensus.ChainHeaderReader, hea
// Assign the final state root to header.
header.Root = state.IntermediateRoot(true)
+ // Associate the current conversion state with the computed state
+ // root and store it in the database for later recovery.
+ state.Database().SaveTransitionState(header.Root)
var (
p *verkle.VerkleProof
k verkle.StateDiff
keys = state.Witness().Keys()
)
- if chain.Config().IsPrague(header.Number, header.Time) && chain.Config().ProofInBlocks {
+ if chain.Config().IsPrague(header.Number, header.Time) {
// Open the pre-tree to prove the pre-state against
parent := chain.GetHeaderByNumber(header.Number.Uint64() - 1)
if parent == nil {
return nil, fmt.Errorf("nil parent header for block %d", header.Number)
}
- preTrie, err := state.Database().OpenTrie(parent.Root)
- if err != nil {
- return nil, fmt.Errorf("error opening pre-state tree root: %w", err)
- }
+ // Load the transition state at the beginning of the block, because
+ // OpenTrie needs to know what the conversion status is.
+ state.Database().LoadTransitionState(parent.Root)
- var okpre, okpost bool
- var vtrpre, vtrpost *trie.VerkleTrie
- switch pre := preTrie.(type) {
- case *trie.VerkleTrie:
- vtrpre, okpre = preTrie.(*trie.VerkleTrie)
- vtrpost, okpost = state.GetTrie().(*trie.VerkleTrie)
- case *trie.TransitionTrie:
- vtrpre = pre.Overlay()
- okpre = true
- post, _ := state.GetTrie().(*trie.TransitionTrie)
- vtrpost = post.Overlay()
- okpost = true
- default:
- // This should only happen for the first block of the
- // conversion, when the previous tree is a merkle tree.
- // Logically, the "previous" verkle tree is an empty tree.
- okpre = true
- vtrpre = trie.NewVerkleTrie(verkle.New(), state.Database().TrieDB(), utils.NewPointCache(), false)
- post := state.GetTrie().(*trie.TransitionTrie)
- vtrpost = post.Overlay()
- okpost = true
- }
- if okpre && okpost {
- if len(keys) > 0 {
- p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver)
- if err != nil {
- return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err)
+ if chain.Config().ProofInBlocks {
+ preTrie, err := state.Database().OpenTrie(parent.Root)
+ if err != nil {
+ return nil, fmt.Errorf("error opening pre-state tree root: %w", err)
+ }
+
+ var okpre, okpost bool
+ var vtrpre, vtrpost *trie.VerkleTrie
+ switch pre := preTrie.(type) {
+ case *trie.VerkleTrie:
+ vtrpre, okpre = preTrie.(*trie.VerkleTrie)
+ switch tr := state.GetTrie().(type) {
+ case *trie.VerkleTrie:
+ vtrpost = tr
+ okpost = true
+ // This is to handle a situation right at the start of the conversion:
+ // the post trie is a transition tree when the pre tree is an empty
+ // verkle tree.
+ case *trie.TransitionTrie:
+ vtrpost = tr.Overlay()
+ okpost = true
+ default:
+ okpost = false
+ }
+ case *trie.TransitionTrie:
+ vtrpre = pre.Overlay()
+ okpre = true
+ post, _ := state.GetTrie().(*trie.TransitionTrie)
+ vtrpost = post.Overlay()
+ okpost = true
+ default:
+ // This should only happen for the first block of the
+ // conversion, when the previous tree is a merkle tree.
+ // Logically, the "previous" verkle tree is an empty tree.
+ okpre = true
+ vtrpre = trie.NewVerkleTrie(verkle.New(), state.Database().TrieDB(), utils.NewPointCache(), false)
+ post := state.GetTrie().(*trie.TransitionTrie)
+ vtrpost = post.Overlay()
+ okpost = true
+ }
+ if okpre && okpost {
+ if len(keys) > 0 {
+ p, k, err = trie.ProveAndSerialize(vtrpre, vtrpost, keys, vtrpre.FlatdbNodeResolver)
+ if err != nil {
+ return nil, fmt.Errorf("error generating verkle proof for block %d: %w", header.Number, err)
+ }
}
}
}
diff --git a/core/block_validator.go b/core/block_validator.go
index b1ceab9d5c6c..337b61ac3396 100644
--- a/core/block_validator.go
+++ b/core/block_validator.go
@@ -131,6 +131,10 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x) dberr: %w", header.Root, root, statedb.Error())
}
+ // Now that the advertised root has been verified, it can
+ // be used as an identifier for the conversion status.
+ statedb.Database().SaveTransitionState(header.Root)
return nil
}
diff --git a/core/blockchain.go b/core/blockchain.go
index 797d31388476..79ac4df147b9 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -251,6 +251,14 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
+ if overrides != nil {
+ if overrides.OverrideProofInBlock != nil {
+ chainConfig.ProofInBlocks = *overrides.OverrideProofInBlock
+ }
+ if overrides.OverrideOverlayStride != nil {
+ chainConfig.OverlayStride = *overrides.OverrideOverlayStride
+ }
+ }
log.Info("")
log.Info(strings.Repeat("-", 153))
for _, line := range strings.Split(chainConfig.Description(), "\n") {
@@ -312,7 +320,13 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis
head := bc.CurrentBlock()
// Declare the end of the verkle transition if need be
- if bc.chainConfig.Rules(head.Number, false /* XXX */, head.Time).IsPrague {
+ if bc.chainConfig.IsPrague(head.Number, head.Time) {
+ // TODO this only works when resuming a chain that has already gone
+ // through the conversion. All pointers should be saved to the DB
+ // for it to be able to recover if interrupted during the transition,
+ // but that's left for a later PR since there's no real need right now.
+ bc.stateCache.InitTransitionStatus(true, true)
bc.stateCache.EndVerkleTransition()
}
@@ -1746,8 +1760,22 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
parent = bc.GetHeader(block.ParentHash(), block.NumberU64()-1)
}
+ if bc.Config().IsPrague(block.Number(), block.Time()) {
+ bc.stateCache.LoadTransitionState(parent.Root)
+
+ // pragueTime has been reached. If the transition isn't active, it means this
+ // is the fork block and that the conversion needs to be marked as started.
+ if !bc.stateCache.InTransition() && !bc.stateCache.Transitioned() {
+ bc.stateCache.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), bc.Config().PragueTime, parent.Root)
+ }
+ } else {
+ // If the verkle activation time hasn't been reached, declare the transition as "not started".
+ // This is so that if the miner activates the conversion, the insertion happens
+ // in the correct mode.
+ bc.stateCache.InitTransitionStatus(false, false)
+ }
if parent.Number.Uint64() == conversionBlock {
- bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time)
+ bc.StartVerkleTransition(parent.Root, emptyVerkleRoot, bc.Config(), &parent.Time, parent.Root)
bc.stateCache.SetLastMerkleRoot(parent.Root)
}
statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
@@ -1989,7 +2017,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
- return it.index, errors.New("missing parent")
+ return it.index, fmt.Errorf("missing parent: hash=%x, number=%d", current.Hash(), current.Number)
}
// Import all the pruned blocks to make the state available
var (
@@ -2050,7 +2078,7 @@ func (bc *BlockChain) recoverAncestors(block *types.Block) (common.Hash, error)
}
}
if parent == nil {
- return common.Hash{}, errors.New("missing parent")
+ return common.Hash{}, fmt.Errorf("missing parent during ancestor recovery: hash=%x, number=%d", block.ParentHash(), block.Number())
}
// Import all the pruned blocks to make the state available
for i := len(hashes) - 1; i >= 0; i-- {
@@ -2288,6 +2316,7 @@ func (bc *BlockChain) SetCanonical(head *types.Block) (common.Hash, error) {
defer bc.chainmu.Unlock()
// Re-execute the reorged chain in case the head state is missing.
+ log.Trace("looking for state", "root", head.Root(), "has state", bc.HasState(head.Root()))
if !bc.HasState(head.Root()) {
if latestValidHash, err := bc.recoverAncestors(head); err != nil {
return latestValidHash, err
@@ -2533,8 +2562,8 @@ func (bc *BlockChain) GetTrieFlushInterval() time.Duration {
return time.Duration(bc.flushInterval.Load())
}
-func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) {
- bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, pragueTime)
+func (bc *BlockChain) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64, root common.Hash) {
+ bc.stateCache.StartVerkleTransition(originalRoot, translatedRoot, chainConfig, pragueTime, root)
}
func (bc *BlockChain) ReorgThroughVerkleTransition() {
bc.stateCache.ReorgThroughVerkleTransition()
diff --git a/core/chain_makers.go b/core/chain_makers.go
index 5b9dc0c6ff08..1c232e6b6d92 100644
--- a/core/chain_makers.go
+++ b/core/chain_makers.go
@@ -365,7 +365,7 @@ func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int,
return db, blocks, receipts
}
-func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) {
+func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, diskdb ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts, []*verkle.VerkleProof, []verkle.StateDiff) {
if config == nil {
config = params.TestChainConfig
}
@@ -434,13 +434,16 @@ func GenerateVerkleChain(config *params.ChainConfig, parent *types.Block, engine
return nil, nil
}
var snaps *snapshot.Tree
- triedb := state.NewDatabaseWithConfig(db, nil)
- triedb.EndVerkleTransition()
+ db := state.NewDatabaseWithConfig(diskdb, nil)
+ db.StartVerkleTransition(common.Hash{}, common.Hash{}, config, config.PragueTime, common.Hash{})
+ db.EndVerkleTransition()
+ db.SaveTransitionState(parent.Root())
for i := 0; i < n; i++ {
- statedb, err := state.New(parent.Root(), triedb, snaps)
+ statedb, err := state.New(parent.Root(), db, snaps)
if err != nil {
panic(fmt.Sprintf("could not find state for block %d: err=%v, parent root=%x", i, err, parent.Root()))
}
+ statedb.NewAccessWitness()
block, receipt := genblock(i, parent, statedb)
blocks[i] = block
receipts[i] = receipt
diff --git a/core/genesis.go b/core/genesis.go
index c8a4bc5952d9..a2a331d1fe33 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -126,6 +126,7 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c
// all the derived states will be discarded to not pollute disk.
db := state.NewDatabase(rawdb.NewMemoryDatabase())
if cfg.IsPrague(big.NewInt(int64(0)), timestamp) {
+ db.StartVerkleTransition(common.Hash{}, common.Hash{}, cfg, &timestamp, common.Hash{})
db.EndVerkleTransition()
}
statedb, err := state.New(types.EmptyRootHash, db, nil)
@@ -146,15 +147,17 @@ func (ga *GenesisAlloc) deriveHash(cfg *params.ChainConfig, timestamp uint64) (c
// flush is very similar with deriveHash, but the main difference is
// all the generated states will be persisted into the given database.
// Also, the genesis state specification will be flushed as well.
-func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig) error {
- statedb, err := state.New(types.EmptyRootHash, state.NewDatabaseWithNodeDB(db, triedb), nil)
- if err != nil {
- return err
+func (ga *GenesisAlloc) flush(db ethdb.Database, triedb *trie.Database, blockhash common.Hash, cfg *params.ChainConfig, timestamp *uint64) error {
+ database := state.NewDatabaseWithNodeDB(db, triedb)
+ // End the verkle conversion at genesis if the fork block is 0
+ if timestamp != nil && cfg.IsPrague(big.NewInt(int64(0)), *timestamp) {
+ database.StartVerkleTransition(common.Hash{}, common.Hash{}, cfg, timestamp, common.Hash{})
+ database.EndVerkleTransition()
}
- // End the verkle conversion at genesis if the fork block is 0
- if triedb.IsVerkle() {
- statedb.Database().EndVerkleTransition()
+ statedb, err := state.New(types.EmptyRootHash, database, nil)
+ if err != nil {
+ return err
}
for addr, account := range *ga {
@@ -221,7 +224,7 @@ func CommitGenesisState(db ethdb.Database, triedb *trie.Database, blockhash comm
return errors.New("not found")
}
}
- return alloc.flush(db, triedb, blockhash, config)
+ return alloc.flush(db, triedb, blockhash, config, nil)
}
// GenesisAccount is an account in the state of the genesis block.
@@ -288,8 +291,10 @@ func (e *GenesisMismatchError) Error() string {
// ChainOverrides contains the changes to chain config.
type ChainOverrides struct {
- OverrideCancun *uint64
- OverridePrague *uint64
+ OverrideCancun *uint64
+ OverridePrague *uint64
+ OverrideProofInBlock *bool
+ OverrideOverlayStride *uint64
}
// SetupGenesisBlock writes or updates the genesis block in db.
@@ -536,7 +541,7 @@ func (g *Genesis) Commit(db ethdb.Database, triedb *trie.Database) (*types.Block
// All the checks has passed, flush the states derived from the genesis
// specification as well as the specification itself into the provided
// database.
- if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config); err != nil {
+ if err := g.Alloc.flush(db, triedb, block.Hash(), g.Config, &g.Timestamp); err != nil {
return nil, err
}
rawdb.WriteTd(db, block.Hash(), block.NumberU64(), block.Difficulty())
diff --git a/core/overlay_transition.go b/core/overlay/conversion.go
similarity index 54%
rename from core/overlay_transition.go
rename to core/overlay/conversion.go
index 35c09d22d938..0e83e2066353 100644
--- a/core/overlay_transition.go
+++ b/core/overlay/conversion.go
@@ -14,14 +14,17 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-package core
+package overlay
import (
"bufio"
"bytes"
+ "encoding/binary"
"fmt"
"io"
"os"
+ "runtime"
+ "sync"
"time"
"github.com/ethereum/go-ethereum/common"
@@ -32,15 +35,199 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
+ "github.com/ethereum/go-ethereum/trie/utils"
+ "github.com/ethereum/go-verkle"
+ "github.com/holiman/uint256"
)
+var zeroTreeIndex uint256.Int
+
+// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees.
+// It assumes that the walk of the base tree is done in address-order, so it exploits that fact to
+// collect the key-values in a way that is efficient.
+type keyValueMigrator struct {
+ // leafData contains the values for the future leaf for a particular VKT branch.
+ leafData map[branchKey]*migratedKeyValue
+
+ // When prepare() is called, it will start a background routine that will process the leafData
+ // saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background
+ // routine signals that it is done by closing processingReady.
+ processingReady chan struct{}
+ newLeaves []verkle.LeafNode
+ prepareErr error
+}
+
+func newKeyValueMigrator() *keyValueMigrator {
+ // We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls
+ // in different goroutines when we never called GetConfig() before, causing a race considering the way
+ // that `config` is designed in go-verkle.
+// TODO: jsign has a fix for this in the PR where we move to a file-less precomp, since it allows safe
+ // concurrent calls to GetConfig(). When that gets merged, we can remove this line.
+ _ = verkle.GetConfig()
+ return &keyValueMigrator{
+ processingReady: make(chan struct{}),
+ leafData: make(map[branchKey]*migratedKeyValue, 10_000),
+ }
+}
+
+type migratedKeyValue struct {
+ branchKey branchKey
+ leafNodeData verkle.BatchNewLeafNodeData
+}
+type branchKey struct {
+ addr common.Address
+ treeIndex uint256.Int
+}
+
+func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey {
+ var sk branchKey
+ copy(sk.addr[:], addr)
+ sk.treeIndex = *treeIndex
+ return sk
+}
+
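+// addStorageSlot collects the value of a storage slot, grouping it under the
+// verkle leaf node (branch key) that the slot maps to.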
+func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) {
+ treeIndex, subIndex := utils.GetTreeKeyStorageSlotTreeIndexes(slotNumber)
+ leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
+ leafNodeData.Values[subIndex] = slotValue
+}
+
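+// addAccount collects the version, balance, nonce and code hash of an account
+// into the leaf values of its account-header branch.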
+func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) {
+ leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
+
+ var version [verkle.LeafValueSize]byte
+ leafNodeData.Values[utils.VersionLeafKey] = version[:]
+
+ var balance [verkle.LeafValueSize]byte
+ for i, b := range acc.Balance.Bytes() {
+ balance[len(acc.Balance.Bytes())-1-i] = b
+ }
+ leafNodeData.Values[utils.BalanceLeafKey] = balance[:]
+
+ var nonce [verkle.LeafValueSize]byte
+ binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce)
+ leafNodeData.Values[utils.NonceLeafKey] = nonce[:]
+
+ leafNodeData.Values[utils.CodeHashLeafKey] = acc.CodeHash[:]
+}
+
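+// addAccountCode collects the code size and code chunks of an account; the
+// first 128 chunks share the account header leaf, further chunks get their
+// own leaf nodes.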
+func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) {
+ leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
+
+ // Save the code size.
+ var codeSizeBytes [verkle.LeafValueSize]byte
+ binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize)
+ leafNodeData.Values[utils.CodeSizeLeafKey] = codeSizeBytes[:]
+
+ // The first 128 chunks are stored in the account header leaf.
+ for i := 0; i < 128 && i < len(chunks)/32; i++ {
+ leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)]
+ }
+
+ // Potential further chunks have their own leaf nodes.
+ for i := 128; i < len(chunks)/32; {
+ treeIndex, _ := utils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i)))
+ leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
+
+ j := i
+ for ; (j-i) < 256 && j < len(chunks)/32; j++ {
+ leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)]
+ }
+ i = j
+ }
+}
+
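+// getOrInitLeafNodeData returns the leaf values collected so far for the given
+// branch key, allocating a new entry if none exists yet.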
+func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData {
+ if ld, ok := kvm.leafData[bk]; ok {
+ return &ld.leafNodeData
+ }
+ kvm.leafData[bk] = &migratedKeyValue{
+ branchKey: bk,
+ leafNodeData: verkle.BatchNewLeafNodeData{
+ Stem: nil, // It will be calculated in the prepare() phase, since it's CPU heavy.
+ Values: make(map[byte][]byte, 256),
+ },
+ }
+ return &kvm.leafData[bk].leafNodeData
+}
+
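+// prepare fires a background routine that computes all leaf stems in parallel
+// and batch-creates the new leaf nodes, closing processingReady when done.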
+func (kvm *keyValueMigrator) prepare() {
+ // We fire a background routine to process the leafData and save the result in newLeaves.
+ // The background routine signals that it is done by closing processingReady.
+ go func() {
+ // Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine.
+ // This fills each leafNodeData.Stem with the correct value.
+ leafData := make([]migratedKeyValue, 0, len(kvm.leafData))
+ for _, v := range kvm.leafData {
+ leafData = append(leafData, *v)
+ }
+ var wg sync.WaitGroup
+ batchNum := runtime.NumCPU()
+ batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum
+ for i := 0; i < len(kvm.leafData); i += batchSize {
+ start := i
+ end := i + batchSize
+ if end > len(kvm.leafData) {
+ end = len(kvm.leafData)
+ }
+ wg.Add(1)
+
+ batch := leafData[start:end]
+ go func() {
+ defer wg.Done()
+ var currAddr common.Address
+ var currPoint *verkle.Point
+ for i := range batch {
+ if batch[i].branchKey.addr != currAddr || currAddr == (common.Address{}) {
+ currAddr = batch[i].branchKey.addr
+ currPoint = utils.EvaluateAddressPoint(currAddr[:])
+ }
+ stem := utils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0)
+ stem = stem[:verkle.StemSize]
+ batch[i].leafNodeData.Stem = stem
+ }
+ }()
+ }
+ wg.Wait()
+
+ // Step 2: Now that we have all stems (i.e. tree keys) calculated, we can create the new leaves.
+ nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData))
+ for i := range leafData {
+ nodeValues[i] = leafData[i].leafNodeData
+ }
+
+ // Create all leaves in batch mode so we can optimize cryptography operations.
+ kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues)
+ close(kvm.processingReady)
+ }()
+}
+
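+// migrateCollectedKeyValues waits for prepare() to finish and inserts the
+// resulting leaves into the overlay verkle tree.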
+func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error {
+ now := time.Now()
+ <-kvm.processingReady
+ if kvm.prepareErr != nil {
+ return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr)
+ }
+ log.Info("Prepared key values from base tree", "duration", time.Since(now))
+
+ // Insert into the tree.
+ if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil {
+ return fmt.Errorf("failed to insert migrated leaves: %w", err)
+ }
+
+ return nil
+}
+
// OverlayVerkleTransition contains the overlay conversion logic
-func OverlayVerkleTransition(statedb *state.StateDB) error {
+func OverlayVerkleTransition(statedb *state.StateDB, root common.Hash, maxMovedCount uint64) error {
migrdb := statedb.Database()
+ migrdb.LockCurrentTransitionState()
+ defer migrdb.UnLockCurrentTransitionState()
// verkle transition: if the conversion process is in progress, move
// N values from the MPT into the verkle tree.
if migrdb.InTransition() {
+ log.Debug("Processing verkle conversion starting", "account hash", migrdb.GetCurrentAccountHash(), "slot hash", migrdb.GetCurrentSlotHash(), "state root", root)
var (
now = time.Now()
tt = statedb.GetTrie().(*trie.TransitionTrie)
@@ -92,14 +279,13 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
preimageSeek += int64(len(addr))
}
- const maxMovedCount = 10000
// mkv will be assisting in the collection of up to maxMovedCount key values to be migrated to the VKT.
// It has internal caches to do efficient MPT->VKT key calculations, which will be discarded after
// this function.
mkv := newKeyValueMigrator()
// move maxCount accounts into the verkle tree, starting with the
// slots from the previous account.
- count := 0
+ count := uint64(0)
// if less than maxCount slots were moved, move to the next account
for count < maxMovedCount {
@@ -124,7 +310,12 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
if err != nil {
return err
}
- stIt.Next()
+ processed := stIt.Next()
+ if processed {
+ log.Debug("account has storage and a next item")
+ } else {
+ log.Debug("account has storage and NO next item")
+ }
// fdb.StorageProcessed will be initialized to `true` if the
// entire storage for an account was not entirely processed
@@ -133,6 +324,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
// If the entire storage was processed, then the iterator was
// created in vain, but it's ok as this will not happen often.
for ; !migrdb.GetStorageProcessed() && count < maxMovedCount; count++ {
+ log.Trace("Processing storage", "count", count, "slot", stIt.Slot(), "storage processed", migrdb.GetStorageProcessed(), "current account", migrdb.GetCurrentAccountAddress(), "current account hash", migrdb.GetCurrentAccountHash())
var (
value []byte // slot value after RLP decoding
safeValue [32]byte // 32-byte aligned value
@@ -155,6 +347,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
return fmt.Errorf("slotnr len is zero is not 32: %d", len(slotnr))
}
}
+ log.Trace("found slot number", "number", slotnr)
if crypto.Keccak256Hash(slotnr[:]) != stIt.Hash() {
return fmt.Errorf("preimage file does not match storage hash: %s!=%s", crypto.Keccak256Hash(slotnr), stIt.Hash())
}
@@ -196,6 +389,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
// Move to the next account, if available - or end
// the transition otherwise.
if accIt.Next() {
+ log.Trace("Found another account to convert", "hash", accIt.Hash())
var addr common.Address
if hasPreimagesBin {
if _, err := io.ReadFull(fpreimages, addr[:]); err != nil {
@@ -207,10 +401,10 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
return fmt.Errorf("account address len is zero is not 20: %d", len(addr))
}
}
- // fmt.Printf("account switch: %s != %s\n", crypto.Keccak256Hash(addr[:]), accIt.Hash())
if crypto.Keccak256Hash(addr[:]) != accIt.Hash() {
return fmt.Errorf("preimage file does not match account hash: %s != %s", crypto.Keccak256Hash(addr[:]), accIt.Hash())
}
+ log.Trace("Converting account address", "hash", accIt.Hash(), "addr", addr)
preimageSeek += int64(len(addr))
migrdb.SetCurrentAccountAddress(addr)
} else {
@@ -223,7 +417,7 @@ func OverlayVerkleTransition(statedb *state.StateDB) error {
}
migrdb.SetCurrentPreimageOffset(preimageSeek)
- log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account", statedb.Database().GetCurrentAccountHash())
+ log.Info("Collected key values from base tree", "count", count, "duration", time.Since(now), "last account hash", statedb.Database().GetCurrentAccountHash(), "last account address", statedb.Database().GetCurrentAccountAddress(), "storage processed", statedb.Database().GetStorageProcessed(), "last storage", statedb.Database().GetCurrentSlotHash())
// Take all the collected key-values and prepare the new leaf values.
// This fires a background routine that will start doing the work that
diff --git a/core/rawdb/accessors_overlay.go b/core/rawdb/accessors_overlay.go
new file mode 100644
index 000000000000..5a371b9d307f
--- /dev/null
+++ b/core/rawdb/accessors_overlay.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package rawdb
+
+import (
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb"
+)
+
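+// ReadVerkleTransitionState retrieves the serialized verkle transition state
+// associated with the given state root.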
+func ReadVerkleTransitionState(db ethdb.KeyValueReader, hash common.Hash) ([]byte, error) {
+ return db.Get(transitionStateKey(hash))
+}
+
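+// WriteVerkleTransitionState stores the serialized verkle transition state
+// for the given state root.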
+func WriteVerkleTransitionState(db ethdb.KeyValueWriter, hash common.Hash, state []byte) error {
+ return db.Put(transitionStateKey(hash), state)
+}
diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go
index 940ce01549cd..029c09aec370 100644
--- a/core/rawdb/schema.go
+++ b/core/rawdb/schema.go
@@ -129,6 +129,8 @@ var (
CliqueSnapshotPrefix = []byte("clique-")
+ VerkleTransitionStatePrefix = []byte("verkle-transition-state-")
+
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
)
@@ -262,6 +264,11 @@ func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
}
+// transitionStateKey = VerkleTransitionStatePrefix + hash
+func transitionStateKey(hash common.Hash) []byte {
+ return append(VerkleTransitionStatePrefix, hash.Bytes()...)
+}
+
// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
// node. The characteristics of legacy trie node are:
// - the key length is 32 bytes
diff --git a/core/state/database.go b/core/state/database.go
index 5707e2c88b60..826c03cd9f05 100644
--- a/core/state/database.go
+++ b/core/state/database.go
@@ -17,8 +17,11 @@
package state
import (
+ "bytes"
+ "encoding/gob"
"errors"
"fmt"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/lru"
@@ -26,6 +29,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
"github.com/ethereum/go-ethereum/trie/trienode"
@@ -64,7 +68,7 @@ type Database interface {
// TrieDB retrieves the low level trie database used for data storage.
TrieDB() *trie.Database
- StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64)
+ StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, cancunTime *uint64, root common.Hash)
ReorgThroughVerkleTransition()
@@ -74,7 +78,9 @@ type Database interface {
Transitioned() bool
- SetCurrentSlotHash(hash common.Hash)
+ InitTransitionStatus(bool, bool)
+
+ SetCurrentSlotHash(common.Hash)
GetCurrentAccountAddress() *common.Address
@@ -94,7 +100,15 @@ type Database interface {
AddRootTranslation(originalRoot, translatedRoot common.Hash)
- SetLastMerkleRoot(root common.Hash)
+ SetLastMerkleRoot(common.Hash)
+
+ SaveTransitionState(common.Hash)
+
+ LoadTransitionState(common.Hash)
+
+ LockCurrentTransitionState()
+
+ UnLockCurrentTransitionState()
}
// Trie is a Ethereum Merkle Patricia trie.
@@ -182,36 +196,37 @@ func NewDatabase(db ethdb.Database) Database {
// large memory cache.
func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database {
return &cachingDB{
- disk: db,
- codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- triedb: trie.NewDatabaseWithConfig(db, config),
- addrToPoint: utils.NewPointCache(),
+ disk: db,
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+ codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+ triedb: trie.NewDatabaseWithConfig(db, config),
+ addrToPoint: utils.NewPointCache(),
+ TransitionStatePerRoot: lru.NewBasicLRU[common.Hash, *TransitionState](100),
}
}
// NewDatabaseWithNodeDB creates a state database with an already initialized node database.
func NewDatabaseWithNodeDB(db ethdb.Database, triedb *trie.Database) Database {
return &cachingDB{
- disk: db,
- codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
- codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
- triedb: triedb,
- addrToPoint: utils.NewPointCache(),
- ended: triedb.IsVerkle(),
+ disk: db,
+ codeSizeCache: lru.NewCache[common.Hash, int](codeSizeCacheSize),
+ codeCache: lru.NewSizeConstrainedCache[common.Hash, []byte](codeCacheSize),
+ triedb: triedb,
+ addrToPoint: utils.NewPointCache(),
+ TransitionStatePerRoot: lru.NewBasicLRU[common.Hash, *TransitionState](100),
}
}
func (db *cachingDB) InTransition() bool {
- return db.started && !db.ended
+ return db.CurrentTransitionState != nil && db.CurrentTransitionState.Started && !db.CurrentTransitionState.Ended
}
func (db *cachingDB) Transitioned() bool {
- return db.ended
+ return db.CurrentTransitionState != nil && db.CurrentTransitionState.Ended
}
// Fork implements the fork
-func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64) {
+func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.Hash, chainConfig *params.ChainConfig, pragueTime *uint64, root common.Hash) {
fmt.Println(`
__________.__ .__ .__ __ .__ .__ ____
\__ ___| |__ ____ ____ | | ____ ______ | |__ _____ _____/ |_ | |__ _____ ______ __ _ _|__| ____ / ___\ ______
@@ -219,24 +234,35 @@ func (db *cachingDB) StartVerkleTransition(originalRoot, translatedRoot common.H
| | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ \ /| | | \\___ /\___ \
|____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ \/\_/ |__|___| /_____//_____/
|__|`)
- db.started = true
- db.ended = false
+ db.CurrentTransitionState = &TransitionState{
+ Started: true,
+ // initialize so that the first storage-less accounts are processed
+ StorageProcessed: true,
+ }
// db.AddTranslation(originalRoot, translatedRoot)
db.baseRoot = originalRoot
- // initialize so that the first storage-less accounts are processed
- db.StorageProcessed = true
+
+ // Reinitialize values in case of a reorg
if pragueTime != nil {
chainConfig.PragueTime = pragueTime
}
}
func (db *cachingDB) ReorgThroughVerkleTransition() {
- db.ended, db.started = false, false
+ log.Warn("trying to reorg through the transition, which makes no sense at this point")
+}
+
+func (db *cachingDB) InitTransitionStatus(started, ended bool) {
+ db.CurrentTransitionState = &TransitionState{
+ Ended: ended,
+ Started: started,
+ // TODO add other fields when we handle mid-transition interrupts
+ }
}
func (db *cachingDB) EndVerkleTransition() {
- if !db.started {
- db.started = true
+ if !db.CurrentTransitionState.Started {
+ db.CurrentTransitionState.Started = true
}
fmt.Println(`
@@ -246,7 +272,36 @@ func (db *cachingDB) EndVerkleTransition() {
| | | Y \ ___/ \ ___/| |_\ ___/| |_> | Y \/ __ \| | | | | Y \/ __ \_\___ \ | |__/ __ \| | / /_/ \ ___// /_/ |
|____| |___| /\___ \___ |____/\___ | __/|___| (____ |___| |__| |___| (____ /_____/ |____(____ |___| \____ |\___ \____ |
|__|`)
- db.ended = true
+ db.CurrentTransitionState.Ended = true
+}
+
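+// TransitionState records the progress of the MPT-to-verkle conversion, so
+// that it can be saved per state root and restored after a restart or reorg.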
+type TransitionState struct {
+ CurrentAccountAddress *common.Address // address of the last translated account
+ CurrentSlotHash common.Hash // hash of the last translated storage slot
+ CurrentPreimageOffset int64 // next byte to read from the preimage file
+ Started, Ended bool
+
+ // Mark whether the storage for an account has been processed. This is useful if the
+ // maximum number of leaves of the conversion is reached before the whole storage is
+ // processed.
+ StorageProcessed bool
+}
+
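+// Copy returns a deep copy of the transition state, so that cached snapshots
+// aren't mutated through the current-state pointer.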
+func (ts *TransitionState) Copy() *TransitionState {
+ ret := &TransitionState{
+ Started: ts.Started,
+ Ended: ts.Ended,
+ CurrentSlotHash: ts.CurrentSlotHash,
+ CurrentPreimageOffset: ts.CurrentPreimageOffset,
+ StorageProcessed: ts.StorageProcessed,
+ }
+
+ if ts.CurrentAccountAddress != nil {
+ ret.CurrentAccountAddress = &common.Address{}
+ copy(ret.CurrentAccountAddress[:], ts.CurrentAccountAddress[:])
+ }
+
+ return ret
}
type cachingDB struct {
@@ -255,22 +310,16 @@ type cachingDB struct {
codeCache *lru.SizeConstrainedCache[common.Hash, []byte]
triedb *trie.Database
- // Verkle specific fields
+ // Transition-specific fields
// TODO ensure that this info is in the DB
- started, ended bool
- LastMerkleRoot common.Hash // root hash of the read-only base tree
+ LastMerkleRoot common.Hash // root hash of the read-only base tree
+ CurrentTransitionState *TransitionState
+ TransitionStatePerRoot lru.BasicLRU[common.Hash, *TransitionState]
+ transitionStateLock sync.Mutex
addrToPoint *utils.PointCache
- baseRoot common.Hash // hash of the read-only base tree
- CurrentAccountAddress *common.Address // addresss of the last translated account
- CurrentSlotHash common.Hash // hash of the last translated storage slot
- CurrentPreimageOffset int64 // next byte to read from the preimage file
-
- // Mark whether the storage for an account has been processed. This is useful if the
- // maximum number of leaves of the conversion is reached before the whole storage is
- // processed.
- StorageProcessed bool
+ baseRoot common.Hash // hash of the read-only base tree
}
func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) {
@@ -284,14 +333,14 @@ func (db *cachingDB) openMPTTrie(root common.Hash) (Trie, error) {
func (db *cachingDB) openVKTrie(root common.Hash) (Trie, error) {
payload, err := db.DiskDB().Get(trie.FlatDBVerkleNodeKeyPrefix)
if err != nil {
- return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.ended), nil
+ return trie.NewVerkleTrie(verkle.New(), db.triedb, db.addrToPoint, db.CurrentTransitionState.Ended), nil
}
r, err := verkle.ParseNode(payload, 0)
if err != nil {
panic(err)
}
- return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.ended), err
+ return trie.NewVerkleTrie(r, db.triedb, db.addrToPoint, db.CurrentTransitionState.Ended), err
}
// OpenTrie opens the main account trie at a specific root hash.
@@ -300,19 +349,22 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
mpt Trie
err error
)
+ fmt.Printf("opening trie with root %x, %v %v\n", root, db.InTransition(), db.Transitioned())
// TODO separate both cases when I can be certain that it won't
// find a Verkle trie where it expects a Transition trie.
- if db.started || db.ended {
+ if db.InTransition() || db.Transitioned() {
// NOTE this is a kaustinen-only change, it will break replay
vkt, err := db.openVKTrie(root)
if err != nil {
+ log.Error("failed to open the vkt", "err", err)
return nil, err
}
// If the verkle conversion has ended, return a single
// verkle trie.
- if db.ended {
+ if db.CurrentTransitionState.Ended {
+ log.Debug("transition ended, returning a simple verkle tree")
return vkt, nil
}
@@ -320,6 +372,7 @@ func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) {
// trie and an overlay, verkle trie.
mpt, err = db.openMPTTrie(db.baseRoot)
if err != nil {
+ log.Error("failed to open the mpt", "err", err, "root", db.baseRoot)
return nil, err
}
@@ -345,7 +398,7 @@ func (db *cachingDB) openStorageMPTrie(stateRoot common.Hash, address common.Add
// OpenStorageTrie opens the storage trie of an account
func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Address, root common.Hash, self Trie) (Trie, error) {
// TODO this should only return a verkle tree
- if db.ended {
+ if db.Transitioned() {
mpt, err := db.openStorageMPTrie(types.EmptyRootHash, address, common.Hash{}, self)
if err != nil {
return nil, err
@@ -361,7 +414,8 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
panic("unexpected trie type")
}
}
- if db.started {
+ if db.InTransition() {
+ fmt.Printf("OpenStorageTrie during transition, state root=%x root=%x\n", stateRoot, root)
mpt, err := db.openStorageMPTrie(db.LastMerkleRoot, address, root, nil)
if err != nil {
return nil, err
@@ -374,7 +428,7 @@ func (db *cachingDB) OpenStorageTrie(stateRoot common.Hash, address common.Addre
case *trie.TransitionTrie:
return trie.NewTransitionTree(mpt.(*trie.SecureTrie), self.Overlay(), true), nil
default:
- panic("unexpected trie type")
+ return nil, errors.New("expected a verkle account tree, and found another type")
}
}
mpt, err := db.openStorageMPTrie(stateRoot, address, root, nil)
@@ -451,48 +505,128 @@ func (db *cachingDB) GetTreeKeyHeader(addr []byte) *verkle.Point {
}
func (db *cachingDB) SetCurrentAccountAddress(addr common.Address) {
- db.CurrentAccountAddress = &addr
+ db.CurrentTransitionState.CurrentAccountAddress = &addr
}
func (db *cachingDB) GetCurrentAccountHash() common.Hash {
var addrHash common.Hash
- if db.CurrentAccountAddress != nil {
- addrHash = crypto.Keccak256Hash(db.CurrentAccountAddress[:])
+ if db.CurrentTransitionState.CurrentAccountAddress != nil {
+ addrHash = crypto.Keccak256Hash(db.CurrentTransitionState.CurrentAccountAddress[:])
}
return addrHash
}
func (db *cachingDB) GetCurrentAccountAddress() *common.Address {
- return db.CurrentAccountAddress
+ return db.CurrentTransitionState.CurrentAccountAddress
}
func (db *cachingDB) GetCurrentPreimageOffset() int64 {
- return db.CurrentPreimageOffset
+ return db.CurrentTransitionState.CurrentPreimageOffset
}
func (db *cachingDB) SetCurrentPreimageOffset(offset int64) {
- db.CurrentPreimageOffset = offset
+ db.CurrentTransitionState.CurrentPreimageOffset = offset
}
func (db *cachingDB) SetCurrentSlotHash(hash common.Hash) {
- db.CurrentSlotHash = hash
+ db.CurrentTransitionState.CurrentSlotHash = hash
}
func (db *cachingDB) GetCurrentSlotHash() common.Hash {
- return db.CurrentSlotHash
+ return db.CurrentTransitionState.CurrentSlotHash
}
func (db *cachingDB) SetStorageProcessed(processed bool) {
- db.StorageProcessed = processed
+ db.CurrentTransitionState.StorageProcessed = processed
}
func (db *cachingDB) GetStorageProcessed() bool {
- return db.StorageProcessed
+ return db.CurrentTransitionState.StorageProcessed
}
func (db *cachingDB) AddRootTranslation(originalRoot, translatedRoot common.Hash) {
}
-func (db *cachingDB) SetLastMerkleRoot(root common.Hash) {
- db.LastMerkleRoot = root
+func (db *cachingDB) SetLastMerkleRoot(merkleRoot common.Hash) {
+ db.LastMerkleRoot = merkleRoot
+}
+
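+// SaveTransitionState associates the current conversion progress with the
+// given state root, keeping it in an in-memory cache and persisting it to disk.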
+func (db *cachingDB) SaveTransitionState(root common.Hash) {
+ db.transitionStateLock.Lock()
+ defer db.transitionStateLock.Unlock()
+ if db.CurrentTransitionState != nil {
+ var buf bytes.Buffer
+ enc := gob.NewEncoder(&buf)
+ err := enc.Encode(db.CurrentTransitionState)
+ if err != nil {
+ log.Error("failed to encode transition state", "err", err)
+ return
+ }
+
+ if !db.TransitionStatePerRoot.Contains(root) {
+ // Copy so that the address pointer isn't updated after
+ // it has been saved.
+ db.TransitionStatePerRoot.Add(root, db.CurrentTransitionState.Copy())
+
+ rawdb.WriteVerkleTransitionState(db.DiskDB(), root, buf.Bytes())
+ }
+
+ log.Debug("saving transition state", "storage processed", db.CurrentTransitionState.StorageProcessed, "addr", db.CurrentTransitionState.CurrentAccountAddress, "slot hash", db.CurrentTransitionState.CurrentSlotHash, "root", root, "ended", db.CurrentTransitionState.Ended, "started", db.CurrentTransitionState.Started)
+ }
+}
+
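+// LoadTransitionState restores the conversion progress associated with the
+// given state root, checking the cache first and falling back to the database,
+// or to a fresh state if nothing was stored.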
+func (db *cachingDB) LoadTransitionState(root common.Hash) {
+ db.transitionStateLock.Lock()
+ defer db.transitionStateLock.Unlock()
+ // Try to get the transition state from the cache and
+ // the DB if it's not there.
+ ts, ok := db.TransitionStatePerRoot.Get(root)
+ if !ok {
+ // Not in the cache, try getting it from the DB
+ data, err := rawdb.ReadVerkleTransitionState(db.DiskDB(), root)
+ if err != nil {
+ log.Error("failed to read transition state", "err", err)
+ return
+ }
+
+ // if a state could be read from the db, attempt to decode it
+ if len(data) > 0 {
+ var (
+ newts TransitionState
+ buf = bytes.NewBuffer(data[:])
+ dec = gob.NewDecoder(buf)
+ )
+ // Decode transition state
+ err = dec.Decode(&newts)
+ if err != nil {
+ log.Error("failed to decode transition state", "err", err)
+ return
+ }
+ ts = &newts
+ }
+
+ // Fallback that should only happen before the transition
+ if ts == nil {
+ // Initialize the first transition state, with the "ended"
+ // field set to true if the database was created
+ // as a verkle database.
+ log.Debug("no transition state found, starting fresh", "is verkle", db.triedb.IsVerkle())
+ // Start with a fresh state
+ ts = &TransitionState{Ended: db.triedb.IsVerkle()}
+ }
+ }
+
+ // Copy so that the CurrentAccountAddress pointer in the map
+ // doesn't get overwritten.
+ db.CurrentTransitionState = ts.Copy()
+
+ log.Debug("loaded transition state", "storage processed", db.CurrentTransitionState.StorageProcessed, "addr", db.CurrentTransitionState.CurrentAccountAddress, "slot hash", db.CurrentTransitionState.CurrentSlotHash, "root", root, "ended", db.CurrentTransitionState.Ended, "started", db.CurrentTransitionState.Started)
+}
+
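+// LockCurrentTransitionState takes the transition-state lock, so that the
+// current state isn't saved or reloaded while a conversion step is running.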
+func (db *cachingDB) LockCurrentTransitionState() {
+ db.transitionStateLock.Lock()
+}
+
+func (db *cachingDB) UnLockCurrentTransitionState() {
+ db.transitionStateLock.Unlock()
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index ab1065eb4cf5..48bcca154343 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -176,10 +176,11 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error)
if tr.IsVerkle() {
sdb.witness = sdb.NewAccessWitness()
}
- // if sdb.snaps != nil {
- // if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil {
- // }
- // }
+ if sdb.snaps != nil {
+ // if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap == nil {
+ // }
+ sdb.snap = sdb.snaps.Snapshot(root)
+ }
return sdb, nil
}
@@ -1317,7 +1318,7 @@ func (s *StateDB) Commit(block uint64, deleteEmptyObjects bool) (common.Hash, er
// - head layer is paired with HEAD state
// - head-1 layer is paired with HEAD-1 state
// - head-127 layer(bottom-most diff layer) is paired with HEAD-127 state
- if err := s.snaps.Cap(root, 128); err != nil {
+ if err := s.snaps.Cap(root, 8192); err != nil {
log.Warn("Failed to cap snapshot tree", "root", root, "layers", 128, "err", err)
}
}
diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go
index 6c5c158cc239..4521736c7beb 100644
--- a/core/state/trie_prefetcher.go
+++ b/core/state/trie_prefetcher.go
@@ -302,7 +302,7 @@ func (sf *subfetcher) loop() {
}
sf.trie = trie
} else {
- trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, nil /* safe to set to nil for now, as there is no prefetcher for verkle */)
+ trie, err := sf.db.OpenStorageTrie(sf.state, sf.addr, sf.root, sf.trie)
if err != nil {
log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err)
return
diff --git a/core/state_processor.go b/core/state_processor.go
index fbc6beda4a08..d6a01673c6ab 100644
--- a/core/state_processor.go
+++ b/core/state_processor.go
@@ -21,9 +21,6 @@ import (
"errors"
"fmt"
"math/big"
- "runtime"
- "sync"
- "time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -34,10 +31,6 @@ import (
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
- "github.com/ethereum/go-ethereum/trie"
- tutils "github.com/ethereum/go-ethereum/trie/utils"
- "github.com/ethereum/go-verkle"
- "github.com/holiman/uint256"
)
// StateProcessor is a basic Processor, which takes care of transitioning
@@ -113,11 +106,6 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
return nil, nil, 0, errors.New("withdrawals before shanghai")
}
- // Perform the overlay transition, if relevant
- if err := OverlayVerkleTransition(statedb); err != nil {
- return nil, nil, 0, fmt.Errorf("error performing verkle overlay transition: %w", err)
- }
-
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
p.engine.Finalize(p.bc, header, statedb, block.Transactions(), block.Uncles(), withdrawals)
@@ -192,185 +180,6 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo
return applyTransaction(msg, config, gp, statedb, header.Number, header.Hash(), tx, usedGas, vmenv)
}
-var zeroTreeIndex uint256.Int
-
-// keyValueMigrator is a helper module that collects key-values from the overlay-tree migration for Verkle Trees.
-// It assumes that the walk of the base tree is done in address-order, so it exploit that fact to
-// collect the key-values in a way that is efficient.
-type keyValueMigrator struct {
- // leafData contains the values for the future leaf for a particular VKT branch.
- leafData []migratedKeyValue
-
- // When prepare() is called, it will start a background routine that will process the leafData
- // saving the result in newLeaves to be used by migrateCollectedKeyValues(). The background
- // routine signals that it is done by closing processingReady.
- processingReady chan struct{}
- newLeaves []verkle.LeafNode
- prepareErr error
-}
-
-func newKeyValueMigrator() *keyValueMigrator {
- // We do initialize the VKT config since prepare() might indirectly make multiple GetConfig() calls
- // in different goroutines when we never called GetConfig() before, causing a race considering the way
- // that `config` is designed in go-verkle.
- // TODO: jsign as a fix for this in the PR where we move to a file-less precomp, since it allows safe
- // concurrent calls to GetConfig(). When that gets merged, we can remove this line.
- _ = verkle.GetConfig()
- return &keyValueMigrator{
- processingReady: make(chan struct{}),
- leafData: make([]migratedKeyValue, 0, 10_000),
- }
-}
-
-type migratedKeyValue struct {
- branchKey branchKey
- leafNodeData verkle.BatchNewLeafNodeData
-}
-type branchKey struct {
- addr common.Address
- treeIndex uint256.Int
-}
-
-func newBranchKey(addr []byte, treeIndex *uint256.Int) branchKey {
- var sk branchKey
- copy(sk.addr[:], addr)
- sk.treeIndex = *treeIndex
- return sk
-}
-
-func (kvm *keyValueMigrator) addStorageSlot(addr []byte, slotNumber []byte, slotValue []byte) {
- treeIndex, subIndex := tutils.GetTreeKeyStorageSlotTreeIndexes(slotNumber)
- leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
- leafNodeData.Values[subIndex] = slotValue
-}
-
-func (kvm *keyValueMigrator) addAccount(addr []byte, acc *types.StateAccount) {
- leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
-
- var version [verkle.LeafValueSize]byte
- leafNodeData.Values[tutils.VersionLeafKey] = version[:]
-
- var balance [verkle.LeafValueSize]byte
- for i, b := range acc.Balance.Bytes() {
- balance[len(acc.Balance.Bytes())-1-i] = b
- }
- leafNodeData.Values[tutils.BalanceLeafKey] = balance[:]
-
- var nonce [verkle.LeafValueSize]byte
- binary.LittleEndian.PutUint64(nonce[:8], acc.Nonce)
- leafNodeData.Values[tutils.NonceLeafKey] = nonce[:]
-
- leafNodeData.Values[tutils.CodeHashLeafKey] = acc.CodeHash[:]
-}
-
-func (kvm *keyValueMigrator) addAccountCode(addr []byte, codeSize uint64, chunks []byte) {
- leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, &zeroTreeIndex))
-
- // Save the code size.
- var codeSizeBytes [verkle.LeafValueSize]byte
- binary.LittleEndian.PutUint64(codeSizeBytes[:8], codeSize)
- leafNodeData.Values[tutils.CodeSizeLeafKey] = codeSizeBytes[:]
-
- // The first 128 chunks are stored in the account header leaf.
- for i := 0; i < 128 && i < len(chunks)/32; i++ {
- leafNodeData.Values[byte(128+i)] = chunks[32*i : 32*(i+1)]
- }
-
- // Potential further chunks, have their own leaf nodes.
- for i := 128; i < len(chunks)/32; {
- treeIndex, _ := tutils.GetTreeKeyCodeChunkIndices(uint256.NewInt(uint64(i)))
- leafNodeData := kvm.getOrInitLeafNodeData(newBranchKey(addr, treeIndex))
-
- j := i
- for ; (j-i) < 256 && j < len(chunks)/32; j++ {
- leafNodeData.Values[byte((j-128)%256)] = chunks[32*j : 32*(j+1)]
- }
- i = j
- }
-}
-
-func (kvm *keyValueMigrator) getOrInitLeafNodeData(bk branchKey) *verkle.BatchNewLeafNodeData {
- // Remember that keyValueMigration receives actions ordered by (address, subtreeIndex).
- // This means that we can assume that the last element of leafData is the one that we
- // are looking for, or that we need to create a new one.
- if len(kvm.leafData) == 0 || kvm.leafData[len(kvm.leafData)-1].branchKey != bk {
- kvm.leafData = append(kvm.leafData, migratedKeyValue{
- branchKey: bk,
- leafNodeData: verkle.BatchNewLeafNodeData{
- Stem: nil, // It will be calculated in the prepare() phase, since it's CPU heavy.
- Values: make(map[byte][]byte),
- },
- })
- }
- return &kvm.leafData[len(kvm.leafData)-1].leafNodeData
-}
-
-func (kvm *keyValueMigrator) prepare() {
- // We fire a background routine to process the leafData and save the result in newLeaves.
- // The background routine signals that it is done by closing processingReady.
- go func() {
- // Step 1: We split kvm.leafData in numBatches batches, and we process each batch in a separate goroutine.
- // This fills each leafNodeData.Stem with the correct value.
- var wg sync.WaitGroup
- batchNum := runtime.NumCPU()
- batchSize := (len(kvm.leafData) + batchNum - 1) / batchNum
- for i := 0; i < len(kvm.leafData); i += batchSize {
- start := i
- end := i + batchSize
- if end > len(kvm.leafData) {
- end = len(kvm.leafData)
- }
- wg.Add(1)
-
- batch := kvm.leafData[start:end]
- go func() {
- defer wg.Done()
- var currAddr common.Address
- var currPoint *verkle.Point
- for i := range batch {
- if batch[i].branchKey.addr != currAddr {
- currAddr = batch[i].branchKey.addr
- currPoint = tutils.EvaluateAddressPoint(currAddr[:])
- }
- stem := tutils.GetTreeKeyWithEvaluatedAddess(currPoint, &batch[i].branchKey.treeIndex, 0)
- stem = stem[:verkle.StemSize]
- batch[i].leafNodeData.Stem = stem
- }
- }()
- }
- wg.Wait()
-
- // Step 2: Now that we have all stems (i.e: tree keys) calculated, we can create the new leaves.
- nodeValues := make([]verkle.BatchNewLeafNodeData, len(kvm.leafData))
- for i := range kvm.leafData {
- nodeValues[i] = kvm.leafData[i].leafNodeData
- }
-
- // Create all leaves in batch mode so we can optimize cryptography operations.
- kvm.newLeaves, kvm.prepareErr = verkle.BatchNewLeafNode(nodeValues)
- close(kvm.processingReady)
- }()
-}
-
-func (kvm *keyValueMigrator) migrateCollectedKeyValues(tree *trie.VerkleTrie) error {
- now := time.Now()
- <-kvm.processingReady
- if kvm.prepareErr != nil {
- return fmt.Errorf("failed to prepare key values: %w", kvm.prepareErr)
- }
- log.Info("Prepared key values from base tree", "duration", time.Since(now))
-
- // Insert into the tree.
- if err := tree.InsertMigratedLeaves(kvm.newLeaves); err != nil {
- return fmt.Errorf("failed to insert migrated leaves: %w", err)
- }
-
- return nil
-}
-
-// InsertBlockHashHistoryAtEip2935Fork handles the insertion of all previous 256
-// blocks on the eip2935 activation block. It also adds the account header of the
-// history contract to the witness.
func InsertBlockHashHistoryAtEip2935Fork(statedb *state.StateDB, prevNumber uint64, prevHash common.Hash, chain consensus.ChainHeaderReader) {
// Make sure that the historical contract is added to the witness
statedb.Witness().TouchFullAccount(params.HistoryStorageAddress[:], true)
diff --git a/eth/api_debug.go b/eth/api_debug.go
index 9cfa9103fb58..3e0daac1b5b0 100644
--- a/eth/api_debug.go
+++ b/eth/api_debug.go
@@ -17,7 +17,9 @@
package eth
import (
+ "bytes"
"context"
+ "encoding/gob"
"errors"
"fmt"
"time"
@@ -432,3 +434,41 @@ func (api *DebugAPI) SetTrieFlushInterval(interval string) error {
func (api *DebugAPI) GetTrieFlushInterval() string {
return api.eth.blockchain.GetTrieFlushInterval().String()
}
+
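+// ConversionStatusResult reports the progress of the verkle (overlay)
+// transition for a given block's state root.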
+type ConversionStatusResult struct {
+ Started bool `json:"started"`
+ Ended bool `json:"ended"`
+}
+
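+// ConversionStatus returns whether the verkle transition has started and/or
+// completed for the state root of the given block.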
+func (api *DebugAPI) ConversionStatus(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*ConversionStatusResult, error) {
+ block, err := api.eth.APIBackend.BlockByNumberOrHash(ctx, blockNrOrHash)
+ if err != nil {
+ return nil, err
+ }
+ data, err := rawdb.ReadVerkleTransitionState(api.eth.ChainDb(), block.Root())
+ if err != nil {
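+ // A missing entry means no conversion has been recorded for this root.
+ // Note: the string comparison below only matches the pebble backend's
+ // "not found" error.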
+ if err.Error() == "pebble: not found" {
+ return &ConversionStatusResult{}, nil
+ }
+ return nil, err
+ }
+ log.Info("found entry", "data", data)
+ if len(data) == 0 {
+ log.Info("found no data")
+ // started and ended will be false as no conversion has started
+ return &ConversionStatusResult{}, nil
+ }
+
+ var (
+ ts state.TransitionState
+ buf = bytes.NewBuffer(data)
+ dec = gob.NewDecoder(buf)
+ )
+ // Decode transition state
+ err = dec.Decode(&ts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode transition state, err=%v", err)
+ }
+
+ return &ConversionStatusResult{Started: ts.Started, Ended: ts.Ended}, nil
+}
diff --git a/eth/backend.go b/eth/backend.go
index a6c80159077d..c47bc6b5bb35 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -201,6 +201,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if config.OverridePrague != nil {
overrides.OverridePrague = config.OverridePrague
}
+ if config.OverrideProofInBlock != nil {
+ overrides.OverrideProofInBlock = config.OverrideProofInBlock
+ }
+ if config.OverrideOverlayStride != nil {
+ overrides.OverrideOverlayStride = config.OverrideOverlayStride
+ }
eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, &overrides, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit)
if err != nil {
return nil, err
diff --git a/eth/catalyst/api.go b/eth/catalyst/api.go
index 63079415fc14..925494a74d5f 100644
--- a/eth/catalyst/api.go
+++ b/eth/catalyst/api.go
@@ -532,13 +532,9 @@ func (api *ConsensusAPI) newPayload(params engine.ExecutableData, versionedHashe
if api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) && !api.eth.BlockChain().Config().IsPrague(parent.Number(), parent.Time()) {
parent := api.eth.BlockChain().GetHeaderByNumber(block.NumberU64() - 1)
if !api.eth.BlockChain().Config().IsPrague(parent.Number, parent.Time) {
- api.eth.BlockChain().StartVerkleTransition(parent.Root, common.Hash{}, api.eth.BlockChain().Config(), nil)
+ api.eth.BlockChain().StartVerkleTransition(parent.Root, common.Hash{}, api.eth.BlockChain().Config(), nil, parent.Root)
}
}
- // Reset db merge state in case of a reorg
- if !api.eth.BlockChain().Config().IsPrague(block.Number(), block.Time()) {
- api.eth.BlockChain().ReorgThroughVerkleTransition()
- }
// Another cornercase: if the node is in snap sync mode, but the CL client
// tries to make it import a block. That should be denied as pushing something
// into the database directly will conflict with the assumptions of snap sync
diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go
index 4606b60408dd..fc9550147bcc 100644
--- a/eth/ethconfig/config.go
+++ b/eth/ethconfig/config.go
@@ -158,6 +158,12 @@ type Config struct {
// OverrideVerkle (TODO: remove after the fork)
OverridePrague *uint64 `toml:",omitempty"`
+
+ // OverrideProofInBlock overrides whether the verkle proof is included in blocks
+ OverrideProofInBlock *bool `toml:",omitempty"`
+
+ // OverrideOverlayStride overrides the stride of the overlay transition
+ OverrideOverlayStride *uint64 `toml:",omitempty"`
}
// CreateConsensusEngine creates a consensus engine for the given chain config.
diff --git a/light/trie.go b/light/trie.go
index 53d54615d909..7e7c03bc16c1 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -101,7 +101,7 @@ func (db *odrDatabase) DiskDB() ethdb.KeyValueStore {
panic("not implemented")
}
-func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translatedRoot common.Hash, chainConfig *params.ChainConfig, _ *uint64) {
+func (db *odrDatabase) StartVerkleTransition(originalRoot common.Hash, translatedRoot common.Hash, chainConfig *params.ChainConfig, _ *uint64, _ common.Hash) {
panic("not implemented") // TODO: Implement
}
@@ -121,7 +121,11 @@ func (db *odrDatabase) Transitioned() bool {
panic("not implemented") // TODO: Implement
}
-func (db *odrDatabase) SetCurrentSlotHash(hash common.Hash) {
+func (db *odrDatabase) InitTransitionStatus(bool, bool) {
+ panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) SetCurrentSlotHash(common.Hash) {
panic("not implemented") // TODO: Implement
}
@@ -129,7 +133,7 @@ func (db *odrDatabase) GetCurrentAccountAddress() *common.Address {
panic("not implemented") // TODO: Implement
}
-func (db *odrDatabase) SetCurrentAccountAddress(_ common.Address) {
+func (db *odrDatabase) SetCurrentAccountAddress(common.Address) {
panic("not implemented") // TODO: Implement
}
@@ -141,7 +145,7 @@ func (db *odrDatabase) GetCurrentSlotHash() common.Hash {
panic("not implemented") // TODO: Implement
}
-func (db *odrDatabase) SetStorageProcessed(_ bool) {
+func (db *odrDatabase) SetStorageProcessed(bool) {
panic("not implemented") // TODO: Implement
}
@@ -153,15 +157,30 @@ func (db *odrDatabase) GetCurrentPreimageOffset() int64 {
panic("not implemented") // TODO: Implement
}
-func (db *odrDatabase) SetCurrentPreimageOffset(_ int64) {
+func (db *odrDatabase) SetCurrentPreimageOffset(int64) {
+ panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) AddRootTranslation(common.Hash, common.Hash) {
+ panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) SetLastMerkleRoot(common.Hash) {
panic("not implemented") // TODO: Implement
}
-func (db *odrDatabase) AddRootTranslation(originalRoot common.Hash, translatedRoot common.Hash) {
+func (db *odrDatabase) SaveTransitionState(common.Hash) {
panic("not implemented") // TODO: Implement
}
-func (db *odrDatabase) SetLastMerkleRoot(root common.Hash) {
+func (db *odrDatabase) LoadTransitionState(common.Hash) {
+ panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) LockCurrentTransitionState() {
+ panic("not implemented") // TODO: Implement
+}
+
+func (db *odrDatabase) UnLockCurrentTransitionState() {
panic("not implemented") // TODO: Implement
}
diff --git a/miner/worker.go b/miner/worker.go
index aae4fe8b6454..3fb4a3fa43e5 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -852,7 +852,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
if genParams.parentHash != (common.Hash{}) {
block := w.chain.GetBlockByHash(genParams.parentHash)
if block == nil {
- return nil, fmt.Errorf("missing parent")
+ return nil, fmt.Errorf("missing parent: %x", genParams.parentHash)
}
parent = block.Header()
}
@@ -894,7 +894,7 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
if w.chain.Config().IsPrague(header.Number, header.Time) {
parent := w.chain.GetHeaderByNumber(header.Number.Uint64() - 1)
if !w.chain.Config().IsPrague(parent.Number, parent.Time) {
- w.chain.StartVerkleTransition(parent.Root, common.Hash{}, w.chain.Config(), nil)
+ w.chain.StartVerkleTransition(parent.Root, common.Hash{}, w.chain.Config(), w.chain.Config().PragueTime, parent.Root)
}
}
@@ -904,9 +904,6 @@ func (w *worker) prepareWork(genParams *generateParams) (*environment, error) {
if err != nil {
return nil, err
}
- if w.chain.Config().IsPrague(header.Number, header.Time) {
- core.OverlayVerkleTransition(state)
- }
// Run the consensus preparation with the default or customized consensus engine.
if err := w.engine.Prepare(w.chain, header); err != nil {
log.Error("Failed to prepare header for sealing", "err", err)
diff --git a/params/config.go b/params/config.go
index 94dcb57b2fe2..5b55c5197700 100644
--- a/params/config.go
+++ b/params/config.go
@@ -301,7 +301,8 @@ type ChainConfig struct {
IsDevMode bool `json:"isDev,omitempty"`
// Proof in block
- ProofInBlocks bool `json:"proofInBlocks,omitempty"`
+ ProofInBlocks bool `json:"proofInBlocks,omitempty"`
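+ // OverlayStride is the stride of the overlay transition, i.e. how many
+ // leaves are converted per block.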
+ OverlayStride uint64 `json:"overlayStride,omitempty"`
}
// EthashConfig is the consensus engine configs for proof-of-work based sealing.
diff --git a/trie/transition.go b/trie/transition.go
index 24daf436ed8a..0fe197336524 100644
--- a/trie/transition.go
+++ b/trie/transition.go
@@ -17,6 +17,8 @@
package trie
import (
+ "fmt"
+
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
@@ -62,7 +64,11 @@ func (t *TransitionTrie) GetKey(key []byte) []byte {
// not be modified by the caller. If a node was not found in the database, a
// trie.MissingNodeError is returned.
func (t *TransitionTrie) GetStorage(addr common.Address, key []byte) ([]byte, error) {
- if val, err := t.overlay.GetStorage(addr, key); len(val) != 0 || err != nil {
+ val, err := t.overlay.GetStorage(addr, key)
+ if err != nil {
+ return nil, fmt.Errorf("get storage from overlay: %s", err)
+ }
+ if len(val) != 0 {
return val, nil
}
// TODO also insert value into overlay