Merge pull request ethereum#2035 from weiihann/v1.3.4-snapsync
all: pull snap sync PRs from upstream v1.13.5
zzzckck authored Dec 19, 2023
2 parents 4d2bd12 + 3a5ec36 commit 474860e
Showing 24 changed files with 715 additions and 414 deletions.
2 changes: 1 addition & 1 deletion cmd/clef/main.go
@@ -1206,7 +1206,7 @@ func GenDoc(ctx *cli.Context) error {
URL: accounts.URL{Path: ".. ignored .."},
},
{
Address: common.HexToAddress("0xffffffffffffffffffffffffffffffffffffffff"),
Address: common.MaxAddress,
},
}})
}
4 changes: 2 additions & 2 deletions cmd/devp2p/internal/ethtest/snap.go
@@ -58,7 +58,7 @@ type accRangeTest struct {
func (s *Suite) TestSnapGetAccountRange(t *utesting.T) {
var (
root = s.chain.RootAt(999)
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
ffHash = common.MaxHash
zero = common.Hash{}
firstKeyMinus1 = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf29")
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
@@ -125,7 +125,7 @@ type stRangesTest struct {
// TestSnapGetStorageRanges tests various forms of GetStorageRanges requests.
func (s *Suite) TestSnapGetStorageRanges(t *utesting.T) {
var (
ffHash = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
ffHash = common.MaxHash
zero = common.Hash{}
firstKey = common.HexToHash("0x00bf49f440a1cd0527e4d06e2765654c0f56452257516d793a9b8d604dcfdf2a")
secondKey = common.HexToHash("0x09e47cd5056a689e708f22fe1f932709a320518e444f5f7d8d46a3da523d6606")
6 changes: 6 additions & 0 deletions common/types.go
@@ -44,6 +44,12 @@ const (
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})

// MaxAddress represents the maximum possible address value.
MaxAddress = HexToAddress("0xffffffffffffffffffffffffffffffffffffffff")

// MaxHash represents the maximum possible hash value.
MaxHash = HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
)

// Hash represents the 32 byte Keccak256 hash of arbitrary data.
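The two new constants above replace the hand-written 0xff… literals used throughout the files below (clef, devp2p, snap, the state processor tests). A minimal sketch of how a caller might use them as range bounds, assuming a go-ethereum version that includes this change:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

func main() {
    // Cover the full 256-bit hash space: [common.Hash{}, common.MaxHash].
    start, limit := common.Hash{}, common.MaxHash
    fmt.Println("start:  ", start.Hex())
    fmt.Println("limit:  ", limit.Hex())
    fmt.Println("address:", common.MaxAddress.Hex())
}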
2 changes: 2 additions & 0 deletions core/rawdb/freezer_resettable.go
@@ -22,6 +22,7 @@ import (
"sync"

"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
)

const tmpSuffix = ".tmp"
@@ -240,6 +241,7 @@ func cleanup(path string) error {
}
for _, name := range names {
if name == filepath.Base(path)+tmpSuffix {
log.Info("Removed leftover freezer directory", "name", name)
return os.RemoveAll(filepath.Join(parent, name))
}
}
21 changes: 16 additions & 5 deletions core/rawdb/freezer_table.go
@@ -265,6 +265,12 @@ func (t *freezerTable) repair() error {
t.index.ReadAt(buffer, offsetsSize-indexEntrySize)
lastIndex.unmarshalBinary(buffer)
}
// Print an error log if the index is corrupted due to an incorrect
// last index item. While it is theoretically possible to have a zero offset
// by storing all zero-size items, it is highly unlikely to occur in practice.
if lastIndex.offset == 0 && offsetsSize%indexEntrySize > 1 {
log.Error("Corrupted index file detected", "lastOffset", lastIndex.offset, "items", offsetsSize%indexEntrySize-1)
}
if t.readonly {
t.head, err = t.openFile(lastIndex.filenum, openFreezerFileForReadOnly)
} else {
@@ -357,7 +363,7 @@ func (t *freezerTable) repair() error {
return err
}
if verbose {
t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "size", t.headBytes)
t.logger.Info("Chain freezer table opened", "items", t.items.Load(), "deleted", t.itemOffset.Load(), "hidden", t.itemHidden.Load(), "tailId", t.tailId, "headId", t.headId, "size", t.headBytes)
} else {
t.logger.Debug("Chain freezer table opened", "items", t.items.Load(), "size", common.StorageSize(t.headBytes))
}
@@ -530,6 +536,10 @@ func (t *freezerTable) truncateTail(items uint64) error {
if err := t.meta.Sync(); err != nil {
return err
}
// Close the index file before shortening it.
if err := t.index.Close(); err != nil {
return err
}
// Truncate the deleted index entries from the index file.
err = copyFrom(t.index.Name(), t.index.Name(), indexEntrySize*(newDeleted-deleted+1), func(f *os.File) error {
tailIndex := indexEntry{
@@ -543,13 +553,14 @@
return err
}
// Reopen the modified index file to load the changes
if err := t.index.Close(); err != nil {
return err
}
t.index, err = openFreezerFileForAppend(t.index.Name())
if err != nil {
return err
}
// Sync the file to ensure changes are flushed to disk
if err := t.index.Sync(); err != nil {
return err
}
// Release any files before the current tail
t.tailId = newTailId
t.itemOffset.Store(newDeleted)
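The hunk above reorders the tail truncation so the index file handle is closed before copyFrom rewrites it on disk, then reopened for appending and fsynced. A rough standalone sketch of that close, rewrite, reopen, sync sequence; shortenOpenFile and the rewrite callback are hypothetical stand-ins, not go-ethereum APIs:

package freezer

import "os"

// shortenOpenFile illustrates the ordering used above: close the open handle,
// let a callback rewrite the file in place (e.g. copy the surviving entries to
// a temp file and rename it over the original), reopen it, then fsync.
func shortenOpenFile(f *os.File, rewrite func(path string) error) (*os.File, error) {
    path := f.Name()
    if err := f.Close(); err != nil { // close before the on-disk file is replaced
        return nil, err
    }
    if err := rewrite(path); err != nil {
        return nil, err
    }
    reopened, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND, 0o644)
    if err != nil {
        return nil, err
    }
    if err := reopened.Sync(); err != nil { // flush the rewritten content to disk
        reopened.Close()
        return nil, err
    }
    return reopened, nil
}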
@@ -782,7 +793,7 @@ func (t *freezerTable) retrieveItems(start, count, maxBytes uint64) ([]byte, []i
return fmt.Errorf("missing data file %d", fileId)
}
if _, err := dataFile.ReadAt(output[len(output)-length:], int64(start)); err != nil {
return err
return fmt.Errorf("%w, fileid: %d, start: %d, length: %d", err, fileId, start, length)
}
return nil
}
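Wrapping with %w (instead of returning the bare error) attaches the file id and offsets while keeping the underlying error matchable. A small sketch of the idiom, using io.ErrUnexpectedEOF as a stand-in sentinel:

package main

import (
    "errors"
    "fmt"
    "io"
)

func main() {
    // %w wraps the underlying error, so callers can still match it with
    // errors.Is after the extra context has been attached.
    err := fmt.Errorf("%w, fileid: %d, start: %d, length: %d", io.ErrUnexpectedEOF, 7, 1024, 32)
    fmt.Println(err)
    fmt.Println("still ErrUnexpectedEOF?", errors.Is(err, io.ErrUnexpectedEOF))
}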
16 changes: 5 additions & 11 deletions core/state/snapshot/conversion.go
@@ -365,21 +365,15 @@ func generateTrieRoot(db ethdb.KeyValueWriter, scheme string, it Iterator, accou
}

func stackTrieGenerate(db ethdb.KeyValueWriter, scheme string, owner common.Hash, in chan trieKV, out chan common.Hash) {
var nodeWriter trie.NodeWriteFunc
options := trie.NewStackTrieOptions()
if db != nil {
nodeWriter = func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
rawdb.WriteTrieNode(db, owner, path, hash, blob, scheme)
}
})
}
t := trie.NewStackTrieWithOwner(nodeWriter, owner)
t := trie.NewStackTrie(options)
for leaf := range in {
t.Update(leaf.key[:], leaf.value)
}
var root common.Hash
if db == nil {
root = t.Hash()
} else {
root, _ = t.Commit()
}
out <- root
out <- t.Commit()
}
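The owner-aware NodeWriteFunc / NewStackTrieWithOwner pair is replaced here by an options value, and the same pattern appears again in core/state/statedb.go below. A minimal sketch of the options-based API as used in this commit; the key and value are made up, and error handling is omitted:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/trie"
)

func main() {
    // The writer callback no longer receives an owner; ownership is carried by the options.
    options := trie.NewStackTrieOptions().WithWriter(func(path []byte, hash common.Hash, blob []byte) {
        fmt.Printf("committed node %s at path %x (%d bytes)\n", hash.Hex(), path, len(blob))
    })
    st := trie.NewStackTrie(options)

    key := common.HexToHash("0x01").Bytes() // illustrative 32-byte key
    st.Update(key, []byte{0xaa})

    fmt.Println("root:", st.Commit().Hex()) // Commit now returns just the root hash
}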
4 changes: 3 additions & 1 deletion core/state/statedb.go
@@ -1363,10 +1363,12 @@ func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (boo
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
options := trie.NewStackTrieOptions()
options = options.WithWriter(func(path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path))
})
stack := trie.NewStackTrie(options)
for iter.Next() {
if size > storageDeleteLimit {
return true, size, nil, nil, nil
2 changes: 1 addition & 1 deletion core/state_processor_test.go
@@ -138,7 +138,7 @@ func TestStateProcessorErrors(t *testing.T) {
)

defer blockchain.Stop()
bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"))
bigNumber := new(big.Int).SetBytes(common.MaxHash.Bytes())
tooBigNumber := new(big.Int).Set(bigNumber)
tooBigNumber.Add(tooBigNumber, common.Big1)
for i, tt := range []struct {
2 changes: 1 addition & 1 deletion eth/protocols/snap/handler.go
@@ -367,7 +367,7 @@ func ServiceGetStorageRangesQuery(chain *core.BlockChain, req *GetStorageRangesP
if len(req.Origin) > 0 {
origin, req.Origin = common.BytesToHash(req.Origin), nil
}
var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
var limit = common.MaxHash
if len(req.Limit) > 0 {
limit, req.Limit = common.BytesToHash(req.Limit), nil
}
28 changes: 28 additions & 0 deletions eth/protocols/snap/metrics.go
@@ -26,4 +26,32 @@

IngressRegistrationErrorMeter = metrics.NewRegisteredMeter(ingressRegistrationErrorName, nil)
EgressRegistrationErrorMeter = metrics.NewRegisteredMeter(egressRegistrationErrorName, nil)

// deletionGauge is the metric to track how many trie node deletions
// are performed in total during the sync process.
deletionGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/delete", nil)

// lookupGauge is the metric to track how many trie node lookups are
// performed to determine if a node needs to be deleted.
lookupGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/lookup", nil)

// boundaryAccountNodesGauge is the metric to track how many boundary trie
// nodes in account trie are met.
boundaryAccountNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/account", nil)

// boundaryStorageNodesGauge is the metric to track how many boundary trie
// nodes in storage tries are met.
boundaryStorageNodesGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/boundary/storage", nil)

// smallStorageGauge is the metric to track how many storages are small enough
// to be retrieved in one or two requests.
smallStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/small", nil)

// largeStorageGauge is the metric to track how many storages are large enough
// to be retrieved concurrently.
largeStorageGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/large", nil)

// skipStorageHealingGauge is the metric to track how many storages are retrieved
// in multiple requests but healing is not necessary.
skipStorageHealingGauge = metrics.NewRegisteredGauge("eth/protocols/snap/sync/storage/noheal", nil)
)
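These counters use the standard go-ethereum metrics helpers. A rough sketch of registering and bumping such a gauge; the metric name here is invented, and metrics normally have to be enabled (the --metrics flag) before registration for values to be recorded:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    metrics.Enabled = true // normally switched on by the --metrics CLI flag

    // Hypothetical gauge registered in the default registry, like the ones above.
    demoGauge := metrics.NewRegisteredGauge("example/snap/sync/demo", nil)
    demoGauge.Inc(3) // e.g. three trie nodes deleted in this batch
    fmt.Println("value:", demoGauge.Value())
}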
2 changes: 1 addition & 1 deletion eth/protocols/snap/range.go
@@ -67,7 +67,7 @@ func (r *hashRange) End() common.Hash {
// If the end overflows (non divisible range), return a shorter interval
next, overflow := new(uint256.Int).AddOverflow(r.current, r.step)
if overflow {
return common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
return common.MaxHash
}
return next.SubUint64(next, 1).Bytes32()
}
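The hunk above only swaps the literal for common.MaxHash; the clamping logic is unchanged: if current + step no longer fits in 256 bits, the final interval ends at the maximum hash instead of wrapping around. A rough sketch with made-up current and step values that overflow:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/holiman/uint256"
)

func main() {
    current := new(uint256.Int).Lsh(uint256.NewInt(3), 254) // 0xc00...0, start of the last chunk
    step := new(uint256.Int).Lsh(uint256.NewInt(1), 254)    // 0x400...0, chunk width

    next, overflow := new(uint256.Int).AddOverflow(current, step)
    if overflow {
        fmt.Println("end:", common.MaxHash.Hex()) // clamp at the top of the hash space
        return
    }
    fmt.Println("end:", common.Hash(next.SubUint64(next, 1).Bytes32()).Hex())
}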
10 changes: 5 additions & 5 deletions eth/protocols/snap/range_test.go
@@ -45,7 +45,7 @@ func TestHashRanges(t *testing.T) {
common.HexToHash("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split a divisible part of the hash range up into 2 chunks
@@ -58,7 +58,7 @@
},
ends: []common.Hash{
common.HexToHash("0x8fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split the entire hash range into 3 non-divisible chunks
@@ -73,7 +73,7 @@
ends: []common.Hash{
common.HexToHash("0x5555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split a part of the hash range into 3 non-divisible chunks
@@ -88,7 +88,7 @@
ends: []common.Hash{
common.HexToHash("0x6aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
common.HexToHash("0xb555555555555555555555555555555555555555555555555555555555555555"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
// Split a part of hash range into a non divisible 3 chunks, but with a
@@ -108,7 +108,7 @@
ends: []common.Hash{
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5"),
common.HexToHash("0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb"),
common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"),
common.MaxHash,
},
},
}
(Remaining changed files in this commit are not shown.)
