From 6b11a70d323acc26de3e334e928a050594301297 Mon Sep 17 00:00:00 2001
From: VM
Date: Mon, 18 Nov 2024 11:16:06 +0800
Subject: [PATCH] fix: optimize logs

---
 core/rawdb/freezer.go           |  2 +-
 eth/backend.go                  | 27 ++++++++++++++-------------
 triedb/pathdb/database_test.go  |  2 --
 triedb/pathdb/journal.go        | 33 +++++++--------------------------
 triedb/pathdb/nodebufferlist.go | 18 +++++++++++++++---
 5 files changed, 37 insertions(+), 45 deletions(-)

diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go
index 54b911f1cb..ed0d582380 100644
--- a/core/rawdb/freezer.go
+++ b/core/rawdb/freezer.go
@@ -127,7 +127,7 @@ func NewFreezer(datadir string, namespace string, readonly, writeTrieNode bool,
 	// Create the tables.
 	for name, disableSnappy := range tables {
 		if name == stateHistoryTrieNodesData && !writeTrieNode {
-			log.Info("Not create trie node data")
+			log.Info("Skip creating trie node data in freezer db")
 			continue
 		}
 		table, err := newTable(datadir, name, readMeter, writeMeter, sizeGauge, maxTableSize, disableSnappy, readonly)
diff --git a/eth/backend.go b/eth/backend.go
index d96c20ce0f..dc27436f31 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -21,7 +21,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/ethereum/go-ethereum/core/txpool/bundlepool"
 	"math/big"
 	"runtime"
 	"sync"
@@ -39,6 +38,7 @@
 	"github.com/ethereum/go-ethereum/core/state/pruner"
 	"github.com/ethereum/go-ethereum/core/txpool"
 	"github.com/ethereum/go-ethereum/core/txpool/blobpool"
+	"github.com/ethereum/go-ethereum/core/txpool/bundlepool"
 	"github.com/ethereum/go-ethereum/core/txpool/legacypool"
 	"github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
@@ -133,6 +133,17 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice)
 	}
 
+	// Assemble the Ethereum object
+	chainDb, err := stack.OpenAndMergeDatabase("chaindata", ChainDBNamespace, false, config.DatabaseCache, config.DatabaseHandles,
+		config.DatabaseFreezer)
+	if err != nil {
+		return nil, err
+	}
+	config.StateScheme, err = rawdb.ParseStateScheme(config.StateScheme, chainDb)
+	if err != nil {
+		return nil, err
+	}
+
 	if config.StateScheme == rawdb.HashScheme && config.NoPruning && config.TrieDirtyCache > 0 {
 		if config.SnapshotCache > 0 {
 			config.TrieCleanCache += config.TrieDirtyCache * 3 / 5
@@ -152,21 +163,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
 		config.TrieCleanCache += config.TrieDirtyCache - pathdb.MaxBufferSize/1024/1024
 		config.TrieDirtyCache = pathdb.MaxBufferSize / 1024 / 1024
 	}
-	log.Info("Allocated memory caches",
-		"state_scheme", config.StateScheme,
+	log.Info("Allocated memory caches", "state_scheme", config.StateScheme,
 		"trie_clean_cache", common.StorageSize(config.TrieCleanCache)*1024*1024,
 		"trie_dirty_cache", common.StorageSize(config.TrieDirtyCache)*1024*1024,
 		"snapshot_cache", common.StorageSize(config.SnapshotCache)*1024*1024)
-	// Assemble the Ethereum object
-	chainDb, err := stack.OpenAndMergeDatabase("chaindata", ChainDBNamespace, false, config.DatabaseCache, config.DatabaseHandles,
-		config.DatabaseFreezer)
-	if err != nil {
-		return nil, err
-	}
-	config.StateScheme, err = rawdb.ParseStateScheme(config.StateScheme, chainDb)
-	if err != nil {
-		return nil, err
-	}
+
 	// Try to recover offline state pruning only in hash-based.
 	if config.StateScheme == rawdb.HashScheme {
 		if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb); err != nil {
diff --git a/triedb/pathdb/database_test.go b/triedb/pathdb/database_test.go
index 3674064cf3..c823f05291 100644
--- a/triedb/pathdb/database_test.go
+++ b/triedb/pathdb/database_test.go
@@ -319,7 +319,6 @@ func (t *tester) verifyState(root common.Hash) error {
 	}
 	_, err = reader.Node(common.Hash{}, nil, root)
 	if err != nil {
-		fmt.Println("error: ", err)
 		return errors.New("root node is not available")
 	}
 	for addrHash, account := range t.snapAccounts[root] {
@@ -460,7 +459,6 @@ func TestDisable(t *testing.T) {
 		t.Fatalf("Invalid activation should be rejected")
 	}
 	if err := tester.db.Enable(stored); err != nil {
-		fmt.Println(err)
 		t.Fatal("Failed to activate database")
 	}
 
diff --git a/triedb/pathdb/journal.go b/triedb/pathdb/journal.go
index 4579bb41aa..fab4495d88 100644
--- a/triedb/pathdb/journal.go
+++ b/triedb/pathdb/journal.go
@@ -250,7 +250,6 @@ func (db *Database) loadLayers() layer
 	_, root := rawdb.ReadAccountTrieNode(db.diskdb, nil)
 	root = types.TrieRootHash(root)
 
-	fmt.Println("1 useBase, fastRecovery", db.useBase, db.fastRecovery)
 	// Load the layers by resolving the journal
 	head, err := db.loadJournal(root)
 	if err == nil {
@@ -269,7 +268,6 @@
 		stateID = rawdb.ReadPersistentStateID(db.diskdb)
 	)
 
-	fmt.Println("2 useBase, fastRecovery", db.useBase, db.fastRecovery)
 	if (errors.Is(err, errMissJournal) || errors.Is(err, errUnmatchedJournal)) && db.fastRecovery &&
 		db.config.TrieNodeBufferType == NodeBufferList && !db.useBase {
 		start := time.Now()
@@ -332,23 +330,13 @@ func (db *Database) loadDiskLayer(r *rlp.Stream, journalTypeForReader JournalTyp
 	if stored > id {
 		return nil, fmt.Errorf("invalid state id: stored %d resolved %d", stored, id)
 	}
 
+	// Resolve nodes cached in node buffer
 	var encoded []journalNodes
 	if err := journalBuf.Decode(&encoded); err != nil {
 		return nil, fmt.Errorf("failed to load disk nodes: %v", err)
 	}
-	nodes := make(map[common.Hash]map[string]*trienode.Node)
-	for _, entry := range encoded {
-		subset := make(map[string]*trienode.Node)
-		for _, n := range entry.Nodes {
-			if len(n.Blob) > 0 {
-				subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
-			} else {
-				subset[string(n.Path)] = trienode.NewDeleted()
-			}
-		}
-		nodes[entry.Owner] = subset
-	}
+	nodes := flattenTrieNodes(encoded)
 
 	if journalTypeForReader == JournalFileType {
 		var shaSum [32]byte
@@ -362,7 +350,6 @@ func (db *Database) loadDiskLayer(r *rlp.Stream, journalTypeForReader JournalTyp
 		}
 	}
 
-	fmt.Println("3 useBase, fastRecovery", db.useBase, db.fastRecovery)
 	// Calculate the internal state transitions by id difference.
 	nb, err := NewTrieNodeBuffer(db.diskdb, db.config.TrieNodeBufferType, db.bufferSize, nodes, id-stored, db.config.ProposeBlockInterval,
 		db.config.NotifyKeep, db.freezer, db.fastRecovery, db.useBase)
@@ -371,14 +358,15 @@ func (db *Database) loadDiskLayer(r *rlp.Stream, journalTypeForReader JournalTyp
 		return nil, err
 	}
 
-	if db.config.TrieNodeBufferType == NodeBufferList && !db.useBase {
+	if db.config.TrieNodeBufferType == NodeBufferList && !db.useBase && db.fastRecovery {
 		recoveredRoot, recoveredStateID, _ := nb.getLatestStatus()
 		if recoveredRoot != root && recoveredStateID != id {
-			log.Error("unequal state root and state id")
+			log.Error("Recovered state root and state id are different from the recorded ones",
+				"recovered_root", recoveredRoot, "root", root, "recovered_state_id", recoveredStateID, "id", id)
 			return nil, errors.New("Unmatched root and state id with recovered")
 		}
 
-		log.Info("Finish recovering node buffer list", "latest root hash", recoveredRoot.String(),
+		log.Info("Disk layer finished recovering node buffer list", "latest root hash", recoveredRoot.String(),
 			"latest state_id", recoveredStateID)
 	}
 
@@ -498,14 +486,7 @@ func (dl *diskLayer) journal(w io.Writer, journalType JournalType) error {
 	}
 	// Step three, write all unwritten nodes into the journal
 	bufferNodes := dl.buffer.getAllNodes()
-	nodes := make([]journalNodes, 0, len(bufferNodes))
-	for owner, subset := range bufferNodes {
-		entry := journalNodes{Owner: owner}
-		for path, node := range subset {
-			entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
-		}
-		nodes = append(nodes, entry)
-	}
+	nodes := compressTrieNodes(bufferNodes)
 	if err := rlp.Encode(journalBuf, nodes); err != nil {
 		return err
 	}
diff --git a/triedb/pathdb/nodebufferlist.go b/triedb/pathdb/nodebufferlist.go
index d1b43d70e0..0fe2b1c687 100644
--- a/triedb/pathdb/nodebufferlist.go
+++ b/triedb/pathdb/nodebufferlist.go
@@ -108,13 +108,26 @@ func newNodeBufferList(
 		dlInMd = wpBlocks
 	}
 
+	var base *multiDifflayer
+	if nodes != nil && !fastRecovery {
+		var size uint64
+		for _, subset := range nodes {
+			for path, n := range subset {
+				size += uint64(len(n.Blob) + len(path))
+			}
+		}
+		base = newMultiDifflayer(limit, size, common.Hash{}, nodes, layers)
+	} else {
+		base = newMultiDifflayer(limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0)
+	}
+
 	nf := &nodebufferlist{
 		db:         db,
 		wpBlocks:   wpBlocks,
 		rsevMdNum:  rsevMdNum,
 		dlInMd:     dlInMd,
 		limit:      limit,
-		base:       newMultiDifflayer(limit, 0, common.Hash{}, make(map[common.Hash]map[string]*trienode.Node), 0),
+		base:       base,
 		persistID:  rawdb.ReadPersistentStateID(db),
 		stopCh:     make(chan struct{}),
 		waitStopCh: make(chan struct{}),
@@ -123,7 +136,6 @@ func newNodeBufferList(
 		keepFunc:   keepFunc,
 	}
 
-	fmt.Println("useBase, fastRecovery", useBase, fastRecovery)
 	if !useBase && fastRecovery {
 		if freezer == nil {
 			log.Crit("Use unopened freezer db to recover node buffer list")
@@ -213,7 +225,7 @@ func (nf *nodebufferlist) recoverNodeBufferList(freezer *rawdb.ResettableFreezer
 	}
 	nf.diffToBase()
 
-	log.Info("Succeed to add diff layer", "base_size", nf.base.size, "tail_state_id", nf.tail.id,
+	log.Info("Succeeded to recover node buffer list", "base_size", nf.base.size, "tail_state_id", nf.tail.id,
 		"head_state_id", nf.head.id, "nbl_layers", nf.layers, "base_layers", nf.base.layers)
 	return nil
 }
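Note: the helpers flattenTrieNodes and compressTrieNodes called by the journal.go hunks are defined elsewhere in triedb/pathdb and are not part of this patch. For reference only, the sketch below reconstructs their assumed shape from the inline loops the patch removes; the exact upstream signatures may differ.

// Sketch only: assumed shapes of the helpers referenced above, reconstructed
// from the inline loops this patch replaces. Treat names and signatures as
// assumptions, not the authoritative triedb/pathdb definitions.
package pathdb

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/trie/trienode"
)

// flattenTrieNodes expands journaled node entries into the owner -> path -> node
// map consumed by the node buffer (mirrors the loop removed from loadDiskLayer).
func flattenTrieNodes(encoded []journalNodes) map[common.Hash]map[string]*trienode.Node {
	nodes := make(map[common.Hash]map[string]*trienode.Node)
	for _, entry := range encoded {
		subset := make(map[string]*trienode.Node)
		for _, n := range entry.Nodes {
			if len(n.Blob) > 0 {
				subset[string(n.Path)] = trienode.New(crypto.Keccak256Hash(n.Blob), n.Blob)
			} else {
				subset[string(n.Path)] = trienode.NewDeleted()
			}
		}
		nodes[entry.Owner] = subset
	}
	return nodes
}

// compressTrieNodes goes the other way: it packs buffered nodes back into
// journalNodes entries for RLP encoding (mirrors the loop removed from
// diskLayer.journal).
func compressTrieNodes(bufferNodes map[common.Hash]map[string]*trienode.Node) []journalNodes {
	nodes := make([]journalNodes, 0, len(bufferNodes))
	for owner, subset := range bufferNodes {
		entry := journalNodes{Owner: owner}
		for path, node := range subset {
			entry.Nodes = append(entry.Nodes, journalNode{Path: []byte(path), Blob: node.Blob})
		}
		nodes = append(nodes, entry)
	}
	return nodes
}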