chore: add more metrics to perf pbss
will@2012 committed May 20, 2024
1 parent f45305b commit 93c03fa
Showing 11 changed files with 117 additions and 17 deletions.
1 change: 1 addition & 0 deletions cmd/geth/config.go
@@ -280,6 +280,7 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
if ctx.IsSet(utils.MetricsEnabledExpensiveFlag.Name) {
cfg.Metrics.EnabledExpensive = ctx.Bool(utils.MetricsEnabledExpensiveFlag.Name)
}
cfg.Metrics.EnabledExpensive = true
if ctx.IsSet(utils.MetricsHTTPFlag.Name) {
cfg.Metrics.HTTP = ctx.String(utils.MetricsHTTPFlag.Name)
}
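
Note that in addition to honoring the --metrics.expensive flag, the hunk above unconditionally sets cfg.Metrics.EnabledExpensive = true, so every expensive timer added in this commit is always active. The sketch below is not part of the commit; the timer name and helper are illustrative. It shows the gating pattern the new timers rely on: the time.Now() / deferred UpdateSince pair only runs when metrics.EnabledExpensive is set.

package main

import (
    "time"

    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    // In geth these are normally driven by CLI flags; the commit above forces
    // EnabledExpensive on regardless of the flag.
    metrics.Enabled = true
    metrics.EnabledExpensive = true

    // Illustrative timer name; the commit registers similar timers under names
    // such as "rawdb/get/account/trienode/time".
    exampleOpTimer := metrics.NewRegisteredTimer("example/op/time", nil)

    doTimed := func() {
        if metrics.EnabledExpensive {
            start := time.Now()
            // The deferred update records the elapsed time when doTimed returns.
            defer func() { exampleOpTimer.UpdateSince(start) }()
        }
        time.Sleep(5 * time.Millisecond) // stand-in for the real work being measured
    }
    doTimed()
}
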
26 changes: 11 additions & 15 deletions core/blockchain.go
@@ -2267,20 +2267,16 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
bc.cacheBlock(block.Hash(), block)

// Update the metrics touched during block processing and validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing)
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation)
triehash := statedb.AccountHashes + statedb.StorageHashes // The time spent on tries hashing
trieUpdate := statedb.AccountUpdates + statedb.StorageUpdates // The time spent on tries update
trieRead := statedb.SnapshotAccountReads + statedb.AccountReads // The time spent on account read
trieRead += statedb.SnapshotStorageReads + statedb.StorageReads // The time spent on storage read
blockExecutionTimer.Update(ptime - trieRead) // The time spent on EVM processing
blockValidationTimer.Update(vtime - (triehash + trieUpdate)) // The time spent on block validation
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete(in processing)
storageReadTimer.Update(statedb.StorageReads) // Storage reads are complete(in processing)
snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete(in processing)
snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete(in processing)
accountUpdateTimer.Update(statedb.AccountUpdates) // Account updates are complete(in validation)
storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete(in validation)
accountHashTimer.Update(statedb.AccountHashes) // Account hashes are complete(in validation)
storageHashTimer.Update(statedb.StorageHashes) // Storage hashes are complete(in validation)
blockExecutionTimer.Update(ptime) // The time spent on EVM processing
blockValidationTimer.Update(vtime) // The time spent on block validation

// Write the block to the chain and get the status.
var (
@@ -2305,7 +2301,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
snapshotCommitTimer.Update(statedb.SnapshotCommits) // Snapshot commits are complete, we can mark them
triedbCommitTimer.Update(statedb.TrieDBCommits) // Trie database commits are complete, we can mark them

blockWriteTimer.Update(time.Since(wstart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits)
blockWriteTimer.UpdateSince(wstart)
blockInsertTimer.UpdateSince(start)

// Report the import stats before returning the various results
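
The net effect of this hunk: blockExecutionTimer and blockValidationTimer now record the raw processing and validation wall times instead of subtracting the trie read/update/hash components, and blockWriteTimer switches to a plain UpdateSince over the whole write instead of subtracting the commit sub-timers. A minimal sketch of the semantic difference, using hypothetical durations (not values from the commit):

package main

import (
    "time"

    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    metrics.Enabled = true

    oldStyle := metrics.NewRegisteredTimer("example/execution/old", nil)
    newStyle := metrics.NewRegisteredTimer("example/execution/new", nil)

    // Hypothetical measurements for a single block.
    ptime := 120 * time.Millisecond   // wall time spent in Process()
    trieRead := 35 * time.Millisecond // account + storage read time accumulated by the statedb

    // Before this commit: EVM time was approximated by stripping out state reads.
    oldStyle.Update(ptime - trieRead) // records 85ms
    // After this commit: the timer records the full processing wall time, while
    // the read components remain visible through the dedicated read timers.
    newStyle.Update(ptime) // records 120ms
}
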
10 changes: 10 additions & 0 deletions core/rawdb/accessors_snapshot.go
@@ -18,10 +18,12 @@ package rawdb

import (
"encoding/binary"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)

// ReadSnapshotDisabled retrieves if the snapshot maintenance is disabled.
@@ -74,6 +76,10 @@ func DeleteSnapshotRoot(db ethdb.KeyValueWriter) {

// ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
func ReadAccountSnapshot(db ethdb.KeyValueReader, hash common.Hash) []byte {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { rawdbGetAccountSnapNodeTimer.UpdateSince(start) }()
}
data, _ := db.Get(accountSnapshotKey(hash))
return data
}
@@ -94,6 +100,10 @@ func DeleteAccountSnapshot(db ethdb.KeyValueWriter, hash common.Hash) {

// ReadStorageSnapshot retrieves the snapshot entry of a storage trie leaf.
func ReadStorageSnapshot(db ethdb.KeyValueReader, accountHash, storageHash common.Hash) []byte {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { rawdbGetStorageSnapNodeTimer.UpdateSince(start) }()
}
data, _ := db.Get(storageSnapshotKey(accountHash, storageHash))
return data
}
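
The new snapshot timers wrap only the underlying db.Get, so they measure raw key-value latency for snapshot account and storage entries. A small usage sketch against an in-memory database (the hash and payload are made up, purely for illustration):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    metrics.Enabled = true
    metrics.EnabledExpensive = true

    db := rawdb.NewMemoryDatabase()
    accountHash := common.HexToHash("0x01")

    // Write and read back a snapshot entry; the read path above exercises the
    // timed branch in ReadAccountSnapshot when expensive metrics are enabled.
    rawdb.WriteAccountSnapshot(db, accountHash, []byte("account-rlp"))
    blob := rawdb.ReadAccountSnapshot(db, accountHash)
    fmt.Printf("%s\n", blob)
}
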
10 changes: 10 additions & 0 deletions core/rawdb/accessors_trie.go
@@ -19,11 +19,13 @@ package rawdb
import (
"fmt"
"sync"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"golang.org/x/crypto/sha3"
)

@@ -68,6 +70,10 @@ func (h *hasher) release() {
// ReadAccountTrieNode retrieves the account trie node and the associated node
// hash with the specified node path.
func ReadAccountTrieNode(db ethdb.KeyValueReader, path []byte) ([]byte, common.Hash) {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { rawdbGetAccountTrieNodeTimer.UpdateSince(start) }()
}
data, err := db.Get(accountTrieNodeKey(path))
if err != nil {
return nil, common.Hash{}
@@ -116,6 +122,10 @@ func DeleteAccountTrieNode(db ethdb.KeyValueWriter, path []byte) {
// ReadStorageTrieNode retrieves the storage trie node and the associated node
// hash with the specified node path.
func ReadStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) ([]byte, common.Hash) {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { rawdbGetStorageTrieNodeTimer.UpdateSince(start) }()
}
data, err := db.Get(storageTrieNodeKey(accountHash, path))
if err != nil {
return nil, common.Hash{}
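
Unlike the snapshot accessors, these functions also compute the node hash they return, and since the deferred update fires when the whole function returns, that work is included in the measurement as written here. A small usage sketch (the path and payload are made up; a real entry holds an RLP-encoded trie node):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    metrics.Enabled = true
    metrics.EnabledExpensive = true

    db := rawdb.NewMemoryDatabase()
    path := []byte{0x01, 0x02}

    // ReadAccountTrieNode returns the stored blob plus its node hash; the read
    // path above exercises the timed branch when expensive metrics are enabled.
    rawdb.WriteAccountTrieNode(db, path, []byte("node-rlp"))
    blob, hash := rawdb.ReadAccountTrieNode(db, path)
    fmt.Printf("%s %x\n", blob, hash)
}
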
10 changes: 10 additions & 0 deletions core/rawdb/metrics.go
@@ -0,0 +1,10 @@
package rawdb

import "github.com/ethereum/go-ethereum/metrics"

var (
rawdbGetAccountTrieNodeTimer = metrics.NewRegisteredTimer("rawdb/get/account/trienode/time", nil)
rawdbGetStorageTrieNodeTimer = metrics.NewRegisteredTimer("rawdb/get/storage/trienode/time", nil)
rawdbGetAccountSnapNodeTimer = metrics.NewRegisteredTimer("rawdb/get/account/snapnode/time", nil)
rawdbGetStorageSnapNodeTimer = metrics.NewRegisteredTimer("rawdb/get/storage/snapnode/time", nil)
)
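
These timers follow the existing slash-separated naming convention and land in the default registry, so they can be looked up by name in code or scraped through geth's usual metrics endpoints. A minimal sketch of retrieving one by name (the recorded duration is arbitrary):

package main

import (
    "time"

    "github.com/ethereum/go-ethereum/metrics"
)

func main() {
    metrics.Enabled = true

    // GetOrRegisterTimer returns the already-registered timer if it exists,
    // otherwise it creates one under the given name in the default registry.
    t := metrics.GetOrRegisterTimer("rawdb/get/account/trienode/time", nil)
    t.Update(1500 * time.Microsecond)
}
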
10 changes: 10 additions & 0 deletions core/state_processor.go
@@ -20,6 +20,7 @@ import (
"errors"
"fmt"
"math/big"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
@@ -29,9 +30,14 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params"
)

var (
processTxTimer = metrics.NewRegisteredTimer("process/tx/time", nil)
)

// StateProcessor is a basic Processor, which takes care of transitioning
// state from one point to another.
//
@@ -104,6 +110,10 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg
systemTxs := make([]*types.Transaction, 0, 2)

for i, tx := range block.Transactions() {
if metrics.EnabledExpensive {
start := time.Now()
defer processTxTimer.UpdateSince(start)
}
if isPoSA {
if isSystemTx, err := posa.IsSystemTransaction(tx, block.Header()); err != nil {
bloomProcessors.Close()
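
One caveat worth noting: the defer processTxTimer.UpdateSince(start) sits inside the transaction loop, and deferred calls only run when Process returns, so each sample measures "this transaction's start until the end of the whole block" rather than a single transaction, and one deferred call accumulates per transaction. A per-transaction measurement would look more like the sketch below (applyOneTx is a hypothetical stand-in for the loop body, not a function in the codebase):

package main

import (
    "time"

    "github.com/ethereum/go-ethereum/metrics"
)

// applyOneTx stands in for the per-transaction work done inside Process.
func applyOneTx(i int) {
    time.Sleep(time.Millisecond)
}

func main() {
    metrics.Enabled = true
    metrics.EnabledExpensive = true

    processTxTimer := metrics.NewRegisteredTimer("process/tx/time", nil)

    for i := 0; i < 3; i++ {
        var txStart time.Time
        if metrics.EnabledExpensive {
            txStart = time.Now()
        }
        applyOneTx(i)
        // Updating at the end of the iteration records the duration of this
        // transaction only, instead of deferring until the function returns.
        if metrics.EnabledExpensive {
            processTxTimer.UpdateSince(txStart)
        }
    }
}
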
5 changes: 3 additions & 2 deletions eth/backend.go
@@ -161,11 +161,12 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
// Optimize memory distribution by reallocating surplus allowance from the
// dirty cache to the clean cache.
if config.StateScheme == rawdb.PathScheme && config.TrieDirtyCache > pathdb.MaxDirtyBufferSize/1024/1024 {
config.TrieDirtyCache = pathdb.MaxDirtyBufferSize / 1024 / 1024
config.TrieCleanCache += config.TrieDirtyCache - pathdb.MaxDirtyBufferSize/1024/1024
log.Info("Capped dirty cache size", "provided", common.StorageSize(config.TrieDirtyCache)*1024*1024, "adjusted", common.StorageSize(pathdb.MaxDirtyBufferSize))
log.Info("Clean cache size", "provided", common.StorageSize(config.TrieCleanCache)*1024*1024)
config.TrieDirtyCache = pathdb.MaxDirtyBufferSize / 1024 / 1024
}
log.Info("Allocated trie memory caches", "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024)
log.Info("Allocated trie memory caches", "schema", config.StateScheme, "trie_clean_cache", common.StorageSize(config.TrieCleanCache)*1024*1024, "trie_dirty_cache", common.StorageSize(config.TrieDirtyCache)*1024*1024, "snapshot_cache", common.StorageSize(config.SnapshotCache)*1024*1024)

// Try to recover offline state pruning only in hash-based.
if config.StateScheme == rawdb.HashScheme {
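
This hunk appears to move the dirty-cache cap so that it is applied after the surplus is added to the clean cache; with the cap applied first, config.TrieDirtyCache - pathdb.MaxDirtyBufferSize/1024/1024 would already be zero and the clean cache would gain nothing. A worked sketch of the reallocation with hypothetical sizes (a 256 MiB dirty-buffer cap is assumed purely for illustration):

package main

import "fmt"

func main() {
    // Hypothetical figures, in MiB.
    const maxDirtyMB = 256
    cleanMB, dirtyMB := 512, 1024

    if dirtyMB > maxDirtyMB {
        // Surplus dirty-cache allowance is handed to the clean cache first,
        // then the dirty cache is capped (the order the diff settles on).
        cleanMB += dirtyMB - maxDirtyMB
        dirtyMB = maxDirtyMB
    }
    fmt.Println("clean:", cleanMB, "MiB, dirty:", dirtyMB, "MiB") // clean: 1280 MiB, dirty: 256 MiB
}
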
16 changes: 16 additions & 0 deletions ethdb/leveldb/leveldb.go
@@ -190,6 +190,10 @@ func (db *Database) Has(key []byte) (bool, error) {

// Get retrieves the given key if it's present in the key-value store.
func (db *Database) Get(key []byte) ([]byte, error) {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbGetTimer.UpdateSince(start) }()
}
dat, err := db.db.Get(key, nil)
if err != nil {
return nil, err
@@ -199,11 +203,19 @@ func (db *Database) Get(key []byte) ([]byte, error) {

// Put inserts the given value into the key-value store.
func (db *Database) Put(key []byte, value []byte) error {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbPutTimer.UpdateSince(start) }()
}
return db.db.Put(key, value, nil)
}

// Delete removes the key from the key-value store.
func (db *Database) Delete(key []byte) error {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbDeleteTimer.UpdateSince(start) }()
}
return db.db.Delete(key, nil)
}

@@ -414,6 +426,10 @@ func (b *batch) ValueSize() int {

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbBatchWriteTimer.UpdateSince(start) }()
}
return b.db.Write(b.b, nil)
}

10 changes: 10 additions & 0 deletions ethdb/metrics.go
@@ -0,0 +1,10 @@
package ethdb

import "github.com/ethereum/go-ethereum/metrics"

var (
EthdbGetTimer = metrics.NewRegisteredTimer("ethdb/get/time", nil)
EthdbPutTimer = metrics.NewRegisteredTimer("ethdb/put/time", nil)
EthdbDeleteTimer = metrics.NewRegisteredTimer("ethdb/delete/time", nil)
EthdbBatchWriteTimer = metrics.NewRegisteredTimer("ethdb/batch/write/time", nil)
)
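
Because these timers are exported from the ethdb package itself, both the leveldb and pebble backends update the same series, so a single metric such as ethdb/get/time reflects key-value latency regardless of which backend the node runs. The sketch below applies the same pattern to a reader; memorydb is used only so the example runs without a disk database and is not instrumented by this commit.

package main

import (
    "fmt"
    "time"

    "github.com/ethereum/go-ethereum/ethdb"
    "github.com/ethereum/go-ethereum/ethdb/memorydb"
    "github.com/ethereum/go-ethereum/metrics"
)

// timedGet mirrors the instrumentation added to the leveldb and pebble Get
// methods: time the read only when expensive metrics are enabled.
func timedGet(db ethdb.KeyValueReader, key []byte) ([]byte, error) {
    if metrics.EnabledExpensive {
        start := time.Now()
        defer func() { ethdb.EthdbGetTimer.UpdateSince(start) }()
    }
    return db.Get(key)
}

func main() {
    metrics.Enabled = true
    metrics.EnabledExpensive = true

    db := memorydb.New()
    _ = db.Put([]byte("key"), []byte("value"))

    val, err := timedGet(db, []byte("key"))
    fmt.Println(string(val), err)
}
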
16 changes: 16 additions & 0 deletions ethdb/pebble/pebble.go
@@ -309,6 +309,10 @@ func (d *Database) Has(key []byte) (bool, error) {

// Get retrieves the given key if it's present in the key-value store.
func (d *Database) Get(key []byte) ([]byte, error) {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbGetTimer.UpdateSince(start) }()
}
d.quitLock.RLock()
defer d.quitLock.RUnlock()
if d.closed {
@@ -326,6 +330,10 @@ func (d *Database) Get(key []byte) ([]byte, error) {

// Put inserts the given value into the key-value store.
func (d *Database) Put(key []byte, value []byte) error {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbPutTimer.UpdateSince(start) }()
}
d.quitLock.RLock()
defer d.quitLock.RUnlock()
if d.closed {
@@ -336,6 +344,10 @@ func (d *Database) Put(key []byte, value []byte) error {

// Delete removes the key from the key-value store.
func (d *Database) Delete(key []byte) error {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbDeleteTimer.UpdateSince(start) }()
}
d.quitLock.RLock()
defer d.quitLock.RUnlock()
if d.closed {
@@ -599,6 +611,10 @@ func (b *batch) ValueSize() int {

// Write flushes any accumulated data to disk.
func (b *batch) Write() error {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { ethdb.EthdbBatchWriteTimer.UpdateSince(start) }()
}
b.db.quitLock.RLock()
defer b.db.quitLock.RUnlock()
if b.db.closed {
20 changes: 20 additions & 0 deletions trie/trie.go
@@ -21,14 +21,22 @@ import (
"bytes"
"errors"
"fmt"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/trie/trienode"
"github.com/ethereum/go-ethereum/triedb/database"
)

var (
trieGetTimer = metrics.NewRegisteredTimer("trie/get/time", nil)
trieReaderGetTimer = metrics.NewRegisteredTimer("trie/reader/get/time", nil)
trieReaderTotalTimer = metrics.NewRegisteredTimer("trie/reader/total/time", nil)
)

// Trie is a Merkle Patricia Trie. Use New to create a trie that sits on
// top of a database. Whenever trie performs a commit operation, the generated
// nodes will be gathered and returned in a set. Once the trie is committed,
@@ -146,6 +154,10 @@ func (t *Trie) Get(key []byte) ([]byte, error) {
if t.committed {
return nil, ErrCommitted
}
if metrics.EnabledExpensive {
start := time.Now()
defer func() { trieGetTimer.UpdateSince(start) }()
}
value, newroot, didResolve, err := t.get(t.root, keybytesToHex(key), 0)
if err == nil && didResolve {
t.root = newroot
@@ -178,7 +190,11 @@ func (t *Trie) get(origNode node, key []byte, pos int) (value []byte, newnode no
}
return value, n, didResolve, err
case hashNode:
start := time.Now()
child, err := t.resolveAndTrack(n, key[:pos])
if metrics.EnabledExpensive {
trieReaderGetTimer.UpdateSince(start)
}
if err != nil {
return nil, n, true, err
}
@@ -586,6 +602,10 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) {
// node's original value. The rlp-encoded blob is preferred to be loaded from
// database because it's easy to decode node while complex to encode node to blob.
func (t *Trie) resolveAndTrack(n hashNode, prefix []byte) (node, error) {
if metrics.EnabledExpensive {
start := time.Now()
defer func() { trieReaderTotalTimer.UpdateSince(start) }()
}
blob, err := t.reader.node(prefix, common.BytesToHash(n))
if err != nil {
return nil, err
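
Of the three trie timers, trieGetTimer covers a whole Trie.Get call, trieReaderGetTimer covers only the node resolutions triggered from Get's hashNode branch, and trieReaderTotalTimer covers every resolveAndTrack call (including those made during inserts and deletes), so the reader timers can be compared against trieGetTimer to see how much of a lookup is spent fetching nodes. Below is a minimal sketch that exercises the Get path; it is only a sketch, and the constructor signatures may differ between versions of this codebase.

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/metrics"
    "github.com/ethereum/go-ethereum/trie"
    "github.com/ethereum/go-ethereum/triedb"
)

func main() {
    metrics.Enabled = true
    metrics.EnabledExpensive = true

    // An empty in-memory trie; Get below runs through the timed path guarded
    // by metrics.EnabledExpensive (trieGetTimer).
    db := triedb.NewDatabase(rawdb.NewMemoryDatabase(), nil)
    tr := trie.NewEmpty(db)

    tr.MustUpdate([]byte("key"), []byte("value"))
    val, err := tr.Get([]byte("key"))
    fmt.Println(string(val), err)
}
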
