From b2c531c8bb62d19b9a836a1864a5748f43096614 Mon Sep 17 00:00:00 2001
From: ledgerwatch
Date: Tue, 3 May 2022 22:43:59 +0100
Subject: [PATCH] [alpha] Move from devel (#4059)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Change version to alpha (#3926)
  Co-authored-by: Alexey Sharp
  Co-authored-by: Alex Sharp
* docs: update libmdbx links (#3929)
* Makefile: refactor build flags and fix 1.17 (#3930)
* Fix some cli flag descriptions (#3933)
  * Fix some cli flag descriptions
  * add note about verbosity
* min requirement to go 1.18 (#3934)
  * save
  * save
  * save
* Added Ethstats service (#3931)
  * somewhat there but not yet
  * lol
  * more efficient ethstats
  * lint
  * not die on no wifi
* Update bor mumbai config (#3937)
* Update ci.yml (#3936)
* Use heimdall url in integration bor consensus (#3940)
* Downloader: re-use flags defaults (#3941)
* torrent: print peers amount in logs (#3942)
* Observer - P2P network crawler (#3928)
  Observer crawls the Ethereum network and collects information about the nodes.
* Torrent conns print (#3943)
  * save
  * save
* [erigon2] Fuzz tests for commitment (#3939)
  * [erigon2] Fuzz tests for commitment
  * Cleanup
  * Update to erigon-lib main
  Co-authored-by: Alexey Sharp
* Introduce unlimited download rate (#3945)
  * Introduce unlimited download rate
  * More generous burst
  Co-authored-by: Alexey Sharp
* Replace ioutil with io and os (#3946)
* Sentry GRPC: rename Peers to PeerEvents (#3944)
  * Sentry GRPC: rename Peers to PeerEvents
    See https://github.com/ledgerwatch/interfaces/pull/101
  * Update to erigon-lib main
  Co-authored-by: Alexey Sharp
* cleaned up forkchoices db insertions #3949
* fixed ethstats (#3951)
* bsc: disable snap sync (#3955)
* bsc: disable snap sync (#3956)
* Snapshots: support empty buf case (#3957)
* Snapshots: rare nil pointer at fresh start (#3958)
* got rid of the automatic usage of net api (#3952)
  * got rid of the automatic usage of net api
  * less confusing comment
  * ops
  * ops2
  * important
  * ops
* RPC: admin.peers() (#3960)
  * RPC: admin.peers()
    This RPC method returns information about the connected remote nodes.
    https://geth.ethereum.org/docs/rpc/ns-admin#admin_peers
    The peers are collected from all configured sentries.
    See: https://github.com/ledgerwatch/interfaces/pull/102
    Test with:
    curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc": "2.0", "method": "admin_peers", "params": [], "id":1}' localhost:8545
  * save
  * linter fix
  Co-authored-by: alex.sharov
* sentry: refactor flags, add maxpeers. (#3961)
* Experiment files 1 by 1 (#3959)
  * Experiment files 1 by 1
  * Remove check
  * sort preverified snapshots
  * docs: docker permissions
  * sort preverified snapshots
  * sort preverified snapshots
  * sort preverified snapshots
  * sort preverified snapshots
  * sort preverified snapshots
  * sort preverified snapshots
  * save
  * Fix speed log, remove file name
  * Move timer out of the loop
  * Calculate total size of downloaded files
  * Fixes
  * Fix
  * Fix
  * Fix
  * Move downloadData
  * Fix
  * Revert "Fix"
    This reverts commit 038e02b8a4d23cd32ddb111e9f325fc4ce1bbe2b.
  * Revert "Move downloadData"
    This reverts commit 8130a4d9bdc0705082eb7fe94e2261c9313f8482.
  * Revert "Fix"
    This reverts commit 1dca25bd68772bc42ac710c24698c8670f9f6b86.
  * Revert "Fix"
    This reverts commit ee5a1e82abd47bef4f9d8f0f68b8497476d29c0b.
  * Revert "Fix"
    This reverts commit 8af7be71d4685e0d6115fef91ed2f304695e1df9.
  * Revert "Fixes"
    This reverts commit 50509af81f3721cca957cd15d0286e8f30e5097b.
* Revert "Calculate total size of downloaded files" This reverts commit 64a26df54f6226d739c8a5b57b32ad5af07d3061. * Remove progress * Remove progress Co-authored-by: Alexey Sharp Co-authored-by: alex.sharov * Update stage_headers.go (#3966) * Snapshots: open bittorrent udp port in docker (#3969) * Snapshots: open torrent udp in docker-compose.yml * Snapshots: open torrent udp in docker-compose.yml * Delete blocks in [from, to) range (#3970) * Snapshots: allow stage_headers --unwind behind available snapshots (#3971) * save * save * save * Integration: allow headers --reset (#3972) * Bsc: enable syncmode=snap by default #3973 * rlp: add support for optional struct fields (#22832) (#3977) This adds support for a new struct tag "optional". Using this tag, structs used for RLP encoding/decoding can be extended in a backwards-compatible way, by adding new fields at the end. see geth commit https://github.com/ethereum/go-ethereum/commit/700df1442d714cb3c42a602c39c042ce88be463f Co-authored-by: Felix Lange * Forgot to check err status (#3978) * Forgot to check err status * Invalid header shouldn't fail the entire stage * Potential fix for verification (#3962) * Potential fix for verification * multi verify Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp * p2p/discover/v4wire: use optional RLP field for EIP-868 seq (#3963) This changes the definitions of Ping and Pong, adding an optional field for the sequence number. This field was previously encoded/decoded using the "tail" struct tag, but using "optional" is much nicer. see https://github.com/ethereum/go-ethereum/pull/22842 Co-authored-by: Felix Lange * FullSync instead of FastSync (#3980) * Update README.md (#3984) * Update README.md (#3985) * Update README.md (#3987) * Update README.md (#3988) * Update README.md (#3989) * save (#3983) * Update to erigon-lib main (#3992) Co-authored-by: Alex Sharp * TxLookup fix 2 (#3994) * save * save * tolerate some fails * tolerate some fails Co-authored-by: Alexey Sharp * No NewBlock gossip after Merge (#3995) * Check that safe & finalized blocks are canonical for no-op forkChoice (#3997) * Place finishHandlingForkChoice after startHandlingForkChoice * forkChoiceMessage -> forkChoice * Check that safe & finalized blocks are canonical for no-op forkChoice * Re-introduced cleanup of temporary table (#3999) * Re-introduced cleanup of temporary table * Fix sign * Fix lint * Fix lint * Revert Co-authored-by: Alex Sharp * Update skip_analysis.go (#4003) * Downloader: calc stat inside, add --torrent.download.slots and limit downloads inside (#3986) * save * save * save * save * save * save * save * save * save * p2p: speed-up TestUDPv4_LookupIterator (#4000) The test was slow, because it was trying to find predefined nodeIDs (lookupTestnet) by generating random keys and trying to find their neighbours until it hits all nodes of the lookupTestnet. In addition each FindNode response was waited for 0.5 sec (respTimeout). This could take up to 30 sec and fail the test suite. A fake random key generator is now used during the test. It issues the expected keys, and the lookup converges quickly. The reply timeout is reduced for the test. Now it normally takes less than.1 sec. 
* p2p: refactor MaxPendingPeers handling (#3981)
  * use semaphore instead of a chan struct{}
  * move MaxPendingPeers default value to DefaultConfig.P2P
  * log Error if Accept fails
  * replace quit channel with context
* downloader stuck on 99.9% fix #4004
* Open only existing torrent files (#4007)
  * save
  * save
  * save
  * save
  * save
* Open shorter logs #400
* Fix empty "Tables" log line (#4008)
  * save
  * save
  * save
* Torrent: maxpeers flag was used incorrectly
* reduce downloader deps (#4010)
  * reduce downloader deps
  * reduce downloader deps
* reduce downloader deps (#4011)
* Handle system-txn in block_reader (#4012)
  * reduce downloader deps
  * reduce downloader deps
  * save
  * reduce downloader deps
* [integration tool] Clean BorReceipt when reset state (#4013)
  * Update reset_state.go
  * Update reset_state.go
* rename field "type" (#4015)
  * save
  * save
  * save
* typed sender (#4016)
  * save
  * save
* Observer: fix panic on clean start (#4002) (#4017)
  Problem: (nil, nil) from CountPingErrors was not handled. This happens if the
  node is not in the db (a bootstrap node), and was never crawled before.
* Add override.terminaltotaldifficulty flag (#4018)
  * cmd/utils: initialize f.Value before setting variable
  * override.terminaltotaldifficulty flag
  * Add OverrideTerminalTotalDifficulty to default_flags
* p2p: fix flaky TestUDPv5_lookupE2E (#4020)
  The test was flaky because of the "endpoint prediction". The test starts 5
  nodes one by one. Node 0 is used as a bootstrap node for nodes 1-4. When it is
  about to add, say, node 3, nodes 0 and 1 might already have had a chance to
  communicate, and updateEndpoints() deletes the node 0 UDP port, because the
  fallback UDP port was not configured. In this case node 3 would get a
  bootstrap node 0 without a port, leading to an error:
  v5_udp_test.go:110: bad bootstrap node "enr:...": missing UDP port
  The problem was reproducible by this command:
  go test ./p2p/discover -run TestUDPv5_lookupE2E -count 500
* Added Goerli Full Node Space Requirements (#4021)
* p2p: crawler-friendly handshake (#3982)
  * exchange RLPx Hello even when maxpeers limit is reached
  * bump MaxPendingPeers to increase the default handshake queue (and the likelihood of Hello exchange)
* Add link about Raspberry Pi (#4022)
* More efficient verification of headers for Parlia when snapshots are used (#3998)
  * Update stageloop.go
  * Print
  * Consider snapshot headers as parlia checkpoints
  * Not fail after not loading snapshot
  * Lazy snapshots
  * Print number of validators
  * More printing
  * Use epoch instead of checkpoint interval
  * Reduce logging
  * Fix compilation
  * Remove trace jump dest
  * Fix lint
  * Not store snapshots every epoch
  * Separate snapshot for verification and finalisation
  Co-authored-by: Alex Sharp
  Co-authored-by: Alexey Sharp
* Docker build: make db-tools depend on git-submodules (#4024)
  * save
  * save
  * save
  * save
  * save
* More careful handling of sequences in stage_headers --reset (#4023)
  * save
  * save
  * save
  * save
* added override merge fork block (#4027)
* Fix non-starting download (#4031)
  * save
  * save
  * save
  * save
* save (#4032)
* Truncate bor receipts on unwind (#4033)
  Co-authored-by: Alexey Sharp
* eth/filters: Fix filterLogs() (#4036)
* index segments by maximum of 2 workers #4041
* trace read parent header from snapshot and lru #4042
* make sure stage_headers --reset doesn't leave garbage in bodies table #4043
* Fix for Bor (Polygon) (#4044)
  * print branchHash
  * Print state changes
  * Print val
  * Fix for author
  * Remove prints
  Co-authored-by: Alexey Sharp
* Cleanup isBor (#4045)
  Co-authored-by: Alexey Sharp
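The MaxPendingPeers refactor (#3981) above replaces a `chan struct{}` token pool with a weighted semaphore, and a context replaces the old quit channel. A hedged sketch of that pattern, not the actual p2p server code; function and variable names are ours:

```go
package sketch

import (
	"context"
	"log"
	"net"

	"golang.org/x/sync/semaphore"
)

// listenLoop bounds the number of concurrent inbound handshakes with a
// weighted semaphore; ctx cancellation plays the role of the old quit channel.
func listenLoop(ctx context.Context, ln net.Listener, maxPendingPeers int) {
	slots := semaphore.NewWeighted(int64(maxPendingPeers))
	for {
		// Wait for a free handshake slot; returns early if ctx is cancelled.
		if err := slots.Acquire(ctx, 1); err != nil {
			return
		}
		conn, err := ln.Accept()
		if err != nil {
			slots.Release(1)
			log.Printf("accept failed: %v", err) // "log Error if Accept fails"
			return
		}
		go func(c net.Conn) {
			defer slots.Release(1)
			defer c.Close()
			// ... the RLPx handshake would run here ...
		}(conn)
	}
}
```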
* Speed up docker image build by use layer cache (#4038)
  * speed up docker image build by use layer cache
  * rearrange Dockerfile
  * enable docker layer cache in github action
* state_processor: fix ignored SkipAnalysis() result (#4046)
  `cfg` is not a pointer
* p2p: improve test TestTable_findnodeByID (#4047)
  * refactor test
  * add a fast fixed-examples test for the main suite
  * split slow test for the integration suite
* Update skip_analysis.go (#4052)
* More relaxed inclusion of headers in the downloader (#4050)
  * More relaxed inclusion of headers in the downloader
  * Fix
  Co-authored-by: Alexey Sharp
* Revert "Speed up docker image build by use layer cache (#4038)" (#4054)
  This reverts commit e758fb800befc36185b49f7ab8c21223ff476d8c.
* Increase max DB size to 8 Tb for chain data only (#4055)
  * Update node.go
  * Update node.go
  * Point to erigon-lib alpha

Co-authored-by: Alexey Sharp
Co-authored-by: Alex Sharp
Co-authored-by: battlmonstr
Co-authored-by: Chase Wright
Co-authored-by: Alex Sharov
Co-authored-by: Giulio rebuffo
Co-authored-by: Krishna Upadhyaya
Co-authored-by: Håvard Anda Estensen
Co-authored-by: Enrique Jose Avila Asapche
Co-authored-by: Felix Lange
Co-authored-by: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com>
Co-authored-by: gaia
Co-authored-by: EXEC
Co-authored-by: Groute
---
 Dockerfile                                 |   3 +-
 Makefile                                   |   7 +-
 README.md                                  |   6 +-
 cmd/downloader/downloader/downloader.go    | 388 +++++++++---------
 cmd/downloader/downloader/grpc_server.go   | 118 ++----
 .../downloader/torrentcfg/torrentcfg.go    |  20 +-
 cmd/downloader/downloader/util.go          | 105 ++++-
 cmd/downloader/main.go                     |  12 +-
 cmd/hack/hack.go                           |  13 +-
 cmd/integration/commands/reset_state.go    |   3 +
 cmd/integration/commands/stages.go         |  75 ++--
 cmd/observer/observer/crawler.go           |   8 +-
 cmd/rpcdaemon/commands/trace_adhoc.go      |  16 +-
 cmd/rpcdaemon/commands/txpool_api.go       |   5 +-
 cmd/sentry/sentry/downloader.go            |   2 +-
 cmd/state/commands/erigon2.go              |  16 +-
 cmd/utils/customflags.go                   |   5 +-
 cmd/utils/flags.go                         |  40 +-
 consensus/parlia/api.go                    |   8 +-
 consensus/parlia/parlia.go                 |  66 ++-
 core/blockchain.go                         |  11 +-
 core/genesis_test.go                       |  42 +-
 core/rawdb/accessors_chain.go              | 190 +++++----
 core/rawdb/accessors_chain_test.go         |  38 +-
 core/rawdb/bor_receipts.go                 |  11 +
 core/skip_analysis.go                      |   2 +-
 core/state/plain_state_writer.go           |   2 +
 core/state_processor.go                    |   8 +-
 eth/backend.go                             |  89 ++--
 eth/ethconfig/config.go                    |  69 ----
 eth/ethconsensusconfig/config.go           |  78 ++++
 eth/filters/filter.go                      |   2 +-
 eth/stagedsync/stage.go                    |   4 +-
 eth/stagedsync/stage_bodies.go             |  10 +-
 eth/stagedsync/stage_call_traces.go        |  46 ++-
 eth/stagedsync/stage_execute.go            |  25 +-
 eth/stagedsync/stage_headers.go            | 277 +++++------
 eth/stagedsync/stage_mining_exec.go        |   3 +-
 eth/stagedsync/stage_senders.go            |  27 +-
 eth/stagedsync/sync.go                     |   3 +-
 ethdb/privateapi/txpool.go                 |  14 +-
 go.mod                                     |  22 +-
 go.sum                                     |  99 +++--
 migrations/txs_begin_end_test.go           |   2 +-
 node/defaults.go                           |   9 +-
 node/node.go                               |   4 +-
 p2p/dial.go                                |   3 -
 p2p/discover/common.go                     |  11 +
 p2p/discover/table_integration_test.go     |  28 ++
 p2p/discover/table_test.go                 | 125 +++---
 p2p/discover/v4_lookup_test.go             |  24 +-
 p2p/discover/v4_udp.go                     |  14 +-
 p2p/discover/v4_udp_test.go                |  42 +-
 p2p/discover/v5_udp_test.go                |  24 +-
 p2p/server.go                              |  99 ++---
 p2p/server_test.go                         | 152 ++++---
 p2p/simulations/adapters/inproc.go         |   1 +
 params/networkname/network_name.go         |  18 +
 params/version.go                          |   4 +-
 turbo/app/snapshots.go                     |   3 +-
 turbo/cli/default_flags.go                 |   4 +
 turbo/snapshotsync/block_reader.go         |   8 +-
turbo/snapshotsync/block_snapshots.go | 314 +++----------- turbo/snapshotsync/block_snapshots_test.go | 55 +-- turbo/snapshotsync/snap/files.go | 202 +++++++++ .../{snapshotsynccli => snap}/flags.go | 2 +- turbo/stages/bodydownload/body_algos.go | 7 +- turbo/stages/headerdownload/header_algos.go | 50 ++- .../headerdownload/header_data_struct.go | 4 + turbo/stages/headerdownload/header_test.go | 4 +- 70 files changed, 1828 insertions(+), 1373 deletions(-) create mode 100644 eth/ethconsensusconfig/config.go create mode 100644 p2p/discover/table_integration_test.go create mode 100644 turbo/snapshotsync/snap/files.go rename turbo/snapshotsync/{snapshotsynccli => snap}/flags.go (97%) diff --git a/Dockerfile b/Dockerfile index 006dd7bcd52..a9836e39f3f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,8 +6,7 @@ RUN apk --no-cache add make gcc g++ linux-headers git bash ca-certificates libgc WORKDIR /app ADD . . -# expect that host run `git submodule update --init` -RUN make erigon rpcdaemon integration sentry txpool downloader hack db-tools +RUN make erigon rpcdaemon integration sentry txpool downloader hack observer db-tools FROM docker.io/library/alpine:3.15 diff --git a/Makefile b/Makefile index 4845587a9c7..fbd0118c98c 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ GO_FLAGS += -ldflags "-X ${PACKAGE}/params.GitCommit=${GIT_COMMIT} -X ${PACKAGE} GOBUILD = $(CGO_CFLAGS) $(GO) build $(GO_FLAGS) GO_DBG_BUILD = $(DBG_CGO_CFLAGS) $(GO) build $(GO_FLAGS) -tags $(BUILD_TAGS),debug -gcflags=all="-N -l" # see delve docs -GOTEST = GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... -p 2 +GOTEST = GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... -p 2 -tags $(BUILD_TAGS),integration default: all @@ -29,7 +29,7 @@ go-version: exit 1 ;\ fi -docker: +docker: git-submodules DOCKER_BUILDKIT=1 docker build -t erigon:latest --build-arg git_commit='${GIT_COMMIT}' --build-arg git_branch='${GIT_BRANCH}' --build-arg git_tag='${GIT_TAG}' . xdg_data_home := ~/.local/share @@ -74,7 +74,7 @@ $(COMMANDS): %: %.cmd all: erigon $(COMMANDS) -db-tools: +db-tools: git-submodules @echo "Building db-tools" # hub.docker.com setup incorrect gitpath for git modules. Just remove it and re-init submodule. @@ -137,6 +137,7 @@ escape: cd $(path) && go test -gcflags "-m -m" -run none -bench=BenchmarkJumpdest* -benchmem -memprofile mem.out git-submodules: + @[ -d ".git" ] || (echo "Not a git repository" && exit 1) @echo "Updating git submodules" @# Dockerhub using ./hooks/post-checkout to set submodules, so this line will fail on Dockerhub @git submodule update --quiet --init --recursive --force || true diff --git a/README.md b/README.md index 10aabb68b0f..d280cba9d25 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ System Requirements =================== For an Archive node of Mainnet we recommend >=3TB storage space: 1.8TB state (as of March 2022), -200GB temp files (can symlink or mount folder `/etl-tmp` to another disk). Mainnet Full node (see `--prune*` flags): 400Gb (April 2022), BSC Archive: 7Tb. BSC Full: 1Tb +200GB temp files (can symlink or mount folder `/etl-tmp` to another disk). Mainnet Full node (see `--prune*` flags): 400Gb (April 2022), BSC Archive: 7Tb. BSC Full: 1Tb. Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022). SSD or NVMe. Do not recommend HDD - on HDD Erigon will always stay N blocks behind chain tip, but not fall behind. Bear in mind that SSD performance deteriorates when close to capacity. 
@@ -368,6 +368,10 @@ Docker uses user erigon with UID/GID 1000 (for security reasons). You can see th Can fix by giving a host's user ownership of the folder, where the host's user UID/GID is the same as the docker's user UID/GID (1000). More details in [post](https://www.fullstaq.com/knowledge-hub/blogs/docker-and-the-host-filesystem-owner-matching-problem) +### Run RaspberyPI + +https://github.com/mathMakesArt/Erigon-on-RPi-4 + Getting in touch ================ diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index ac417a7d3f7..4d33c1092f4 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "runtime" + "sync" "time" "github.com/anacrolix/torrent" @@ -13,14 +14,17 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/semaphore" ) -const ASSERT = false - type Protocols struct { TorrentClient *torrent.Client DB kv.RwDB cfg *torrentcfg.Cfg + + statsLock *sync.RWMutex + stats AggStats + snapshotDir *dir.Rw } func New(cfg *torrentcfg.Cfg, snapshotDir *dir.Rw) (*Protocols, error) { @@ -43,6 +47,8 @@ func New(cfg *torrentcfg.Cfg, snapshotDir *dir.Rw) (*Protocols, error) { cfg: cfg, TorrentClient: torrentClient, DB: cfg.DB, + statsLock: &sync.RWMutex{}, + snapshotDir: snapshotDir, }, nil } @@ -66,245 +72,219 @@ func readPeerID(db kv.RoDB) (peerID []byte, err error) { return peerID, nil } -func (cli *Protocols) Close() { - for _, tr := range cli.TorrentClient.Torrents() { - tr.Drop() +func (cli *Protocols) Start(ctx context.Context, silent bool) error { + if err := BuildTorrentsAndAdd(ctx, cli.snapshotDir, cli.TorrentClient); err != nil { + return fmt.Errorf("BuildTorrentsAndAdd: %w", err) } - cli.TorrentClient.Close() - cli.DB.Close() - if cli.cfg.CompletionCloser != nil { - cli.cfg.CompletionCloser.Close() //nolint - } -} - -func (cli *Protocols) PeerID() []byte { - peerID := cli.TorrentClient.PeerID() - return peerID[:] -} -func LoggingLoop(ctx context.Context, torrentClient *torrent.Client) { - interval := time.Second * 20 - logEvery := time.NewTicker(interval) - defer logEvery.Stop() - var m runtime.MemStats - var stats AggStats + var sem = semaphore.NewWeighted(int64(cli.cfg.DownloadSlots)) - for { - select { - case <-ctx.Done(): - return - case <-logEvery.C: - torrents := torrentClient.Torrents() - allComplete := true - gotInfo := 0 + go func() { + for { + torrents := cli.TorrentClient.Torrents() for _, t := range torrents { - select { - case <-t.GotInfo(): // all good - gotInfo++ - default: + <-t.GotInfo() + if t.Complete.Bool() { + continue } - allComplete = allComplete && t.Complete.Bool() - } - if gotInfo < len(torrents) { - log.Info(fmt.Sprintf("[torrent] Waiting for torrents metadata: %d/%d", gotInfo, len(torrents))) - continue - } + if err := sem.Acquire(ctx, 1); err != nil { + return + } + t.AllowDataDownload() + t.DownloadAll() + go func(t *torrent.Torrent) { + //r := t.NewReader() + //_, _ = io.Copy(io.Discard, r) // enable streaming - it will prioritize sequential download - runtime.ReadMemStats(&m) - stats = CalcStats(stats, interval, torrentClient) - if allComplete { - log.Info("[torrent] Seeding", - "download", common2.ByteCount(uint64(stats.readBytesPerSec))+"/s", - "upload", common2.ByteCount(uint64(stats.writeBytesPerSec))+"/s", - "unique_peers", stats.peersCount, - "files", stats.torrentsCount, - "alloc", 
common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) - continue + <-t.Complete.On() + sem.Release(1) + }(t) } + time.Sleep(30 * time.Second) + } + }() + + go func() { + var m runtime.MemStats + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() - log.Info("[torrent] Downloading", - "Progress", fmt.Sprintf("%.2f%%", stats.Progress), - "download", common2.ByteCount(uint64(stats.readBytesPerSec))+"/s", - "upload", common2.ByteCount(uint64(stats.writeBytesPerSec))+"/s", - "unique_peers", stats.peersCount, - "files", stats.torrentsCount, - "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) - if stats.peersCount == 0 { - ips := torrentClient.BadPeerIPs() - if len(ips) > 0 { - log.Info("[torrent] Stats", "banned", ips) + interval := 5 * time.Second + statEvery := time.NewTicker(interval) + defer statEvery.Stop() + for { + select { + case <-ctx.Done(): + return + case <-statEvery.C: + cli.ReCalcStats(interval) + + case <-logEvery.C: + if silent { + continue } + stats := cli.Stats() + + if stats.MetadataReady < stats.FilesTotal { + log.Info(fmt.Sprintf("[Snapshots] Waiting for torrents metadata: %d/%d", stats.MetadataReady, stats.FilesTotal)) + continue + } + + runtime.ReadMemStats(&m) + if stats.Completed { + log.Info("[Snapshots] Seeding", + "up", common2.ByteCount(stats.UploadRate)+"/s", + "peers", stats.PeersUnique, + "connections", stats.ConnectionsTotal, + "files", stats.FilesTotal, + "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) + continue + } + + log.Info("[Snapshots] Downloading", + "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, common2.ByteCount(stats.BytesCompleted), common2.ByteCount(stats.BytesTotal)), + "download", common2.ByteCount(stats.DownloadRate)+"/s", + "upload", common2.ByteCount(stats.UploadRate)+"/s", + "peers", stats.PeersUnique, + "connections", stats.ConnectionsTotal, + "files", stats.FilesTotal, + "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys)) + if stats.PeersUnique == 0 { + ips := cli.TorrentClient.BadPeerIPs() + if len(ips) > 0 { + log.Info("[Snapshots] Stats", "banned", ips) + } + } } } - } -} - -func (cli *Protocols) StopSeeding(hash metainfo.Hash) error { - t, ok := cli.TorrentClient.Torrent(hash) - if !ok { - return nil - } - ch := t.Closed() - t.Drop() - <-ch + }() return nil } -type AggStats struct { - readBytesPerSec int64 - writeBytesPerSec int64 - peersCount int64 - - Progress float32 - torrentsCount int +func (cli *Protocols) ReCalcStats(interval time.Duration) { + cli.statsLock.Lock() + defer cli.statsLock.Unlock() + prevStats, stats := cli.stats, cli.stats - bytesRead int64 - bytesWritten int64 -} + peers := make(map[torrent.PeerID]struct{}, 16) + torrents := cli.TorrentClient.Torrents() + connStats := cli.TorrentClient.ConnStats() -func CalcStats(prevStats AggStats, interval time.Duration, client *torrent.Client) (result AggStats) { - var aggBytesCompleted, aggLen int64 - //var aggCompletedPieces, aggNumPieces, aggPartialPieces int - peers := map[torrent.PeerID]*torrent.PeerConn{} - torrents := client.Torrents() - connStats := client.ConnStats() - - result.bytesRead += connStats.BytesReadUsefulIntendedData.Int64() - result.bytesWritten += connStats.BytesWrittenData.Int64() + stats.Completed = true + stats.BytesRead = uint64(connStats.BytesReadUsefulIntendedData.Int64()) + stats.BytesWritten = uint64(connStats.BytesWrittenData.Int64()) + stats.BytesTotal, stats.BytesCompleted, stats.ConnectionsTotal, stats.MetadataReady = 0, 0, 0, 0 for _, t := range 
torrents { - aggBytesCompleted += t.BytesCompleted() - aggLen += t.Length() - - for _, peer := range t.PeerConns() { - peers[peer.PeerID] = peer + select { + case <-t.GotInfo(): + stats.MetadataReady++ + for _, peer := range t.PeerConns() { + stats.ConnectionsTotal++ + peers[peer.PeerID] = struct{}{} + } + stats.BytesCompleted += uint64(t.BytesCompleted()) + stats.BytesTotal += uint64(t.Length()) + default: } + + stats.Completed = stats.Completed && t.Complete.Bool() } - result.readBytesPerSec += (result.bytesRead - prevStats.bytesRead) / int64(interval.Seconds()) - result.writeBytesPerSec += (result.bytesWritten - prevStats.bytesWritten) / int64(interval.Seconds()) + stats.DownloadRate = (stats.BytesRead - prevStats.BytesRead) / uint64(interval.Seconds()) + stats.UploadRate = (stats.BytesWritten - prevStats.BytesWritten) / uint64(interval.Seconds()) - result.Progress = float32(float64(100) * (float64(aggBytesCompleted) / float64(aggLen))) + if stats.BytesTotal == 0 { + stats.Progress = 0 + } else { + stats.Progress = float32(float64(100) * (float64(stats.BytesCompleted) / float64(stats.BytesTotal))) + if stats.Progress == 100 && !stats.Completed { + stats.Progress = 99.99 + } + } + stats.PeersUnique = int32(len(peers)) + stats.FilesTotal = int32(len(torrents)) - result.peersCount = int64(len(peers)) - result.torrentsCount = len(torrents) - return result + cli.stats = stats } -func AddTorrentFile(ctx context.Context, torrentFilePath string, torrentClient *torrent.Client) (mi *metainfo.MetaInfo, err error) { - mi, err = metainfo.LoadFromFile(torrentFilePath) - if err != nil { - return nil, err - } - mi.AnnounceList = Trackers +func (cli *Protocols) Stats() AggStats { + cli.statsLock.RLock() + defer cli.statsLock.RUnlock() + return cli.stats +} - t := time.Now() - _, err = torrentClient.AddTorrent(mi) - if err != nil { - return mi, err - } - took := time.Since(t) - if took > 3*time.Second { - log.Info("[torrent] Check validity", "file", torrentFilePath, "took", took) +func (cli *Protocols) Close() { + //for _, tr := range cli.TorrentClient.Torrents() { + // go func() {}() + // fmt.Printf("alex: CLOse01: %s\n", tr.Name()) + // tr.DisallowDataUpload() + // fmt.Printf("alex: CLOse02: %s\n", tr.Name()) + // tr.DisallowDataDownload() + // fmt.Printf("alex: CLOse03: %s\n", tr.Name()) + // ch := t.Closed() + // tr.Drop() + // <-ch + //} + cli.TorrentClient.Close() + cli.DB.Close() + if cli.cfg.CompletionCloser != nil { + if err := cli.cfg.CompletionCloser.Close(); err != nil { + log.Warn("[Snapshots] CompletionCloser", "err", err) + } } - return mi, nil } -// AddTorrentFiles - adding .torrent files to torrentClient (and checking their hashes), if .torrent file -// added first time - pieces verification process will start (disk IO heavy) - Progress -// kept in `piece completion storage` (surviving reboot). Once it done - no disk IO needed again. 
-// Don't need call torrent.VerifyData manually -func AddTorrentFiles(ctx context.Context, snapshotsDir *dir.Rw, torrentClient *torrent.Client) error { - files, err := AllTorrentPaths(snapshotsDir.Path) - if err != nil { - return err - } - for _, torrentFilePath := range files { - if _, err := AddTorrentFile(ctx, torrentFilePath, torrentClient); err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } +func (cli *Protocols) PeerID() []byte { + peerID := cli.TorrentClient.PeerID() + return peerID[:] +} +func (cli *Protocols) StopSeeding(hash metainfo.Hash) error { + t, ok := cli.TorrentClient.Torrent(hash) + if !ok { + return nil } - + ch := t.Closed() + t.Drop() + <-ch return nil } -// ResolveAbsentTorrents - add hard-coded hashes (if client doesn't have) as magnet links and download everything -func ResolveAbsentTorrents(ctx context.Context, torrentClient *torrent.Client, preverifiedHashes []metainfo.Hash, snapshotDir *dir.Rw, silent bool) error { - mi := &metainfo.MetaInfo{AnnounceList: Trackers} - for i := range preverifiedHashes { - if _, ok := torrentClient.Torrent(preverifiedHashes[i]); ok { - continue - } - magnet := mi.Magnet(&preverifiedHashes[i], nil) - t, err := torrentClient.AddMagnet(magnet.String()) - if err != nil { - return err - } - t.AllowDataDownload() - t.AllowDataUpload() - } - if !silent { - ctxLocal, cancel := context.WithCancel(ctx) - defer cancel() - go LoggingLoop(ctxLocal, torrentClient) - } +type AggStats struct { + MetadataReady, FilesTotal int32 + PeersUnique int32 + ConnectionsTotal uint64 - for _, t := range torrentClient.Torrents() { - select { - case <-ctx.Done(): - return ctx.Err() - case <-t.GotInfo(): - if !t.Complete.Bool() { - t.DownloadAll() - } - mi := t.Metainfo() - if err := CreateTorrentFileIfNotExists(snapshotDir, t.Info(), &mi); err != nil { - return err - } - } - } + Completed bool + Progress float32 - return nil -} + BytesCompleted, BytesTotal uint64 -//nolint -func waitForChecksumVerify(ctx context.Context, torrentClient *torrent.Client) { - //TODO: tr.VerifyData() - find when to call it - ctx, cancel := context.WithCancel(ctx) - defer cancel() - go func() { - interval := time.Second * 5 - logEvery := time.NewTicker(interval) - defer logEvery.Stop() + UploadRate, DownloadRate uint64 - for { - select { - case <-ctx.Done(): - return - case <-logEvery.C: - var aggBytesCompleted, aggLen int64 - for _, t := range torrentClient.Torrents() { - aggBytesCompleted += t.BytesCompleted() - aggLen += t.Length() - } + BytesRead uint64 + BytesWritten uint64 +} - line := fmt.Sprintf( - "[torrent] verifying snapshots: %s/%s", - common2.ByteCount(uint64(aggBytesCompleted)), - common2.ByteCount(uint64(aggLen)), - ) - log.Info(line) - } - } - }() - torrentClient.WaitAll() // wait for checksum verify +// AddTorrentFile - adding .torrent file to torrentClient (and checking their hashes), if .torrent file +// added first time - pieces verification process will start (disk IO heavy) - Progress +// kept in `piece completion storage` (surviving reboot). Once it done - no disk IO needed again. 
+// Don't need call torrent.VerifyData manually +func AddTorrentFile(ctx context.Context, torrentFilePath string, torrentClient *torrent.Client) (*torrent.Torrent, error) { + mi, err := metainfo.LoadFromFile(torrentFilePath) + if err != nil { + return nil, err + } + mi.AnnounceList = Trackers + t, err := torrentClient.AddTorrent(mi) + if err != nil { + return nil, err + } + t.DisallowDataDownload() + t.AllowDataUpload() + return t, nil } func VerifyDtaFiles(ctx context.Context, snapshotDir string) error { @@ -341,12 +321,12 @@ func VerifyDtaFiles(ctx context.Context, snapshotDir string) error { err = verifyTorrent(&info, snapshotDir, func(i int, good bool) error { j++ if !good { - log.Error("[torrent] Verify hash mismatch", "at piece", i, "file", f) + log.Error("[Snapshots] Verify hash mismatch", "at piece", i, "file", f) return fmt.Errorf("invalid file") } select { case <-logEvery.C: - log.Info("[torrent] Verify", "Progress", fmt.Sprintf("%.2f%%", 100*float64(j)/float64(totalPieces))) + log.Info("[Snapshots] Verify", "Progress", fmt.Sprintf("%.2f%%", 100*float64(j)/float64(totalPieces))) case <-ctx.Done(): return ctx.Err() default: @@ -357,6 +337,6 @@ func VerifyDtaFiles(ctx context.Context, snapshotDir string) error { return err } } - log.Info("[torrent] Verify succeed") + log.Info("[Snapshots] Verify succeed") return nil } diff --git a/cmd/downloader/downloader/grpc_server.go b/cmd/downloader/downloader/grpc_server.go index 06db650ed42..94997ded747 100644 --- a/cmd/downloader/downloader/grpc_server.go +++ b/cmd/downloader/downloader/grpc_server.go @@ -3,15 +3,14 @@ package downloader import ( "context" "errors" - "path/filepath" - "github.com/anacrolix/torrent" "github.com/anacrolix/torrent/metainfo" "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/gointerfaces" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" prototypes "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" "google.golang.org/protobuf/types/known/emptypb" ) @@ -23,108 +22,79 @@ var ( _ proto_downloader.DownloaderServer = &GrpcServer{} ) -func NewGrpcServer(db kv.RwDB, client *Protocols, snapshotDir *dir.Rw, silent bool) (*GrpcServer, error) { +func NewGrpcServer(db kv.RwDB, client *Protocols, snapshotDir *dir.Rw) (*GrpcServer, error) { sn := &GrpcServer{ db: db, t: client, snapshotDir: snapshotDir, - silent: silent, } return sn, nil } -func CreateTorrentFilesAndAdd(ctx context.Context, snapshotDir *dir.Rw, torrentClient *torrent.Client) error { - if err := BuildTorrentFilesIfNeed(ctx, snapshotDir); err != nil { - return err - } - if err := AddTorrentFiles(ctx, snapshotDir, torrentClient); err != nil { - return err - } - for _, t := range torrentClient.Torrents() { - t.AllowDataUpload() - if !t.Complete.Bool() { - t.AllowDataDownload() - t.DownloadAll() - } - } - return nil -} - type GrpcServer struct { proto_downloader.UnimplementedDownloaderServer t *Protocols db kv.RwDB snapshotDir *dir.Rw - silent bool } func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.DownloadRequest) (*emptypb.Empty, error) { - infoHashes := make([]metainfo.Hash, len(request.Items)) - for i, it := range request.Items { + torrentClient := s.t.TorrentClient + mi := &metainfo.MetaInfo{AnnounceList: Trackers} + for _, it := range request.Items { if it.TorrentHash == nil { - if err := BuildTorrentFileIfNeed(ctx, it.Path, s.snapshotDir); err != nil { - return nil, err - } - metaInfo, err := 
AddTorrentFile(ctx, filepath.Join(s.snapshotDir.Path, it.Path+".torrent"), s.t.TorrentClient) + err := BuildTorrentAndAdd(ctx, it.Path, s.snapshotDir, s.t.TorrentClient) if err != nil { return nil, err } - infoHashes[i] = metaInfo.HashInfoBytes() - } else { - infoHashes[i] = gointerfaces.ConvertH160toAddress(it.TorrentHash) + continue } - } - if err := ResolveAbsentTorrents(ctx, s.t.TorrentClient, infoHashes, s.snapshotDir, s.silent); err != nil { - return nil, err - } - for _, t := range s.t.TorrentClient.Torrents() { - t.AllowDataDownload() - t.AllowDataUpload() - if !t.Complete.Bool() { - t.DownloadAll() + + hash := Proto2InfoHash(it.TorrentHash) + if _, ok := torrentClient.Torrent(hash); ok { + continue } + + magnet := mi.Magnet(&hash, nil) + go func(magnetUrl string) { + t, err := torrentClient.AddMagnet(magnetUrl) + if err != nil { + log.Warn("[downloader] add magnet link", "err", err) + return + } + t.DisallowDataDownload() + t.AllowDataUpload() + <-t.GotInfo() + mi := t.Metainfo() + if err := CreateTorrentFileIfNotExists(s.snapshotDir, t.Info(), &mi); err != nil { + log.Warn("[downloader] create torrent file", "err", err) + return + } + }(magnet.String()) + } return &emptypb.Empty{}, nil } func (s *GrpcServer) Stats(ctx context.Context, request *proto_downloader.StatsRequest) (*proto_downloader.StatsReply, error) { - torrents := s.t.TorrentClient.Torrents() - reply := &proto_downloader.StatsReply{Completed: true, Torrents: int32(len(torrents))} + stats := s.t.Stats() + return &proto_downloader.StatsReply{ + MetadataReady: stats.MetadataReady, + FilesTotal: stats.FilesTotal, - peers := map[torrent.PeerID]struct{}{} + Completed: stats.Completed, + Progress: stats.Progress, - for _, t := range torrents { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-t.GotInfo(): - reply.BytesCompleted += uint64(t.BytesCompleted()) - reply.BytesTotal += uint64(t.Info().TotalLength()) - reply.Completed = reply.Completed && t.Complete.Bool() - reply.Connections += uint64(len(t.PeerConns())) + PeersUnique: stats.PeersUnique, + ConnectionsTotal: stats.ConnectionsTotal, - for _, peer := range t.PeerConns() { - peers[peer.PeerID] = struct{}{} - } - default: - reply.Completed = false - } - } - - reply.Peers = int32(len(peers)) - reply.Progress = int32(100 * (float64(reply.BytesCompleted) / float64(reply.BytesTotal))) - if reply.Progress == 100 && !reply.Completed { - reply.Progress = 99 - } - return reply, nil + BytesCompleted: stats.BytesCompleted, + BytesTotal: stats.BytesTotal, + UploadRate: stats.UploadRate, + DownloadRate: stats.DownloadRate, + }, nil } -func Proto2InfoHashes(in []*prototypes.H160) []metainfo.Hash { - infoHashes := make([]metainfo.Hash, len(in)) - i := 0 - for _, h := range in { - infoHashes[i] = gointerfaces.ConvertH160toAddress(h) - i++ - } - return infoHashes +func Proto2InfoHash(in *prototypes.H160) metainfo.Hash { + return gointerfaces.ConvertH160toAddress(in) } diff --git a/cmd/downloader/downloader/torrentcfg/torrentcfg.go b/cmd/downloader/downloader/torrentcfg/torrentcfg.go index 6ce8d71b276..e263773e21f 100644 --- a/cmd/downloader/downloader/torrentcfg/torrentcfg.go +++ b/cmd/downloader/downloader/torrentcfg/torrentcfg.go @@ -25,6 +25,7 @@ type Cfg struct { *torrent.ClientConfig DB kv.RwDB CompletionCloser io.Closer + DownloadSlots int } func Default() *torrent.ClientConfig { @@ -33,10 +34,10 @@ func Default() *torrent.ClientConfig { // enable dht torrentConfig.NoDHT = true //torrentConfig.DisableTrackers = true - //torrentConfig.DisableWebtorrent = true + 
torrentConfig.DisableWebtorrent = true //torrentConfig.DisableWebseeds = true - // Increase default timeouts, because we often run on commodity networks + // Reduce defaults - to avoid peers with very bad geography torrentConfig.MinDialTimeout = 1 * time.Second // default: 3sec torrentConfig.NominalDialTimeout = 10 * time.Second // default: 20sec torrentConfig.HandshakesTimeout = 1 * time.Second // default: 4sec @@ -44,14 +45,17 @@ func Default() *torrent.ClientConfig { return torrentConfig } -func New(snapshotsDir *dir.Rw, verbosity lg.Level, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, maxPeers, connsPerFile int, db kv.RwDB) (*Cfg, error) { +func New(snapshotsDir *dir.Rw, verbosity lg.Level, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile int, db kv.RwDB, downloadSlots int) (*Cfg, error) { torrentConfig := Default() // We would-like to reduce amount of goroutines in Erigon, so reducing next params torrentConfig.EstablishedConnsPerTorrent = connsPerFile // default: 50 - torrentConfig.TorrentPeersHighWater = maxPeers // default: 500 - torrentConfig.TorrentPeersLowWater = 50 // default: 50 - torrentConfig.HalfOpenConnsPerTorrent = 25 // default: 25 - torrentConfig.TotalHalfOpenConns = 50 // default: 100 + + // see: https://en.wikipedia.org/wiki/TCP_half-open + torrentConfig.TotalHalfOpenConns = 100 // default: 100 + torrentConfig.HalfOpenConnsPerTorrent = 25 // default: 25 + + torrentConfig.TorrentPeersHighWater = 500 // default: 500 + torrentConfig.TorrentPeersLowWater = 50 // default: 50 torrentConfig.ListenPort = port torrentConfig.Seed = true @@ -100,5 +104,5 @@ func New(snapshotsDir *dir.Rw, verbosity lg.Level, natif nat.Interface, download } m := storage.NewMMapWithCompletion(snapshotsDir.Path, c) torrentConfig.DefaultStorage = m - return &Cfg{ClientConfig: torrentConfig, DB: db, CompletionCloser: m}, nil + return &Cfg{ClientConfig: torrentConfig, DB: db, CompletionCloser: m, DownloadSlots: downloadSlots}, nil } diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index d06e466c5c1..644e57fa5dc 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -9,9 +9,11 @@ import ( "io" "os" "path/filepath" + "runtime" "sync" "time" + "github.com/anacrolix/torrent" "github.com/anacrolix/torrent/bencode" "github.com/anacrolix/torrent/metainfo" "github.com/anacrolix/torrent/mmap_span" @@ -19,13 +21,14 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" "github.com/ledgerwatch/erigon/cmd/downloader/trackers" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" + "golang.org/x/sync/semaphore" ) // Trackers - break down by priority tier var Trackers = [][]string{ - trackers.First(10, trackers.Best), + trackers.First(7, trackers.Best), //trackers.First(3, trackers.Udp), //trackers.First(3, trackers.Https), //trackers.First(10, trackers.Ws), @@ -51,7 +54,7 @@ func AllTorrentFiles(dir string) ([]string, error) { } var res []string for _, f := range files { - if !snapshotsync.IsCorrectFileName(f.Name()) { + if !snap.IsCorrectFileName(f.Name()) { continue } fileInfo, err := f.Info() @@ -75,7 +78,7 @@ func allSegmentFiles(dir string) ([]string, error) { } var res []string for _, f := range files { - if !snapshotsync.IsCorrectFileName(f.Name()) { + if !snap.IsCorrectFileName(f.Name()) { continue } fileInfo, err := 
f.Info() @@ -94,36 +97,55 @@ func allSegmentFiles(dir string) ([]string, error) { } // BuildTorrentFileIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFileIfNeed(ctx context.Context, originalFileName string, root *dir.Rw) (err error) { - f, err := snapshotsync.ParseFileName(root.Path, originalFileName) +func BuildTorrentFileIfNeed(ctx context.Context, originalFileName string, root *dir.Rw) (ok bool, err error) { + f, err := snap.ParseFileName(root.Path, originalFileName) if err != nil { - return err + return false, err } - if f.To-f.From != snapshotsync.DEFAULT_SEGMENT_SIZE { - return nil + if f.To-f.From != snap.DEFAULT_SEGMENT_SIZE { + return false, nil } torrentFilePath := filepath.Join(root.Path, originalFileName+".torrent") if _, err := os.Stat(torrentFilePath); err != nil { if !errors.Is(err, os.ErrNotExist) { - return err + return false, err + } + info := &metainfo.Info{PieceLength: torrentcfg.DefaultPieceSize} + if err := info.BuildFromFilePath(filepath.Join(root.Path, originalFileName)); err != nil { + return false, err } - info, err := BuildInfoBytesForFile(root.Path, originalFileName) if err != nil { - return err + return false, err } if err := CreateTorrentFile(root, info, nil); err != nil { - return err + return false, err } } + return true, nil +} + +func BuildTorrentAndAdd(ctx context.Context, originalFileName string, snapshotDir *dir.Rw, client *torrent.Client) error { + ok, err := BuildTorrentFileIfNeed(ctx, originalFileName, snapshotDir) + if err != nil { + return fmt.Errorf("BuildTorrentFileIfNeed: %w", err) + } + if !ok { + return nil + } + torrentFilePath := filepath.Join(snapshotDir.Path, originalFileName+".torrent") + _, err = AddTorrentFile(ctx, torrentFilePath, client) + if err != nil { + return fmt.Errorf("AddTorrentFile: %w", err) + } return nil } // BuildTorrentFilesIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFilesIfNeed(ctx context.Context, root *dir.Rw) error { +func BuildTorrentFilesIfNeed(ctx context.Context, snapshotDir *dir.Rw) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - files, err := allSegmentFiles(root.Path) + files, err := allSegmentFiles(snapshotDir.Path) if err != nil { return err } @@ -133,14 +155,17 @@ func BuildTorrentFilesIfNeed(ctx context.Context, root *dir.Rw) error { wg.Add(1) go func(f string, i int) { defer wg.Done() - errs <- BuildTorrentFileIfNeed(ctx, f, root) + _, err = BuildTorrentFileIfNeed(ctx, f, snapshotDir) + if err != nil { + errs <- err + } select { default: case <-ctx.Done(): errs <- ctx.Err() case <-logEvery.C: - log.Info("[torrent] Creating .torrent files", "Progress", fmt.Sprintf("%d/%d", i, len(files))) + log.Info("[Snapshots] Creating .torrent files", "Progress", fmt.Sprintf("%d/%d", i, len(files))) } }(f, i) } @@ -156,12 +181,48 @@ func BuildTorrentFilesIfNeed(ctx context.Context, root *dir.Rw) error { return nil } -func BuildInfoBytesForFile(root string, fileName string) (*metainfo.Info, error) { - info := &metainfo.Info{PieceLength: torrentcfg.DefaultPieceSize} - if err := info.BuildFromFilePath(filepath.Join(root, fileName)); err != nil { - return nil, err +// BuildTorrentsAndAdd - create .torrent files from .seg files (big IO) - if .seg files were placed manually to snapshotDir +func BuildTorrentsAndAdd(ctx context.Context, snapshotDir *dir.Rw, client *torrent.Client) error { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + files, err 
:= allSegmentFiles(snapshotDir.Path) + if err != nil { + return fmt.Errorf("allSegmentFiles: %w", err) + } + errs := make(chan error, len(files)*2) + wg := &sync.WaitGroup{} + workers := runtime.GOMAXPROCS(-1) - 1 + if workers < 1 { + workers = 1 + } + var sem = semaphore.NewWeighted(int64(workers)) + for i, f := range files { + wg.Add(1) + if err := sem.Acquire(ctx, 1); err != nil { + return err + } + go func(f string, i int) { + defer sem.Release(1) + defer wg.Done() + + select { + case <-ctx.Done(): + errs <- ctx.Err() + default: + } + errs <- BuildTorrentAndAdd(ctx, f, snapshotDir, client) + }(f, i) } - return info, nil + go func() { + wg.Wait() + close(errs) + }() + for err := range errs { + if err != nil { + return err + } + } + return nil } func CreateTorrentFileIfNotExists(root *dir.Rw, info *metainfo.Info, mi *metainfo.MetaInfo) error { diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 95c1fcafaaa..295dc7e45a9 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -43,6 +43,7 @@ var ( natSetting string torrentVerbosity string downloadRateStr, uploadRateStr string + torrentDownloadSlots int torrentPort int torrentMaxPeers int torrentConnsPerFile int @@ -63,6 +64,7 @@ func init() { rootCmd.Flags().IntVar(&torrentPort, "torrent.port", utils.TorrentPortFlag.Value, utils.TorrentPortFlag.Usage) rootCmd.Flags().IntVar(&torrentMaxPeers, "torrent.maxpeers", utils.TorrentMaxPeersFlag.Value, utils.TorrentMaxPeersFlag.Usage) rootCmd.Flags().IntVar(&torrentConnsPerFile, "torrent.conns.perfile", utils.TorrentConnsPerFileFlag.Value, utils.TorrentConnsPerFileFlag.Usage) + rootCmd.Flags().IntVar(&torrentDownloadSlots, "torrent.download.slots", utils.TorrentDownloadSlotsFlag.Value, utils.TorrentDownloadSlotsFlag.Usage) withDataDir(printTorrentHashes) printTorrentHashes.PersistentFlags().BoolVar(&forceRebuild, "rebuild", false, "Force re-create .torrent files") @@ -148,7 +150,7 @@ func Downloader(ctx context.Context) error { return err } - cfg, err := torrentcfg.New(snapshotDir, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentMaxPeers, torrentConnsPerFile, db) + cfg, err := torrentcfg.New(snapshotDir, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, db, torrentDownloadSlots) if err != nil { return err } @@ -160,13 +162,11 @@ func Downloader(ctx context.Context) error { } defer protocols.Close() log.Info("[torrent] Start", "my peerID", fmt.Sprintf("%x", protocols.TorrentClient.PeerID())) - if err = downloader.CreateTorrentFilesAndAdd(ctx, snapshotDir, protocols.TorrentClient); err != nil { - return fmt.Errorf("CreateTorrentFilesAndAdd: %w", err) + if err := protocols.Start(ctx, false); err != nil { + return err } - go downloader.LoggingLoop(ctx, protocols.TorrentClient) - - bittorrentServer, err := downloader.NewGrpcServer(protocols.DB, protocols, snapshotDir, true) + bittorrentServer, err := downloader.NewGrpcServer(protocols.DB, protocols, snapshotDir) if err != nil { return fmt.Errorf("new server: %w", err) } diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 1927a91cc66..520a2186808 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -26,6 +26,7 @@ import ( "github.com/RoaringBitmap/roaring/roaring64" "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" @@ -1019,7 +1020,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen 
bo headNumber := rawdb.ReadHeaderNumber(tx, headHash) block := *headNumber - uint64(rewind) log.Info("GetProof", "address", address, "storage keys", len(storageKeys), "head", *headNumber, "block", block, - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) accountMap := make(map[string]*accounts.Account) @@ -1047,7 +1048,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo } runtime.ReadMemStats(&m) log.Info("Constructed account map", "size", len(accountMap), - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) storageMap := make(map[string][]byte) if err := changeset.ForRange(tx, kv.StorageChangeSet, block+1, *headNumber+1, func(blockN uint64, address, v []byte) error { var addrHash, err = common.HashData(address) @@ -1064,7 +1065,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo } runtime.ReadMemStats(&m) log.Info("Constructed storage map", "size", len(storageMap), - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) var unfurlList = make([]string, len(accountMap)+len(storageMap)) unfurl := trie.NewRetainList(0) i := 0 @@ -1109,7 +1110,7 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo sort.Strings(unfurlList) runtime.ReadMemStats(&m) log.Info("Constructed account unfurl lists", - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) loader := trie.NewFlatDBTrieLoader("checkRoots") if err = loader.Reset(unfurl, nil, nil, false); err != nil { @@ -1128,13 +1129,13 @@ func testGetProof(chaindata string, address common.Address, rewind int, regen bo } runtime.ReadMemStats(&m) log.Info("Loaded subtries", - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) hash, err := rawdb.ReadCanonicalHash(tx, block) tool.Check(err) header := rawdb.ReadHeader(tx, hash, block) runtime.ReadMemStats(&m) log.Info("Constructed trie", - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) fmt.Printf("Resulting root: %x, expected root: %x\n", root, header.Root) return nil } diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 9b6c7bfd467..ba60f9c65d3 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -138,6 +138,9 @@ func resetExec(tx kv.RwTx, g *core.Genesis) error { if err := tx.ClearBucket(kv.PendingEpoch); err != nil { return err } + if err := tx.ClearBucket(kv.BorReceipts); err != nil { + return err + } if err := stages.SaveStageProgress(tx, stages.Execution, 0); err != nil { return err } diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 8318260036f..d764c198509 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -15,14 +15,8 @@ import ( "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshotsynccli" - "github.com/ledgerwatch/log/v3" - 
"github.com/ledgerwatch/secp256k1" - "github.com/spf13/cobra" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" @@ -30,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/ethconsensusconfig" "github.com/ledgerwatch/erigon/eth/integrity" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" @@ -39,7 +34,11 @@ import ( "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" stages2 "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/secp256k1" + "github.com/spf13/cobra" ) var cmdStageHeaders = &cobra.Command{ @@ -296,7 +295,7 @@ var cmdSetSnapshto = &cobra.Command{ snCfg = allSnapshots(chainConfig).Cfg() } if err := db.Update(context.Background(), func(tx kv.RwTx) error { - return snapshotsynccli.ForceSetFlags(tx, snCfg) + return snap.ForceSetFlags(tx, snCfg) }); err != nil { return err } @@ -483,7 +482,7 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { return fmt.Errorf("re-read Bodies progress: %w", err) } { // hard-unwind stage_body also - if err := rawdb.DeleteNewBlocks(tx, progress+1); err != nil { + if err := rawdb.TruncateBlocks(ctx, tx, progress+1); err != nil { return err } progressBodies, err := stages.GetStageProgress(tx, stages.Bodies) @@ -497,21 +496,17 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { } } // remove all canonical markers from this point - if err := tx.ForEach(kv.HeaderCanonical, dbutils.EncodeBlockNumber(progress+1), func(k, v []byte) error { - return tx.Delete(kv.HeaderCanonical, k, nil) - }); err != nil { + if err = rawdb.TruncateCanonicalHash(tx, progress+1); err != nil { return err } - if err := tx.ForEach(kv.HeaderTD, dbutils.EncodeBlockNumber(progress+1), func(k, v []byte) error { - return tx.Delete(kv.HeaderTD, k, nil) - }); err != nil { + if err = rawdb.TruncateTd(tx, progress+1); err != nil { return err } hash, err := rawdb.ReadCanonicalHash(tx, progress-1) if err != nil { return err } - if err = tx.Put(kv.HeadHeaderKey, []byte(kv.HeadHeaderKey), hash[:]); err != nil { + if err = rawdb.WriteHeadHeaderHash(tx, hash); err != nil { return err } @@ -1149,24 +1144,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) } vmConfig := &vm.Config{} - genesis, chainConfig := byChain(chain) - var engine consensus.Engine - config := ðconfig.Defaults - if chainConfig.Clique != nil { - c := params.CliqueSnapshot - c.DBPath = filepath.Join(datadir, "clique", "db") - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir) - } else if chainConfig.Aura != nil { - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: filepath.Join(datadir, "aura")}, config.Miner.Notify, config.Miner.Noverify, "", true, datadir) - } else if chainConfig.Parlia != nil { - consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadir, "parlia")} - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, 
config.Miner.Noverify, "", true, datadir) - } else if chainConfig.Bor != nil { - consensusConfig := &config.Bor - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallURL, false, datadir) - } else { //ethash - engine = ethash.NewFaker() - } + genesis, _ := byChain(chain) events := privateapi.NewEvents() @@ -1184,13 +1162,6 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) var batchSize datasize.ByteSize must(batchSize.UnmarshalText([]byte(batchSizeStr))) - br := getBlockReader(chainConfig) - blockDownloaderWindow := 65536 - sentryControlServer, err := sentry.NewControlServer(db, "", chainConfig, genesisBlock.Hash(), engine, 1, nil, blockDownloaderWindow, br) - if err != nil { - panic(err) - } - cfg := ethconfig.Defaults cfg.Prune = pm cfg.BatchSize = batchSize @@ -1206,6 +1177,30 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) snDir := &dir.Rw{Path: filepath.Join(datadir, "snapshots")} cfg.SnapshotDir = snDir } + var engine consensus.Engine + config := ðconfig.Defaults + if chainConfig.Clique != nil { + c := params.CliqueSnapshot + c.DBPath = filepath.Join(datadir, "clique", "db") + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, allSn) + } else if chainConfig.Aura != nil { + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: filepath.Join(datadir, "aura")}, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, allSn) + } else if chainConfig.Parlia != nil { + consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadir, "parlia")} + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, allSn) + } else if chainConfig.Bor != nil { + consensusConfig := &config.Bor + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallURL, false, datadir, allSn) + } else { //ethash + engine = ethash.NewFaker() + } + + br := getBlockReader(chainConfig) + blockDownloaderWindow := 65536 + sentryControlServer, err := sentry.NewControlServer(db, "", chainConfig, genesisBlock.Hash(), engine, 1, nil, blockDownloaderWindow, br) + if err != nil { + panic(err) + } sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, chainConfig.TerminalTotalDifficulty, sentryControlServer, tmpdir, diff --git a/cmd/observer/observer/crawler.go b/cmd/observer/observer/crawler.go index 07005dc368c..e220d3151d0 100644 --- a/cmd/observer/observer/crawler.go +++ b/cmd/observer/observer/crawler.go @@ -5,6 +5,9 @@ import ( "crypto/ecdsa" "errors" "fmt" + "sync/atomic" + "time" + "github.com/ledgerwatch/erigon/cmd/observer/database" "github.com/ledgerwatch/erigon/cmd/observer/utils" "github.com/ledgerwatch/erigon/core/forkid" @@ -12,8 +15,6 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" "golang.org/x/sync/semaphore" - "sync/atomic" - "time" ) type Crawler struct { @@ -242,6 +243,9 @@ func (crawler *Crawler) Run(ctx context.Context) error { } return fmt.Errorf("failed to count ping errors: %w", err) } + if prevPingTries == nil { + prevPingTries = new(uint) + } handshakeNextRetryTime, err := crawler.db.FindHandshakeRetryTime(ctx, id) if err != nil { diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go 
b/cmd/rpcdaemon/commands/trace_adhoc.go index 2ee9b0c56e9..4a09a04f46f 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -17,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon/common/hexutil" math2 "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -1034,7 +1033,13 @@ func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, pa if err != nil { return nil, err } - parentHeader := rawdb.ReadHeader(dbtx, hash, blockNumber) + + // TODO: can read here only parent header + parentBlock, err := api.blockWithSenders(dbtx, hash, blockNumber) + if err != nil { + return nil, err + } + parentHeader := parentBlock.Header() if parentHeader == nil { return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash) } @@ -1085,7 +1090,12 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type noop := state.NewNoopWriter() cachedWriter := state.NewCachedWriter(noop, stateCache) - parentHeader := rawdb.ReadHeader(dbtx, hash, blockNumber) + // TODO: can read here only parent header + parentBlock, err := api.blockWithSenders(dbtx, hash, blockNumber) + if err != nil { + return nil, err + } + parentHeader := parentBlock.Header() if parentHeader == nil { return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash) } diff --git a/cmd/rpcdaemon/commands/txpool_api.go b/cmd/rpcdaemon/commands/txpool_api.go index 962d5df4178..eccb66bc4f5 100644 --- a/cmd/rpcdaemon/commands/txpool_api.go +++ b/cmd/rpcdaemon/commands/txpool_api.go @@ -5,6 +5,7 @@ import ( "context" "fmt" + "github.com/ledgerwatch/erigon-lib/gointerfaces" proto_txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" @@ -56,8 +57,8 @@ func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]ma if err != nil { return nil, err } - addr := common.BytesToAddress(reply.Txs[i].Sender) - switch reply.Txs[i].Type { + addr := gointerfaces.ConvertH160toAddress(reply.Txs[i].Sender) + switch reply.Txs[i].TxnType { case proto_txpool.AllReply_PENDING: if _, ok := pending[addr]; !ok { pending[addr] = make([]types.Transaction, 0, 4) diff --git a/cmd/sentry/sentry/downloader.go b/cmd/sentry/sentry/downloader.go index cd617ad9409..77542a5d041 100644 --- a/cmd/sentry/sentry/downloader.go +++ b/cmd/sentry/sentry/downloader.go @@ -555,7 +555,7 @@ func (cs *ControlServerImpl) newBlock66(ctx context.Context, inreq *proto_sentry return fmt.Errorf("newBlock66: %w", err) } - if segments, penalty, err := cs.Hd.SingleHeaderAsSegment(headerRaw, request.Block.Header()); err == nil { + if segments, penalty, err := cs.Hd.SingleHeaderAsSegment(headerRaw, request.Block.Header(), true /* penalizePoSBlocks */); err == nil { if penalty == headerdownload.NoPenalty { cs.Hd.ProcessSegment(segments[0], true /* newBlock */, ConvertH512ToPeerID(inreq.PeerId)) // There is only one segment in this case } else { diff --git a/cmd/state/commands/erigon2.go b/cmd/state/commands/erigon2.go index c03547166fb..6526c342776 100644 --- a/cmd/state/commands/erigon2.go +++ b/cmd/state/commands/erigon2.go @@ -21,6 +21,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + 
"github.com/ledgerwatch/erigon/eth/ethconsensusconfig" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" @@ -204,16 +205,17 @@ func Erigon2(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log. } }() - engine := initConsensusEngine(chainConfig, logger) var blockReader interfaces.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots syncMode := ethconfig.SyncModeByChainName(chainConfig.ChainName, syncmodeCli) if syncMode == ethconfig.SnapSync { - allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapshotCfg(true, false), path.Join(datadir, "snapshots")) + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapshotCfg(true, false), path.Join(datadir, "snapshots")) defer allSnapshots.Close() blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) } else { blockReader = snapshotsync.NewBlockReader() } + engine := initConsensusEngine(chainConfig, logger, allSnapshots) for !interrupt { blockNum++ @@ -597,23 +599,23 @@ func (ww *WriterWrapper) CreateContract(address common.Address) error { return nil } -func initConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger) (engine consensus.Engine) { +func initConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, snapshots *snapshotsync.RoSnapshots) (engine consensus.Engine) { config := ethconfig.Defaults switch { case chainConfig.Clique != nil: c := params.CliqueSnapshot c.DBPath = filepath.Join(datadir, "clique", "db") - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots) case chainConfig.Aura != nil: consensusConfig := ¶ms.AuRaConfig{DBPath: filepath.Join(datadir, "aura")} - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots) case chainConfig.Parlia != nil: consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadir, "parlia")} - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, snapshots) case chainConfig.Bor != nil: consensusConfig := &config.Bor - engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "http://localhost:1317", false, datadir) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "http://localhost:1317", false, datadir, snapshots) default: //ethash engine = ethash.NewFaker() } diff --git a/cmd/utils/customflags.go b/cmd/utils/customflags.go index 6db1f74be59..0fdb6f9e954 100644 --- a/cmd/utils/customflags.go +++ b/cmd/utils/customflags.go @@ -154,11 +154,11 @@ func (b *bigValue) String() string { } func (b *bigValue) Set(s string) error { - int, ok := math.ParseBig256(s) + intVal, ok := math.ParseBig256(s) if !ok { return errors.New("invalid integer syntax") } - *b = bigValue(*int) + *b = bigValue(*intVal) return nil } @@ -172,6 +172,7 @@ func (f BigFlag) String() string { func (f BigFlag) 
Apply(set *flag.FlagSet) { eachName(f.Name, func(name string) { + f.Value = new(big.Int) set.Var((*bigValue)(f.Value), f.Name, f.Usage) }) } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 9c805d7c50a..0c75f31a6f0 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -134,6 +134,14 @@ var ( Name: "whitelist", Usage: "Comma separated block number-to-hash mappings to enforce (<number>=<hash>)", } + OverrideTerminalTotalDifficulty = BigFlag{ + Name: "override.terminaltotaldifficulty", + Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting", + } + OverrideMergeForkBlock = BigFlag{ + Name: "override.mergeForkBlock", + Usage: "Manually specify MergeForkBlock, overriding the bundled setting", + } // Ethash settings EthashCachesInMemoryFlag = cli.IntFlag{ Name: "ethash.cachesinmem", @@ -491,7 +499,7 @@ var ( } MaxPendingPeersFlag = cli.IntFlag{ Name: "maxpendpeers", - Usage: "Maximum number of pending connection attempts (defaults used if set to 0)", + Usage: "Maximum number of TCP connections pending to become connected peers", Value: node.DefaultConfig.P2P.MaxPendingPeers, } ListenPortFlag = cli.IntFlag{ @@ -648,6 +656,11 @@ var ( Value: "4mb", Usage: "bytes per second, example: 32mb", } + TorrentDownloadSlotsFlag = cli.IntFlag{ + Name: "torrent.download.slots", + Value: 3, + Usage: "number of files to download in parallel. If the network has enough seeders, 1-3 slots are enough; if seeders are scarce, increase to 5-7 (too large a value slows everything down).", + } TorrentPortFlag = cli.IntFlag{ Name: "torrent.port", Value: 42069, @@ -656,7 +669,7 @@ var ( TorrentMaxPeersFlag = cli.IntFlag{ Name: "torrent.maxpeers", Value: 100, - Usage: "limit amount of torrent peers", + Usage: "unused parameter (reserved for future use)", } TorrentConnsPerFileFlag = cli.IntFlag{ Name: "torrent.conns.perfile", @@ -1060,14 +1073,12 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) { cfg.DataDir = DataDirForNetwork(cfg.DataDir, ctx.GlobalString(ChainFlag.Name)) } - if ctx.GlobalIsSet(DbPageSizeFlag.Name) { - if err := cfg.MdbxPageSize.UnmarshalText([]byte(ctx.GlobalString(DbPageSizeFlag.Name))); err != nil { - panic(err) - } - sz := cfg.MdbxPageSize.Bytes() - if !isPowerOfTwo(sz) || sz < 256 || sz > 64*1024 { - panic("invalid --db.pagesize: " + DbPageSizeFlag.Usage) - } + if err := cfg.MdbxPageSize.UnmarshalText([]byte(ctx.GlobalString(DbPageSizeFlag.Name))); err != nil { + panic(err) + } + sz := cfg.MdbxPageSize.Bytes() + if !isPowerOfTwo(sz) || sz < 256 || sz > 64*1024 { + panic("invalid --db.pagesize: " + DbPageSizeFlag.Usage) } } @@ -1396,9 +1407,9 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *node.Config, cfg *ethconfig.Conf nodeConfig.P2P.NAT, downloadRate, uploadRate, ctx.GlobalInt(TorrentPortFlag.Name), - ctx.GlobalInt(TorrentMaxPeersFlag.Name), ctx.GlobalInt(TorrentConnsPerFileFlag.Name), db, + ctx.GlobalInt(TorrentDownloadSlotsFlag.Name), ) if err != nil { panic(err) @@ -1489,6 +1500,13 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *node.Config, cfg *ethconfig.Conf cfg.Miner.GasPrice = big.NewInt(1) } } + + if ctx.GlobalIsSet(OverrideTerminalTotalDifficulty.Name) { + cfg.Genesis.Config.TerminalTotalDifficulty = GlobalBig(ctx, OverrideTerminalTotalDifficulty.Name) + } + if ctx.GlobalIsSet(OverrideMergeForkBlock.Name) { + cfg.Genesis.Config.MergeForkBlock = GlobalBig(ctx, OverrideMergeForkBlock.Name) + } } // SetDNSDiscoveryDefaults configures DNS discovery with the given URL if diff --git a/consensus/parlia/api.go b/consensus/parlia/api.go index
05aaeefed70..fbcec09e6e6 100644 --- a/consensus/parlia/api.go +++ b/consensus/parlia/api.go @@ -42,7 +42,7 @@ func (api *API) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { if header == nil { return nil, errUnknownBlock } - return api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + return api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil, false /* verify */) } // GetSnapshotAtHash retrieves the state snapshot at a given block. @@ -51,7 +51,7 @@ func (api *API) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { if header == nil { return nil, errUnknownBlock } - return api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + return api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil, false /* verify */) } // GetValidators retrieves the list of validators at the specified block. @@ -67,7 +67,7 @@ func (api *API) GetValidators(number *rpc.BlockNumber) ([]common.Address, error) if header == nil { return nil, errUnknownBlock } - snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil, false /* verify */) if err != nil { return nil, err } @@ -80,7 +80,7 @@ func (api *API) GetValidatorsAtHash(hash common.Hash) ([]common.Address, error) if header == nil { return nil, errUnknownBlock } - snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil) + snap, err := api.parlia.snapshot(api.chain, header.Number.Uint64(), header.Hash(), nil, false /* verify */) if err != nil { return nil, err } diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 89052cdf821..d1ca6701cca 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -36,6 +36,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" ) const ( @@ -232,14 +233,16 @@ type Parlia struct { slashABI abi.ABI // The fields below are for testing only - fakeDiff bool // Skip difficulty verifications - forks []uint64 // Forks extracted from the chainConfig + fakeDiff bool // Skip difficulty verifications + forks []uint64 // Forks extracted from the chainConfig + snapshots *snapshotsync.RoSnapshots } // New creates a Parlia consensus engine. 
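// A hedged construction sketch (the snapshots argument is new in this change; it may be
// nil, in which case epoch checkpoints are trusted only at genesis, per the snapshot()
// change below; consensusCfg, logger and allSnapshots are assumed to come from the caller,
// mirroring eth/ethconsensusconfig/config.go later in this patch):
//
//	engine := parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), allSnapshots)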
func New( chainConfig *params.ChainConfig, db kv.RwDB, + snapshots *snapshotsync.RoSnapshots, ) *Parlia { // get parlia config parliaConfig := chainConfig.Parlia @@ -276,6 +279,7 @@ func New( slashABI: sABI, signer: types.LatestSigner(chainConfig), forks: forkid.GatherForks(chainConfig), + snapshots: snapshots, } return c @@ -392,7 +396,7 @@ func (p *Parlia) verifyCascadingFields(chain consensus.ChainHeaderReader, header return consensus.ErrUnknownAncestor } - snap, err := p.snapshot(chain, number-1, header.ParentHash, parents) + snap, err := p.snapshot(chain, number-1, header.ParentHash, parents, true /* verify */) if err != nil { return err } @@ -438,7 +442,7 @@ func (p *Parlia) verifySeal(chain consensus.ChainHeaderReader, header *types.Hea return errUnknownBlock } // Retrieve the snapshot needed to verify this header and cache it - snap, err := p.snapshot(chain, number-1, header.ParentHash, parents) + snap, err := p.snapshot(chain, number-1, header.ParentHash, parents, true /* verify */) if err != nil { return err } @@ -481,7 +485,7 @@ func (p *Parlia) verifySeal(chain consensus.ChainHeaderReader, header *types.Hea } // snapshot retrieves the authorization snapshot at a given point in time. -func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header) (*Snapshot, error) { +func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash common.Hash, parents []*types.Header, verify bool) (*Snapshot, error) { // Search for a snapshot in memory or on disk for checkpoints var ( headers []*types.Header @@ -500,31 +504,26 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash if s, err := loadSnapshot(p.config, p.signatures, p.db, number, hash); err == nil { log.Trace("Loaded snapshot from disk", "number", number, "hash", hash) snap = s - break + if !verify || snap != nil { + break + } } } - - // If we're at the genesis, snapshot the initial state. 
- if number == 0 { - checkpoint := chain.GetHeaderByNumber(number) - if checkpoint != nil { - // get checkpoint data - hash := checkpoint.Hash() - - validatorBytes := checkpoint.Extra[extraVanity : len(checkpoint.Extra)-extraSeal] - // get validators from headers - validators, err := ParseValidators(validatorBytes) - if err != nil { - return nil, err + if (verify && number%p.config.Epoch == 0) || number == 0 { + if (p.snapshots != nil && number <= p.snapshots.BlocksAvailable()) || number == 0 { + // Headers included into the snapshots have to be trusted as checkpoints + checkpoint := chain.GetHeader(hash, number) + if checkpoint != nil { + validatorBytes := checkpoint.Extra[extraVanity : len(checkpoint.Extra)-extraSeal] + // get validators from headers + validators, err := ParseValidators(validatorBytes) + if err != nil { + return nil, err + } + // new snapshot + snap = newSnapshot(p.config, p.signatures, number, hash, validators) + break } - - // new snapshot - snap = newSnapshot(p.config, p.signatures, number, hash, validators) - if err := snap.store(p.db); err != nil { - return nil, err - } - log.Info("Stored checkpoint snapshot to disk", "number", number, "hash", hash) - break } } @@ -557,7 +556,6 @@ func (p *Parlia) snapshot(chain consensus.ChainHeaderReader, number uint64, hash for i := 0; i < len(headers)/2; i++ { headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i] } - snap, err := snap.apply(headers, chain, parents, p.chainConfig.ChainID) if err != nil { return nil, err @@ -590,7 +588,7 @@ func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header header.Nonce = types.BlockNonce{} number := header.Number.Uint64() - snap, err := p.snapshot(chain, number-1, header.ParentHash, nil) + snap, err := p.snapshot(chain, number-1, header.ParentHash, nil, false /* verify */) if err != nil { return err } @@ -685,7 +683,7 @@ func (p *Parlia) finalize(header *types.Header, state *state.IntraBlockState, tx txs = userTxs // warn if not in majority fork number := header.Number.Uint64() - snap, err := p.snapshot(chain, number-1, header.ParentHash, nil) + snap, err := p.snapshot(chain, number-1, header.ParentHash, nil, false /* verify */) if err != nil { return nil, nil, err } @@ -805,7 +803,7 @@ func (p *Parlia) Seal(chain consensus.ChainHeaderReader, block *types.Block, res val, signFn := p.val, p.signFn p.lock.RUnlock() - snap, err := p.snapshot(chain, number-1, header.ParentHash, nil) + snap, err := p.snapshot(chain, number-1, header.ParentHash, nil, false /* verify */) if err != nil { return err } @@ -875,7 +873,7 @@ func (p *Parlia) SealHash(header *types.Header) common.Hash { // CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty // that a new block should have. 
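// A hedged sketch of the rule (assuming the clique-style constants diffInTurn = 2 and
// diffNoTurn = 1, plus the Snapshot.inturn helper and signer field p.val, none of which
// are shown in this hunk; only the snapshot() call below is confirmed by this patch):
//
//	snap, err := p.snapshot(chain, parentNumber, parentHash, nil, false /* verify */)
//	if err != nil {
//		return nil
//	}
//	if snap.inturn(p.val) {
//		return new(big.Int).Set(diffInTurn)
//	}
//	return new(big.Int).Set(diffNoTurn)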
func (p *Parlia) CalcDifficulty(chain consensus.ChainHeaderReader, time, parentTime uint64, parentDifficulty *big.Int, parentNumber uint64, parentHash, parentUncleHash common.Hash, parentSeal []rlp.RawValue) *big.Int { - snap, err := p.snapshot(chain, parentNumber, parentHash, nil) + snap, err := p.snapshot(chain, parentNumber, parentHash, nil, false /* verify */) if err != nil { return nil } @@ -950,7 +948,7 @@ func (p *Parlia) shouldWaitForCurrentBlockProcess(chain consensus.ChainHeaderRea } func (p *Parlia) EnoughDistance(chain consensus.ChainReader, header *types.Header) bool { - snap, err := p.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil) + snap, err := p.snapshot(chain, header.Number.Uint64()-1, header.ParentHash, nil, false /* verify */) if err != nil { return true } @@ -962,7 +960,7 @@ func (p *Parlia) IsLocalBlock(header *types.Header) bool { } func (p *Parlia) AllowLightProcess(chain consensus.ChainReader, currentHeader *types.Header) bool { - snap, err := p.snapshot(chain, currentHeader.Number.Uint64()-1, currentHeader.ParentHash, nil) + snap, err := p.snapshot(chain, currentHeader.Number.Uint64()-1, currentHeader.ParentHash, nil, false /* verify */) if err != nil { return true } diff --git a/core/blockchain.go b/core/blockchain.go index 23a67c4c360..4e7b73281cf 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -351,9 +351,16 @@ func SysCallContract(contract common.Address, data []byte, chainConfig params.Ch ) vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment - blockContext := NewEVMBlockContext(header, nil, engine, &state.SystemAddress, nil) + isBor := chainConfig.Bor != nil + var author *common.Address + if isBor { + author = &header.Coinbase + } else { + author = &state.SystemAddress + } + blockContext := NewEVMBlockContext(header, nil, engine, author, nil) evm := vm.NewEVM(blockContext, NewEVMTxContext(msg), ibs, &chainConfig, vmConfig) - if chainConfig.Bor != nil { + if isBor { ret, _, err := evm.Call( vm.AccountRef(msg.From()), *msg.To(), diff --git a/core/genesis_test.go b/core/genesis_test.go index 575cfea0f69..799a6f21674 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -1,16 +1,48 @@ package core import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/params/networkname" "github.com/stretchr/testify/require" - "testing" ) func TestDefaultBSCGenesisBlock(t *testing.T) { - genesis := DefaultBSCGenesisBlock() - db := memdb.New() - _, block, err := CommitGenesisBlock(db, genesis) + db := memdb.NewTestDB(t) + check := func(network string) { + genesis := DefaultGenesisBlockByChainName(network) + tx, err := db.BeginRw(context.Background()) + if err != nil { + t.Fatal(err) + } + defer tx.Rollback() + _, block, err := WriteGenesisBlock(tx, genesis) + require.NoError(t, err) + expect := params.GenesisHashByChainName(network) + require.NotNil(t, expect, network) + require.Equal(t, block.Hash().Bytes(), expect.Bytes(), network) + } + for _, network := range networkname.All { + check(network) + } +} + +func TestCommitGenesisIdempotency(t *testing.T) { + _, tx := memdb.NewTestTx(t) + genesis := DefaultGenesisBlockByChainName(networkname.MainnetChainName) + _, _, err := WriteGenesisBlock(tx, genesis) + require.NoError(t, err) + seq, err := tx.ReadSequence(kv.EthTx) + require.NoError(t, err) + require.Equal(t, uint64(2), seq) + + _, _, err = 
WriteGenesisBlock(tx, genesis) + require.NoError(t, err) + seq, err = tx.ReadSequence(kv.EthTx) require.NoError(t, err) - require.Equal(t, block.Hash(), params.BSCGenesisHash) + require.Equal(t, uint64(2), seq) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index eaab7fc45c8..ae7cd22b3c2 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -26,8 +26,6 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" @@ -35,6 +33,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/log/v3" ) // ReadCanonicalHash retrieves the hash assigned to a canonical block number. @@ -57,10 +56,12 @@ func WriteCanonicalHash(db kv.Putter, hash common.Hash, number uint64) error { return nil } -// DeleteCanonicalHash removes the number to hash canonical mapping. -func DeleteCanonicalHash(db kv.Deleter, number uint64) error { - if err := db.Delete(kv.HeaderCanonical, dbutils.EncodeBlockNumber(number), nil); err != nil { - return fmt.Errorf("failed to delete number to hash mapping: %w", err) +// TruncateCanonicalHash removes all number-to-hash canonical mappings from block number N onward +func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64) error { + if err := tx.ForEach(kv.HeaderCanonical, dbutils.EncodeBlockNumber(blockFrom), func(k, _ []byte) error { + return tx.Delete(kv.HeaderCanonical, k, nil) + }); err != nil { + return fmt.Errorf("TruncateCanonicalHash: %w", err) } return nil } @@ -254,8 +255,8 @@ func WriteHeader(db kv.Putter, header *types.Header) { } } -// DeleteHeader removes all block header data associated with a hash. -func DeleteHeader(db kv.Deleter, hash common.Hash, number uint64) { +// deleteHeader - dangerous, use DeleteAncientBlocks/TruncateBlocks methods +func deleteHeader(db kv.Deleter, hash common.Hash, number uint64) { if err := db.Delete(kv.Headers, dbutils.HeaderKey(number, hash), nil); err != nil { log.Crit("Failed to delete header", "err", err) } @@ -617,8 +618,8 @@ func WriteSenders(db kv.Putter, hash common.Hash, number uint64, senders []commo return nil } -// DeleteBody removes all block body data associated with a hash. -func DeleteBody(db kv.Deleter, hash common.Hash, number uint64) { +// deleteBody removes all block body data associated with a hash. +func deleteBody(db kv.Deleter, hash common.Hash, number uint64) { if err := db.Delete(kv.BlockBody, dbutils.BlockBodyKey(number, hash), nil); err != nil { log.Crit("Failed to delete block body", "err", err) } @@ -751,7 +752,7 @@ func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPre return err } if k != nil && binary.BigEndian.Uint64(k) >= firstMovedTxnID { - panic(fmt.Sprintf("must not happen, ResetSequence: %d, lastInDB: %d\n", firstMovedTxnID, binary.BigEndian.Uint64(k))) + panic(fmt.Sprintf("must not happen, ResetSequence: %d, lastInDB: %d", firstMovedTxnID, binary.BigEndian.Uint64(k))) } if err := ResetSequence(tx, kv.EthTx, firstMovedTxnID); err != nil { @@ -797,10 +798,12 @@ func WriteTd(db kv.Putter, hash common.Hash, number uint64, td *big.Int) error { return nil } -// DeleteTd removes all block total difficulty data associated with a hash.
-func DeleteTd(db kv.Deleter, hash common.Hash, number uint64) error { - if err := db.Delete(kv.HeaderTD, dbutils.HeaderKey(number, hash), nil); err != nil { - return fmt.Errorf("failed to delete block total difficulty: %w", err) +// TruncateTd removes all total difficulty entries from block number N onward +func TruncateTd(tx kv.RwTx, blockFrom uint64) error { + if err := tx.ForEach(kv.HeaderTD, dbutils.EncodeBlockNumber(blockFrom), func(k, _ []byte) error { + return tx.Delete(kv.HeaderTD, k, nil) + }); err != nil { + return fmt.Errorf("TruncateTd: %w", err) } return nil } @@ -960,25 +963,9 @@ func AppendReceipts(tx kv.StatelessWriteTx, blockNumber uint64, receipts types.R return nil } -// DeleteReceipts removes all receipt data associated with a block hash. -func DeleteReceipts(db kv.RwTx, number uint64) error { - if err := db.Delete(kv.Receipts, dbutils.EncodeBlockNumber(number), nil); err != nil { - return fmt.Errorf("receipts delete failed: %d, %w", number, err) - } - - prefix := make([]byte, 8) - binary.BigEndian.PutUint64(prefix, number) - if err := db.ForPrefix(kv.Log, prefix, func(k, v []byte) error { - return db.Delete(kv.Log, k, nil) - }); err != nil { - return err - } - return nil -} - -// DeleteNewerReceipts removes all receipt for given block number or newer -func DeleteNewerReceipts(db kv.RwTx, number uint64) error { - if err := db.ForEach(kv.Receipts, dbutils.EncodeBlockNumber(number), func(k, v []byte) error { +// TruncateReceipts removes all receipts for the given block number or newer +func TruncateReceipts(db kv.RwTx, number uint64) error { + if err := db.ForEach(kv.Receipts, dbutils.EncodeBlockNumber(number), func(k, _ []byte) error { return db.Delete(kv.Receipts, k, nil) }); err != nil { return err @@ -986,7 +973,7 @@ func TruncateReceipts(db kv.RwTx, number uint64) error { from := make([]byte, 8) binary.BigEndian.PutUint64(from, number) - if err := db.ForEach(kv.Log, from, func(k, v []byte) error { + if err := db.ForEach(kv.Log, from, func(k, _ []byte) error { return db.Delete(kv.Log, k, nil) }); err != nil { return err @@ -1090,8 +1077,8 @@ func min(a, b uint64) uint64 { // DeleteAncientBlocks - delete [1, to) old blocks after moving it to snapshots.
// keeps genesis in db: [1, to) -// doesn't delete Reciepts -// doesn't delete Canonical markers +// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs +// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty func DeleteAncientBlocks(db kv.RwTx, blockTo uint64, blocksDeleteLimit int) error { c, err := db.Cursor(kv.Headers) if err != nil { @@ -1099,22 +1086,17 @@ func DeleteAncientBlocks(db kv.RwTx, blockTo uint64, blocksDeleteLimit int) erro } defer c.Close() - var stopAtBlock, firstNonGenesisInDB uint64 - { - k, _, err := c.First() - if err != nil { - return err - } - firstNonGenesisInDB = binary.BigEndian.Uint64(k) - if firstNonGenesisInDB == 0 { // keep genesis in DB - k, _, err := c.Next() - if err != nil { - return err - } - firstNonGenesisInDB = binary.BigEndian.Uint64(k) - } - stopAtBlock = min(blockTo, firstNonGenesisInDB+uint64(blocksDeleteLimit)) + // find first non-genesis block + k, _, err := c.Seek(dbutils.EncodeBlockNumber(1)) + if err != nil { + return err + } + if k == nil { //nothing to delete + return nil } + blockFrom := binary.BigEndian.Uint64(k) + stopAtBlock := min(blockTo, blockFrom+uint64(blocksDeleteLimit)) + for k, _, err := c.Current(); k != nil; k, _, err = c.Next() { if err != nil { return err } @@ -1155,74 +1137,110 @@ if err := db.Delete(kv.BlockBody, k, nil); err != nil { return err } - if err := db.Delete(kv.Senders, k, nil); err != nil { - return err - } } return nil } -func DeleteNewBlocks(db kv.RwTx, blockFrom uint64) error { - c, err := db.Cursor(kv.Headers) +func LastKey(tx kv.Tx, table string) ([]byte, error) { + c, err := tx.Cursor(table) if err != nil { - return err + return nil, err + } + defer c.Close() + k, _, err := c.Last() + if err != nil { + return nil, err + } + return k, nil +} + +func FirstKey(tx kv.Tx, table string) ([]byte, error) { + c, err := tx.Cursor(table) + if err != nil { + return nil, err } defer c.Close() + k, _, err := c.First() + if err != nil { + return nil, err + } + return k, nil +} - for k, _, err := c.Seek(dbutils.EncodeBlockNumber(blockFrom)); k != nil; k, _, err = c.Next() { +// TruncateBlocks - deletes blocks >= blockFrom +// does decrement sequences of kv.EthTx and kv.NonCanonicalTxs +// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty +func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + + c, err := tx.Cursor(kv.Headers) + if err != nil { + return err + } + defer c.Close() + if blockFrom < 1 { //protect genesis + blockFrom = 1 + } + for k, _, err := c.Last(); k != nil; k, _, err = c.Prev() { if err != nil { return err } n := binary.BigEndian.Uint64(k) - - canonicalHash, err := ReadCanonicalHash(db, n) + if n < blockFrom { // [from, to) + break + } + canonicalHash, err := ReadCanonicalHash(tx, n) if err != nil { return err } isCanonical := bytes.Equal(k[8:], canonicalHash[:]) - b, err := ReadBodyForStorageByKey(db, k) + b, err := ReadBodyForStorageByKey(tx, k) if err != nil { return err } - if b != nil { // b == nil means body were marked as non-canonical already - txIDBytes := make([]byte, 8) - for txID := b.BaseTxId; txID < b.BaseTxId+uint64(b.TxAmount); txID++ { - binary.BigEndian.PutUint64(txIDBytes, txID) - bucket := kv.EthTx - if !isCanonical { - bucket = kv.NonCanonicalTxs - } - if err := db.Delete(bucket, txIDBytes, nil); err != nil { + if b != nil { + bucket := kv.EthTx + if !isCanonical {
bucket = kv.NonCanonicalTxs + } + if err := tx.ForEach(bucket, dbutils.EncodeBlockNumber(b.BaseTxId), func(k, _ []byte) error { + if err := tx.Delete(bucket, k, nil); err != nil { return err } + return nil + }); err != nil { + return err + } + if err := ResetSequence(tx, bucket, b.BaseTxId); err != nil { + return err } } - if err := db.Delete(kv.Headers, k, nil); err != nil { + if err := tx.Delete(kv.Headers, k, nil); err != nil { return err } - if err := db.Delete(kv.BlockBody, k, nil); err != nil { + if err := tx.Delete(kv.BlockBody, k, nil); err != nil { return err } - if err := db.Delete(kv.Senders, k, nil); err != nil { - return err + + select { + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + log.Info("TruncateBlocks", "block", n) + default: } } - return nil -} - -// DeleteBlock removes all block data associated with a hash. -func DeleteBlock(db kv.RwTx, hash common.Hash, number uint64) error { - if err := DeleteReceipts(db, number); err != nil { - return err - } - DeleteHeader(db, hash, number) - DeleteBody(db, hash, number) - if err := DeleteTd(db, hash, number); err != nil { + // ensure no garbage records left (it may happen if db is inconsistent) + if err := tx.ForEach(kv.BlockBody, dbutils.EncodeBlockNumber(blockFrom), func(k, _ []byte) error { + return tx.Delete(kv.BlockBody, k, nil) + }); err != nil { return err } + return nil } diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 7f49659c06f..d91c4db2ac3 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -18,6 +18,7 @@ package rawdb import ( "bytes" + "context" "encoding/hex" "fmt" "math/big" @@ -61,7 +62,7 @@ func TestHeaderStorage(t *testing.T) { } } // Delete the header and verify the execution - DeleteHeader(tx, header.Hash(), header.Number.Uint64()) + deleteHeader(tx, header.Hash(), header.Number.Uint64()) if entry := ReadHeader(tx, header.Hash(), header.Number.Uint64()); entry != nil { t.Fatalf("Deleted header returned: %v", entry) } @@ -116,7 +117,7 @@ func TestBodyStorage(t *testing.T) { } } // Delete the body and verify the execution - DeleteBody(tx, hash, 0) + deleteBody(tx, hash, 0) if entry := ReadCanonicalBodyWithTransactions(tx, hash, 0); entry != nil { t.Fatalf("Deleted body returned: %v", entry) } @@ -128,6 +129,7 @@ func TestBlockStorage(t *testing.T) { // Create a test block to move around the database and make sure it's really new block := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(1), Extra: []byte("test block"), UncleHash: types.EmptyUncleHash, TxHash: types.EmptyRootHash, @@ -157,15 +159,21 @@ func TestBlockStorage(t *testing.T) { } else if entry.Hash() != block.Hash() { t.Fatalf("Retrieved header mismatch: have %v, want %v", entry, block.Header()) } + if err := TruncateBlocks(context.Background(), tx, 2); err != nil { + t.Fatal(err) + } if entry := ReadCanonicalBodyWithTransactions(tx, block.Hash(), block.NumberU64()); entry == nil { t.Fatalf("Stored body not found") } else if types.DeriveSha(types.Transactions(entry.Transactions)) != types.DeriveSha(block.Transactions()) || types.CalcUncleHash(entry.Uncles) != types.CalcUncleHash(block.Uncles()) { t.Fatalf("Retrieved body mismatch: have %v, want %v", entry, block.Body()) } // Delete the block and verify the execution - if err := DeleteBlock(tx, block.Hash(), block.NumberU64()); err != nil { - t.Fatalf("Could not delete block: %v", err) + if err := TruncateBlocks(context.Background(), tx, block.NumberU64()); err != nil { + t.Fatal(err) } + //if
err := DeleteBlock(tx, block.Hash(), block.NumberU64()); err != nil { + // t.Fatalf("Could not delete block: %v", err) + //} if entry := ReadBlock(tx, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Deleted block returned: %v", entry) } @@ -175,6 +183,14 @@ func TestBlockStorage(t *testing.T) { if entry := ReadCanonicalBodyWithTransactions(tx, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Deleted body returned: %v", entry) } + + // write again and delete it as old one + if err := WriteBlock(tx, block); err != nil { + t.Fatalf("Could not write block: %v", err) + } + if err := DeleteAncientBlocks(tx, 0, 1); err != nil { + t.Fatal(err) + } } // Tests that partial block contents don't get reassembled into full blocks. @@ -193,7 +209,7 @@ func TestPartialBlockStorage(t *testing.T) { if entry := ReadBlock(tx, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) } - DeleteHeader(tx, block.Hash(), block.NumberU64()) + deleteHeader(tx, block.Hash(), block.NumberU64()) // Store a body and check that it's not recognized as a block if err := WriteBody(tx, block.Hash(), block.NumberU64(), block.Body()); err != nil { @@ -202,7 +218,7 @@ func TestPartialBlockStorage(t *testing.T) { if entry := ReadBlock(tx, block.Hash(), block.NumberU64()); entry != nil { t.Fatalf("Non existent block returned: %v", entry) } - DeleteBody(tx, block.Hash(), block.NumberU64()) + deleteBody(tx, block.Hash(), block.NumberU64()) // Store a header and a body separately and check reassembly WriteHeader(tx, header) @@ -245,7 +261,7 @@ func TestTdStorage(t *testing.T) { t.Fatalf("Retrieved TD mismatch: have %v, want %v", entry, td) } // Delete the TD and verify the execution - err = DeleteTd(tx, hash, 0) + err = TruncateTd(tx, 0) if err != nil { t.Fatalf("DeleteTd failed: %v", err) } @@ -286,7 +302,7 @@ func TestCanonicalMappingStorage(t *testing.T) { t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash) } // Delete the TD and verify the execution - err = DeleteCanonicalHash(tx, number) + err = TruncateCanonicalHash(tx, number) if err != nil { t.Fatalf("DeleteCanonicalHash failed: %v", err) } @@ -394,8 +410,8 @@ func TestBlockReceiptStorage(t *testing.T) { } } // Delete the body and ensure that the receipts are no longer returned (metadata can't be recomputed) - DeleteHeader(tx, hash, 0) - DeleteBody(tx, hash, 0) + deleteHeader(tx, hash, 0) + deleteBody(tx, hash, 0) b, senders, err = ReadBlockWithSenders(tx, hash, 0) require.NoError(err) require.Nil(b) @@ -409,7 +425,7 @@ func TestBlockReceiptStorage(t *testing.T) { WriteHeader(tx, header) // Sanity check that body alone without the receipt is a full purge require.NoError(WriteBody(tx, hash, 0, body)) - require.NoError(DeleteReceipts(tx, 0)) + require.NoError(TruncateReceipts(tx, 0)) b, senders, err = ReadBlockWithSenders(tx, hash, 0) require.NoError(err) require.NotNil(b) diff --git a/core/rawdb/bor_receipts.go b/core/rawdb/bor_receipts.go index b34fac9c617..ba186a0af7b 100644 --- a/core/rawdb/bor_receipts.go +++ b/core/rawdb/bor_receipts.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/log/v3" @@ -147,3 +148,13 @@ func ReadBorTransaction(db kv.Tx, hash common.Hash) (*types.Transaction, common. 
var tx types.Transaction = types.NewBorTransaction() return &tx, blockHash, *blockNumber, uint64(bodyForStorage.TxAmount), nil } + +// TruncateBorReceipts removes all Bor receipts for the given block number or newer +func TruncateBorReceipts(db kv.RwTx, number uint64) error { + if err := db.ForEach(kv.BorReceipts, dbutils.EncodeBlockNumber(number), func(k, _ []byte) error { + return db.Delete(kv.BorReceipts, k, nil) + }); err != nil { + return err + } + return nil +} diff --git a/core/skip_analysis.go b/core/skip_analysis.go index 572c9f6588c..44157992400 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -21,7 +21,7 @@ import ( ) // MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const MainnetNotCheckedFrom uint64 = 14611100 +const MainnetNotCheckedFrom uint64 = 14702900 // SkipAnalysis function tells us whether we can skip performing jumpdest analysis // for the historical blocks (on mainnet now but perhaps on the testsnets diff --git a/core/state/plain_state_writer.go b/core/state/plain_state_writer.go index 68b4a059081..c8c7d3f7c81 100644 --- a/core/state/plain_state_writer.go +++ b/core/state/plain_state_writer.go @@ -42,6 +42,7 @@ func (w *PlainStateWriter) SetAccumulator(accumulator *shards.Accumulator) *Plai } func (w *PlainStateWriter) UpdateAccountData(address common.Address, original, account *accounts.Account) error { + //fmt.Printf("UpdateAccount [%x] hashed [%x]\n", address, crypto.Keccak256(address[:])) if w.csw != nil { if err := w.csw.UpdateAccountData(address, original, account); err != nil { return err } @@ -93,6 +94,7 @@ func (w *PlainStateWriter) DeleteAccount(address common.Address, original *accou } func (w *PlainStateWriter) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { + //fmt.Printf("WriteAccountStorage [%x] hashed [%x],loc [%x] hashed [%x], val [%x]\n", address, crypto.Keccak256(address[:]), *key, crypto.Keccak256((*key)[:]), value.Bytes()) if w.csw != nil { if err := w.csw.WriteAccountStorage(address, incarnation, key, original, value); err != nil { return err diff --git a/core/state_processor.go b/core/state_processor.go index 0667b8dba26..044f121edc4 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -143,6 +143,10 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { // Create a new context to be used in the EVM environment + // Set SkipAnalysis before the VM is constructed, so jumpdest analysis is skipped + // for historical blocks already checked for invalid jumps (see core/skip_analysis.go). + cfg.SkipAnalysis = SkipAnalysis(config, header.Number.Uint64()) + var vmenv vm.VMInterface if tx.IsStarkNet() { @@ -152,9 +156,5 @@ vmenv = vm.NewEVM(blockContext, vm.TxContext{}, ibs, config, cfg) } - // Add addresses to access list if applicable - // about the transaction and calling mechanisms.
- cfg.SkipAnalysis = SkipAnalysis(config, header.Number.Uint64()) - return applyTransaction(config, gp, ibs, stateWriter, header, tx, usedGas, vmenv, cfg) } diff --git a/eth/backend.go b/eth/backend.go index 7e946ce153a..3de4032cf5a 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -21,7 +21,6 @@ import ( "context" "errors" "fmt" - "google.golang.org/protobuf/types/known/emptypb" "math/big" "os" "path/filepath" @@ -30,6 +29,9 @@ import ( "sync" "time" + "github.com/ledgerwatch/erigon/eth/ethconsensusconfig" + "google.golang.org/protobuf/types/known/emptypb" + "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/direct" @@ -78,7 +80,7 @@ import ( "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshotsynccli" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" stages2 "github.com/ledgerwatch/erigon/turbo/stages" "github.com/ledgerwatch/log/v3" "google.golang.org/grpc" @@ -216,44 +218,6 @@ func New(stack *node.Node, config *ethconfig.Config, txpoolCfg txpool2.Config, l } backend.gasPrice, _ = uint256.FromBig(config.Miner.GasPrice) - var consensusConfig interface{} - - if chainConfig.Clique != nil { - consensusConfig = &config.Clique - } else if chainConfig.Aura != nil { - config.Aura.Etherbase = config.Miner.Etherbase - consensusConfig = &config.Aura - } else if chainConfig.Parlia != nil { - consensusConfig = &config.Parlia - } else if chainConfig.Bor != nil { - consensusConfig = &config.Bor - } else { - consensusConfig = &config.Ethash - } - - backend.engine = ethconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir()) - - log.Info("Initialising Ethereum protocol", "network", config.NetworkID) - - if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error { - if err = stagedsync.UpdateMetrics(tx); err != nil { - return err - } - - config.Prune, err = prune.EnsureNotChanged(tx, config.Prune) - if err != nil { - return err - } - if err := snapshotsynccli.EnsureNotChanged(tx, config.Snapshot); err != nil { - return err - } - log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String()) - - return nil - }); err != nil { - return nil, err - } - if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } @@ -324,10 +288,11 @@ func New(stack *node.Node, config *ethconfig.Config, txpoolCfg txpool2.Config, l if err != nil { return nil, err } - if err = downloader.CreateTorrentFilesAndAdd(ctx, config.SnapshotDir, backend.downloadProtocols.TorrentClient); err != nil { - return nil, fmt.Errorf("CreateTorrentFilesAndAdd: %w", err) + if err := backend.downloadProtocols.Start(ctx, true); err != nil { + return nil, fmt.Errorf("downloadProtocols start: %w", err) } - bittorrentServer, err := downloader.NewGrpcServer(backend.downloadProtocols.DB, backend.downloadProtocols, config.SnapshotDir, false) + + bittorrentServer, err := downloader.NewGrpcServer(backend.downloadProtocols.DB, backend.downloadProtocols, config.SnapshotDir) if err != nil { return nil, fmt.Errorf("new server: %w", err) } @@ -341,6 +306,44 @@ func New(stack *node.Node, config *ethconfig.Config, txpoolCfg txpool2.Config, l blockReader = snapshotsync.NewBlockReader() } + var consensusConfig interface{} + + if chainConfig.Clique != 
nil { + consensusConfig = &config.Clique + } else if chainConfig.Aura != nil { + config.Aura.Etherbase = config.Miner.Etherbase + consensusConfig = &config.Aura + } else if chainConfig.Parlia != nil { + consensusConfig = &config.Parlia + } else if chainConfig.Bor != nil { + consensusConfig = &config.Bor + } else { + consensusConfig = &config.Ethash + } + + backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots) + + log.Info("Initialising Ethereum protocol", "network", config.NetworkID) + + if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error { + if err = stagedsync.UpdateMetrics(tx); err != nil { + return err + } + + config.Prune, err = prune.EnsureNotChanged(tx, config.Prune) + if err != nil { + return err + } + if err := snap.EnsureNotChanged(tx, config.Snapshot); err != nil { + return err + } + log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String()) + + return nil + }); err != nil { + return nil, err + } + backend.sentryControlServer, err = sentry.NewControlServer(chainKv, stack.Config().NodeName(), chainConfig, genesis.Hash(), backend.engine, backend.config.NetworkID, backend.sentries, config.BlockDownloaderWindow, blockReader) if err != nil { return nil, err diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 7885e9e9da3..2bd72e1fbb2 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -27,25 +27,15 @@ import ( "time" "github.com/c2h5oh/datasize" - "github.com/davecgh/go-spew/spew" "github.com/ledgerwatch/erigon-lib/common/dir" "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/consensus/aura" - "github.com/ledgerwatch/erigon/consensus/aura/consensusconfig" - "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/consensus/clique" - "github.com/ledgerwatch/erigon/consensus/db" "github.com/ledgerwatch/erigon/consensus/ethash" - "github.com/ledgerwatch/erigon/consensus/parlia" - "github.com/ledgerwatch/erigon/consensus/serenity" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/eth/gasprice" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/params/networkname" - "github.com/ledgerwatch/log/v3" ) // FullNodeGPO contains default gasprice oracle settings for full node. 
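// The CreateConsensusEngine constructor removed in the next hunk moves to the new
// eth/ethconsensusconfig package (added later in this patch) and gains a snapshots
// parameter. A hedged sketch of the updated call site, mirroring eth/backend.go above
// (allSnapshots is assumed to be the node's RoSnapshots handle and may be nil when
// snapshot sync is disabled):
//
//	engine := ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig,
//		config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall,
//		stack.DataDir(), allSnapshots)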
@@ -228,65 +218,6 @@ type Config struct { Ethstats string } -func CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, config interface{}, notify []string, noverify bool, HeimdallURL string, WithoutHeimdall bool, datadir string) consensus.Engine { - var eng consensus.Engine - - switch consensusCfg := config.(type) { - case *ethash.Config: - switch consensusCfg.PowMode { - case ethash.ModeFake: - log.Warn("Ethash used in fake mode") - eng = ethash.NewFaker() - case ethash.ModeTest: - log.Warn("Ethash used in test mode") - eng = ethash.NewTester(nil, noverify) - case ethash.ModeShared: - log.Warn("Ethash used in shared mode") - eng = ethash.NewShared() - default: - eng = ethash.New(ethash.Config{ - CachesInMem: consensusCfg.CachesInMem, - CachesLockMmap: consensusCfg.CachesLockMmap, - DatasetDir: consensusCfg.DatasetDir, - DatasetsInMem: consensusCfg.DatasetsInMem, - DatasetsOnDisk: consensusCfg.DatasetsOnDisk, - DatasetsLockMmap: consensusCfg.DatasetsLockMmap, - }, notify, noverify) - } - case *params.ConsensusSnapshotConfig: - if chainConfig.Clique != nil { - eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory)) - } - case *params.AuRaConfig: - if chainConfig.Aura != nil { - var err error - eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName)) - if err != nil { - panic(err) - } - } - case *params.ParliaConfig: - if chainConfig.Parlia != nil { - eng = parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory)) - } - case *params.BorConfig: - if chainConfig.Bor != nil { - borDbPath := filepath.Join(datadir, "bor") // bor consensus path: datadir/bor - eng = bor.New(chainConfig, db.OpenDatabase(borDbPath, logger, false), HeimdallURL, WithoutHeimdall) - } - } - - if eng == nil { - panic("unknown config" + spew.Sdump(config)) - } - - if chainConfig.TerminalTotalDifficulty == nil { - return eng - } else { - return serenity.New(eng) // the Merge - } -} - type SyncMode string const ( diff --git a/eth/ethconsensusconfig/config.go b/eth/ethconsensusconfig/config.go new file mode 100644 index 00000000000..fadd2ab887a --- /dev/null +++ b/eth/ethconsensusconfig/config.go @@ -0,0 +1,78 @@ +package ethconsensusconfig + +import ( + "path/filepath" + + "github.com/davecgh/go-spew/spew" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/aura" + "github.com/ledgerwatch/erigon/consensus/aura/consensusconfig" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/consensus/clique" + "github.com/ledgerwatch/erigon/consensus/db" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/consensus/parlia" + "github.com/ledgerwatch/erigon/consensus/serenity" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/log/v3" +) + +func CreateConsensusEngine(chainConfig *params.ChainConfig, logger log.Logger, config interface{}, notify []string, noverify bool, HeimdallURL string, WithoutHeimdall bool, datadir string, snapshots *snapshotsync.RoSnapshots) consensus.Engine { + var eng consensus.Engine + + switch consensusCfg := config.(type) { + case *ethash.Config: + switch consensusCfg.PowMode { + case ethash.ModeFake: + log.Warn("Ethash used in fake mode") + eng = ethash.NewFaker() + case ethash.ModeTest: + 
log.Warn("Ethash used in test mode") + eng = ethash.NewTester(nil, noverify) + case ethash.ModeShared: + log.Warn("Ethash used in shared mode") + eng = ethash.NewShared() + default: + eng = ethash.New(ethash.Config{ + CachesInMem: consensusCfg.CachesInMem, + CachesLockMmap: consensusCfg.CachesLockMmap, + DatasetDir: consensusCfg.DatasetDir, + DatasetsInMem: consensusCfg.DatasetsInMem, + DatasetsOnDisk: consensusCfg.DatasetsOnDisk, + DatasetsLockMmap: consensusCfg.DatasetsLockMmap, + }, notify, noverify) + } + case *params.ConsensusSnapshotConfig: + if chainConfig.Clique != nil { + eng = clique.New(chainConfig, consensusCfg, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory)) + } + case *params.AuRaConfig: + if chainConfig.Aura != nil { + var err error + eng, err = aura.NewAuRa(chainConfig.Aura, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), chainConfig.Aura.Etherbase, consensusconfig.GetConfigByChain(chainConfig.ChainName)) + if err != nil { + panic(err) + } + } + case *params.ParliaConfig: + if chainConfig.Parlia != nil { + eng = parlia.New(chainConfig, db.OpenDatabase(consensusCfg.DBPath, logger, consensusCfg.InMemory), snapshots) + } + case *params.BorConfig: + if chainConfig.Bor != nil { + borDbPath := filepath.Join(datadir, "bor") // bor consensus path: datadir/bor + eng = bor.New(chainConfig, db.OpenDatabase(borDbPath, logger, false), HeimdallURL, WithoutHeimdall) + } + } + + if eng == nil { + panic("unknown config" + spew.Sdump(config)) + } + + if chainConfig.TerminalTotalDifficulty == nil { + return eng + } else { + return serenity.New(eng) // the Merge + } +} diff --git a/eth/filters/filter.go b/eth/filters/filter.go index cd3c382b9bf..df78dffeb29 100644 --- a/eth/filters/filter.go +++ b/eth/filters/filter.go @@ -297,7 +297,7 @@ Logs: } // If the to filtered topics is greater than the amount of topics in logs, skip. 
if len(topics) > len(log.Topics) { - continue Logs + continue } for i, sub := range topics { match := len(sub) == 0 // empty rule set == wildcard diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index bb7c94e340e..5224e3c80c5 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -110,7 +110,7 @@ func (s *PruneState) DoneAt(db kv.Putter, blockNum uint64) error { } // PruneTable has `limit` parameter to avoid too large data deletes per one sync cycle - better delete by small portions to reduce db.FreeList size -func PruneTable(tx kv.RwTx, table string, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context, limit int) error { +func PruneTable(tx kv.RwTx, table string, pruneTo uint64, ctx context.Context, limit int) error { c, err := tx.RwCursor(table) if err != nil { @@ -133,8 +133,6 @@ func PruneTable(tx kv.RwTx, table string, logPrefix string, pruneTo uint64, logE break } select { - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", table, "block", blockNum) case <-ctx.Done(): return libcommon.ErrStopped default: diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 1a244ca1ef3..4de8724e1cf 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -10,7 +10,6 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/interfaces" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" @@ -258,10 +257,11 @@ func logProgressBodies(logPrefix string, committed uint64, prevDeliveredCount, d runtime.ReadMemStats(&m) log.Info(fmt.Sprintf("[%s] Wrote block bodies", logPrefix), "block_num", committed, - "delivery/sec", common.StorageSize(speed), - "wasted/sec", common.StorageSize(wastedSpeed), - "alloc", common.StorageSize(m.Alloc), - "sys", common.StorageSize(m.Sys)) + "delivery/sec", libcommon.ByteCount(uint64(speed)), + "wasted/sec", libcommon.ByteCount(uint64(wastedSpeed)), + "alloc", libcommon.ByteCount(m.Alloc), + "sys", libcommon.ByteCount(m.Sys), + ) } func UnwindBodiesStage(u *UnwindState, tx kv.RwTx, cfg BodiesCfg, ctx context.Context) (err error) { diff --git a/eth/stagedsync/stage_call_traces.go b/eth/stagedsync/stage_call_traces.go index 47a2dc8ff34..163327e37da 100644 --- a/eth/stagedsync/stage_call_traces.go +++ b/eth/stagedsync/stage_call_traces.go @@ -14,10 +14,11 @@ import ( "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" ) @@ -141,8 +142,8 @@ func promoteCallTraces(logPrefix string, tx kv.RwTx, startBlock, endBlock uint64 log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, "blk/second", speed, - "alloc", common.StorageSize(m.Alloc), - "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), + "sys", libcommon.ByteCount(m.Sys)) case <-checkFlushEvery.C: if needFlush64(froms, bufLimit) { if err := flushBitmaps64(collectorFrom, froms); err != nil { @@ -168,6 +169,39 @@ func promoteCallTraces(logPrefix string, tx kv.RwTx, startBlock, endBlock 
uint64 return err } + // Clean up before loading call traces to reclaim space + var prunedMin uint64 = math.MaxUint64 + var prunedMax uint64 = 0 + for k, _, err = traceCursor.First(); k != nil; k, _, err = traceCursor.NextNoDup() { + if err != nil { + return err + } + blockNum := binary.BigEndian.Uint64(k) + if blockNum+params.FullImmutabilityThreshold >= endBlock { + break + } + select { + default: + case <-logEvery.C: + var m runtime.MemStats + runtime.ReadMemStats(&m) + log.Info(fmt.Sprintf("[%s] Pruning call trace intermediate table", logPrefix), "number", blockNum, + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) + } + if err = traceCursor.DeleteCurrentDuplicates(); err != nil { + return fmt.Errorf("remove trace call set for block %d: %w", blockNum, err) + } + if blockNum < prunedMin { + prunedMin = blockNum + } + if blockNum > prunedMax { + prunedMax = blockNum + } + } + if prunedMax != 0 && prunedMax > prunedMin+16 { + log.Info(fmt.Sprintf("[%s] Pruned call trace intermediate table", logPrefix), "from", prunedMin, "to", prunedMax) + } + if err := finaliseCallTraces(collectorFrom, collectorTo, logPrefix, tx, quit); err != nil { return err } @@ -305,8 +339,8 @@ func DoUnwindCallTraces(logPrefix string, db kv.RwTx, from, to uint64, ctx conte log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, "blk/second", speed, - "alloc", common.StorageSize(m.Alloc), - "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), + "sys", libcommon.ByteCount(m.Sys)) case <-ctx.Done(): return libcommon.ErrStopped default: @@ -399,7 +433,7 @@ func pruneCallTraces(tx kv.RwTx, logPrefix string, pruneTo uint64, ctx context.C case <-logEvery.C: var m runtime.MemStats runtime.ReadMemStats(&m) - log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + log.Info(fmt.Sprintf("[%s] Progress", logPrefix), "number", blockNum, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) case <-ctx.Done(): return libcommon.ErrStopped default: diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index c1a4b3fa41a..4f166536c1e 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -368,18 +368,18 @@ func logProgress(logPrefix string, prevBlock uint64, prevTime time.Time, current runtime.ReadMemStats(&m) var logpairs = []interface{}{ "number", currentBlock, - "blk/s", speed, - "tx/s", speedTx, - "Mgas/s", speedMgas, - "gasState", gasState, + "blk/s", fmt.Sprintf("%.1f", speed), + "tx/s", fmt.Sprintf("%.1f", speedTx), + "Mgas/s", fmt.Sprintf("%.1f", speedMgas), + "gasState", fmt.Sprintf("%.2f", gasState), } if estimatedTime > 0 { logpairs = append(logpairs, "estimated duration", estimatedTime) } if batch != nil { - logpairs = append(logpairs, "batch", common.StorageSize(batch.BatchSize())) + logpairs = append(logpairs, "batch", libcommon.ByteCount(uint64(batch.BatchSize()))) } - logpairs = append(logpairs, "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys)) + logpairs = append(logpairs, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) log.Info(fmt.Sprintf("[%s] Executed blocks", logPrefix), logpairs...) 
return currentBlock, currentTx, currentTime @@ -519,11 +519,14 @@ func unwindExecutionStage(u *UnwindState, s *StageState, tx kv.RwTx, quit <-chan return err } - if err := rawdb.DeleteNewerReceipts(tx, u.UnwindPoint+1); err != nil { - return fmt.Errorf("walking receipts: %w", err) + if err := rawdb.TruncateReceipts(tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("truncate receipts: %w", err) + } + if err := rawdb.TruncateBorReceipts(tx, u.UnwindPoint+1); err != nil { + return fmt.Errorf("truncate bor receipts: %w", err) } if err := rawdb.DeleteNewerEpochs(tx, u.UnwindPoint+1); err != nil { - return fmt.Errorf("walking epoch: %w", err) + return fmt.Errorf("delete newer epochs: %w", err) } // Truncate CallTraceSet @@ -587,11 +590,11 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con } if cfg.prune.Receipts.Enabled() { - if err = PruneTable(tx, kv.Receipts, logPrefix, cfg.prune.Receipts.PruneTo(s.ForwardProgress), logEvery, ctx, math.MaxInt32); err != nil { + if err = PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } // LogIndex.Prune will read everything what not pruned here - if err = PruneTable(tx, kv.Log, logPrefix, cfg.prune.Receipts.PruneTo(s.ForwardProgress), logEvery, ctx, math.MaxInt32); err != nil { + if err = PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index dc61302b983..3a619a9e9ea 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -23,7 +23,6 @@ import ( "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" @@ -139,72 +138,6 @@ func SpawnStageHeaders( } } -func finishHandlingForkChoice( - forkChoice *engineapi.ForkChoiceMessage, - headHeight uint64, - s *StageState, - tx kv.RwTx, - cfg HeadersCfg, - useExternalTx bool, -) error { - log.Info(fmt.Sprintf("[%s] Unsettled forkchoice after unwind", s.LogPrefix()), "height", headHeight, "forkchoice", forkChoice) - - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() - - if err := fixCanonicalChain(s.LogPrefix(), logEvery, headHeight, forkChoice.HeadBlockHash, tx, cfg.blockReader); err != nil { - return err - } - - if err := rawdb.WriteHeadHeaderHash(tx, forkChoice.HeadBlockHash); err != nil { - return err - } - - sendErrResponse := cfg.hd.GetPendingPayloadStatus() != (common.Hash{}) - - safeIsCanonical, err := rawdb.IsCanonicalHash(tx, forkChoice.SafeBlockHash) - if err != nil { - return err - } - if !safeIsCanonical { - log.Warn(fmt.Sprintf("[%s] Non-canonical SafeBlockHash", s.LogPrefix()), "forkChoice", forkChoice) - if sendErrResponse { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - CriticalError: errors.New("safe block is not an ancestor of head block"), - } - cfg.hd.ClearPendingPayloadStatus() - sendErrResponse = false - } - } - - finalizedIsCanonical, err := rawdb.IsCanonicalHash(tx, forkChoice.FinalizedBlockHash) - if err != nil { - return err - } - if !finalizedIsCanonical { - log.Warn(fmt.Sprintf("[%s] Non-canonical FinalizedBlockHash", s.LogPrefix()), "forkChoice", forkChoice) - if sendErrResponse { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - 
CriticalError: errors.New("finalized block is not an ancestor of head block"), - } - cfg.hd.ClearPendingPayloadStatus() - } - } - - if err := s.Update(tx, headHeight); err != nil { - return err - } - - if !useExternalTx { - if err := tx.Commit(); err != nil { - return err - } - } - - cfg.hd.ClearUnsettledForkChoice() - return nil -} - // HeadersPOS processes Proof-of-Stake requests (newPayload, forkchoiceUpdated) func HeadersPOS( s *StageState, @@ -263,8 +196,46 @@ func HeadersPOS( return nil } +func safeAndFinalizedBlocksAreCanonical( + forkChoice *engineapi.ForkChoiceMessage, + s *StageState, + tx kv.RwTx, + cfg HeadersCfg, + sendErrResponse bool, +) (bool, error) { + safeIsCanonical, err := rawdb.IsCanonicalHash(tx, forkChoice.SafeBlockHash) + if err != nil { + return false, err + } + if !safeIsCanonical { + log.Warn(fmt.Sprintf("[%s] Non-canonical SafeBlockHash", s.LogPrefix()), "forkChoice", forkChoice) + if sendErrResponse { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + CriticalError: errors.New("safe block is not an ancestor of head block"), + } + } + return false, nil + } + + finalizedIsCanonical, err := rawdb.IsCanonicalHash(tx, forkChoice.FinalizedBlockHash) + if err != nil { + return false, err + } + if !finalizedIsCanonical { + log.Warn(fmt.Sprintf("[%s] Non-canonical FinalizedBlockHash", s.LogPrefix()), "forkChoice", forkChoice) + if sendErrResponse { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + CriticalError: errors.New("finalized block is not an ancestor of head block"), + } + } + return false, nil + } + + return true, nil +} + func startHandlingForkChoice( - forkChoiceMessage *engineapi.ForkChoiceMessage, + forkChoice *engineapi.ForkChoiceMessage, requestStatus engineapi.RequestStatus, requestId int, s *StageState, @@ -274,14 +245,22 @@ func startHandlingForkChoice( cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, ) error { - headerHash := forkChoiceMessage.HeadBlockHash + headerHash := forkChoice.HeadBlockHash log.Info(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op log.Info(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, requestStatus == engineapi.New) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) + if requestStatus == engineapi.New { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + } + return err + } + if canonical && requestStatus == engineapi.New { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: currentHeadHash, @@ -356,11 +335,55 @@ func startHandlingForkChoice( u.UnwindTo(forkingPoint, common.Hash{}) log.Trace(fmt.Sprintf("[%s] Fork choice unwind finished", s.LogPrefix())) - cfg.hd.SetUnsettledForkChoice(forkChoiceMessage, headerNumber) + cfg.hd.SetUnsettledForkChoice(forkChoice, headerNumber) return nil } +func finishHandlingForkChoice( + forkChoice *engineapi.ForkChoiceMessage, + headHeight uint64, + s *StageState, + tx kv.RwTx, + cfg HeadersCfg, + useExternalTx bool, +) error { + log.Info(fmt.Sprintf("[%s] Unsettled forkchoice after unwind", s.LogPrefix()), "height", headHeight, "forkchoice", forkChoice) + + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + + if err := 
fixCanonicalChain(s.LogPrefix(), logEvery, headHeight, forkChoice.HeadBlockHash, tx, cfg.blockReader); err != nil { + return err + } + + if err := rawdb.WriteHeadHeaderHash(tx, forkChoice.HeadBlockHash); err != nil { + return err + } + + sendErrResponse := cfg.hd.GetPendingPayloadStatus() != (common.Hash{}) + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, sendErrResponse) + if err != nil { + return err + } + if !canonical { + cfg.hd.ClearPendingPayloadStatus() + } + + if err := s.Update(tx, headHeight); err != nil { + return err + } + + if !useExternalTx { + if err := tx.Commit(); err != nil { + return err + } + } + + cfg.hd.ClearUnsettledForkChoice() + return nil +} + func handleNewPayload( payloadMessage *engineapi.PayloadMessage, requestStatus engineapi.RequestStatus, @@ -847,11 +870,6 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te defer tx.Rollback() } // Delete canonical hashes that are being unwound - var headerProgress uint64 - headerProgress, err = stages.GetStageProgress(tx, stages.Headers) - if err != nil { - return err - } badBlock := u.BadBlock != (common.Hash{}) if badBlock { cfg.hd.ReportBadHeader(u.BadBlock) @@ -875,10 +893,8 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te return fmt.Errorf("iterate over headers to mark bad headers: %w", err) } } - for blockHeight := headerProgress; blockHeight > u.UnwindPoint; blockHeight-- { - if err = rawdb.DeleteCanonicalHash(tx, blockHeight); err != nil { - return err - } + if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1); err != nil { + return err } if badBlock { var maxTd big.Int @@ -951,8 +967,8 @@ func logProgressHeaders(logPrefix string, prev, now uint64) uint64 { log.Info(fmt.Sprintf("[%s] Wrote block headers", logPrefix), "number", now, "blk/second", speed, - "alloc", common.StorageSize(m.Alloc), - "sys", common.StorageSize(m.Sys)) + "alloc", libcommon.ByteCount(m.Alloc), + "sys", libcommon.ByteCount(m.Sys)) return now } @@ -1086,8 +1102,8 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if workers < 1 { workers = 1 } - if workers > 4 { - workers = 4 + if workers > 2 { + workers = 2 // 4 workers get killed on 16Gb RAM } if err := snapshotsync.BuildIndices(ctx, cfg.snapshots, cfg.snapshotDir, *chainID, cfg.tmpdir, cfg.snapshots.IndicesAvailable(), workers, log.LvlInfo); err != nil { return err @@ -1100,14 +1116,10 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R } } - if s.BlockNumber < 2 { // allow genesis + if s.BlockNumber < cfg.snapshots.BlocksAvailable() { // allow genesis logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - //tx.ClearBucket(kv.HeaderCanonical) - //tx.ClearBucket(kv.HeaderTD) - //tx.ClearBucket(kv.HeaderNumber) - // fill some small tables from snapshots, in future we may store this data in snapshots also, but // for now easier just store them in db td := big.NewInt(0) @@ -1149,10 +1161,16 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if !ok { return fmt.Errorf("snapshot not found for block: %d", cfg.snapshots.BlocksAvailable()) } - } - - // Add last headers from snapshots to HeaderDownloader (as persistent links) - if s.BlockNumber < cfg.snapshots.BlocksAvailable() { + if err := s.Update(tx, cfg.snapshots.BlocksAvailable()); err != nil { + return err + } + canonicalHash, err := cfg.blockReader.CanonicalHash(ctx, tx, cfg.snapshots.BlocksAvailable()) + if err != nil { + return err + 
} + if err = rawdb.WriteHeadHeaderHash(tx, canonicalHash); err != nil { + return err + } if err := cfg.hd.AddHeaderFromSnapshot(tx, cfg.snapshots.BlocksAvailable(), cfg.blockReader); err != nil { return err } @@ -1174,12 +1192,10 @@ func WaitForDownloader(ctx context.Context, tx kv.RwTx, cfg HeadersCfg) error { // send all hashes to the Downloader service preverified := snapshotsCfg.Preverified - var prevBytesCompleted uint64 - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() + req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, len(preverified))} + i := 0 for _, p := range preverified { - req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 1)} - req.Items[0] = &proto_downloader.DownloadItem{ + req.Items[i] = &proto_downloader.DownloadItem{ TorrentHash: downloadergrpc.String2Proto(p.Hash), Path: p.Name, } @@ -1196,44 +1212,41 @@ func WaitForDownloader(ctx context.Context, tx kv.RwTx, cfg HeadersCfg) error { } break } - - if reply, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { - log.Warn("Error while waiting for snapshots progress", "err", err) - } else if reply.Completed { + if _, err := cfg.snapshotDownloader.Download(ctx, req); err != nil { + log.Error("[Snapshots] call downloader", "err", err) + time.Sleep(10 * time.Second) continue } + break + } + logEvery := time.NewTicker(logInterval / 3) + defer logEvery.Stop() - // Print download progress until all segments are available - Loop: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-checkStatsEvery.C: - if reply, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { - log.Warn("Error while waiting for snapshots progress", "err", err) - } else if reply.Completed { - break Loop - } - case <-logEvery.C: - if reply, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { - log.Warn("Error while waiting for snapshots progress", "err", err) - } else if reply.Completed { - break Loop - } else { - readBytesPerSec := (reply.BytesCompleted - prevBytesCompleted) / uint64(logInterval.Seconds()) - // writeBytesPerSec += (reply.BytesWritten - prevBytesWritten) / int64(logInterval.Seconds()) - - //readiness := 100 * (float64(reply.BytesCompleted) / float64(reply.BytesTotal)) - log.Info("[Snapshots] download", //"progress", fmt.Sprintf("%.2f%%", readiness), - "progress", libcommon.ByteCount(reply.BytesCompleted), - "download", libcommon.ByteCount(readBytesPerSec)+"/s", - "torrent_peers", reply.Peers, - "connections", reply.Connections, - // "upload", libcommon.ByteCount(writeBytesPerSec)+"/s", - ) - prevBytesCompleted = reply.BytesCompleted + // Print download progress until all segments are available +Loop: + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-logEvery.C: + if stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { + log.Warn("Error while waiting for snapshots progress", "err", err) + } else if stats.Completed { + break Loop + } else { + if stats.MetadataReady < stats.FilesTotal { + log.Info(fmt.Sprintf("[Snapshots] Waiting for torrents metadata: %d/%d", stats.MetadataReady, stats.FilesTotal)) + continue } + + log.Info("[Snapshots] download", + "progress", fmt.Sprintf("%.2f%% %s/%s", stats.Progress, libcommon.ByteCount(stats.BytesCompleted), libcommon.ByteCount(stats.BytesTotal)), + "download", libcommon.ByteCount(stats.DownloadRate)+"/s", + "upload", 
libcommon.ByteCount(stats.UploadRate)+"/s",
+				"peers", stats.PeersUnique,
+				"connections", stats.ConnectionsTotal,
+				"files", stats.FilesTotal,
+			)
 			}
 		}
 	}
diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go
index ee605bfc4ea..92d464b8a1a 100644
--- a/eth/stagedsync/stage_mining_exec.go
+++ b/eth/stagedsync/stage_mining_exec.go
@@ -124,7 +124,8 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c
 		current.Receipts = types.Receipts{}
 	}
 
-	_, err := core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter, &cfg.chainConfig, ibs, current.Receipts, epochReader{tx: tx}, chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, true)
+	_, err := core.FinalizeBlockExecution(cfg.engine, stateReader, current.Header, current.Txs, current.Uncles, stateWriter,
+		&cfg.chainConfig, ibs, current.Receipts, epochReader{tx: tx}, chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, true)
 	if err != nil {
 		return err
 	}
diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go
index a9b3f021ecc..a0fe3604bab 100644
--- a/eth/stagedsync/stage_senders.go
+++ b/eth/stagedsync/stage_senders.go
@@ -373,12 +373,23 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co
 		defer tx.Rollback()
 	}
 
+	// With snap sync, old data can be pruned only after a snapshot covering it has been created: see CanDeleteTo()
 	if cfg.blockRetire.Snapshots() != nil && cfg.blockRetire.Snapshots().Cfg().Enabled {
-		if err := retireBlocks(s, tx, cfg, ctx); err != nil {
-			return fmt.Errorf("retireBlocks: %w", err)
+		if !cfg.blockRetire.Snapshots().Cfg().KeepBlocks {
+			canDeleteTo := snapshotsync.CanDeleteTo(s.ForwardProgress, cfg.blockRetire.Snapshots())
+			if err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 1_000); err != nil {
+				return err
+			}
+			if err = PruneTable(tx, kv.Senders, canDeleteTo, ctx, 1_000); err != nil {
+				return err
+			}
+		}
+
+		if err := retireBlocksInSingleBackgroundThread(s, cfg, ctx); err != nil {
+			return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err)
 		}
 	} else if cfg.prune.TxIndex.Enabled() {
-		if err = PruneTable(tx, kv.Senders, s.LogPrefix(), to, logEvery, ctx, 1_000); err != nil {
+		if err = PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil {
 			return err
 		}
 	}
@@ -391,15 +402,7 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co
 	return nil
 }
 
-func retireBlocks(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Context) (err error) {
-	// delete portion of old blocks in any case
-	if !cfg.blockRetire.Snapshots().Cfg().KeepBlocks {
-		canDeleteTo := snapshotsync.CanDeleteTo(s.ForwardProgress, cfg.blockRetire.Snapshots())
-		if err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 1_000); err != nil {
-			return nil
-		}
-	}
-
+func retireBlocksInSingleBackgroundThread(s *PruneState, cfg SendersCfg, ctx context.Context) (err error) {
 	// if something already happens in background - noop
 	if cfg.blockRetire.Working() {
 		return nil
diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go
index f463ef13df6..3ea16cf4f36 100644
--- a/eth/stagedsync/sync.go
+++ b/eth/stagedsync/sync.go
@@ -6,6 +6,7 @@ import (
 	"os"
 	"time"
 
+	libcommon "github.com/ledgerwatch/erigon-lib/common"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/common"
 	"github.com/ledgerwatch/erigon/common/debug"
@@ -300,7 +301,7 @@ func printLogs(tx kv.RwTx, timings []Timing) error {
 			if err1 != nil {
 				return err1
 			}
-
bucketSizes = append(bucketSizes, bucket, common.StorageSize(sz)) + bucketSizes = append(bucketSizes, bucket, libcommon.ByteCount(sz)) } log.Info("Tables", bucketSizes...) } diff --git a/ethdb/privateapi/txpool.go b/ethdb/privateapi/txpool.go index 8f00ae99c01..6a61b27b2d5 100644 --- a/ethdb/privateapi/txpool.go +++ b/ethdb/privateapi/txpool.go @@ -46,31 +46,29 @@ func (s *TxPoolServer) All(context.Context, *proto_txpool.AllRequest) (*proto_tx reply := &proto_txpool.AllReply{} reply.Txs = make([]*proto_txpool.AllReply_Tx, 0, 32) for addr, list := range pending { - addrBytes := addr.Bytes() for i := range list { b, err := rlp.EncodeToBytes(list[i]) if err != nil { return nil, err } reply.Txs = append(reply.Txs, &proto_txpool.AllReply_Tx{ - Sender: addrBytes, - Type: proto_txpool.AllReply_PENDING, - RlpTx: b, + Sender: gointerfaces.ConvertAddressToH160(addr), + TxnType: proto_txpool.AllReply_PENDING, + RlpTx: b, }) } } for addr, list := range queued { - addrBytes := addr.Bytes() for i := range list { b, err := rlp.EncodeToBytes(list[i]) if err != nil { return nil, err } reply.Txs = append(reply.Txs, &proto_txpool.AllReply_Tx{ - Sender: addrBytes, - Type: proto_txpool.AllReply_QUEUED, - RlpTx: b, + Sender: gointerfaces.ConvertAddressToH160(addr), + TxnType: proto_txpool.AllReply_QUEUED, + RlpTx: b, }) } } diff --git a/go.mod b/go.mod index a6d4177baee..cc4e2aeaba6 100644 --- a/go.mod +++ b/go.mod @@ -35,14 +35,14 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220426111915-6745c226947e + github.com/ledgerwatch/erigon-lib v0.0.0-20220503202246-be98ee4a0a53 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/pelletier/go-toml v1.9.5 github.com/pelletier/go-toml/v2 v2.0.0-beta.8 github.com/quasilyte/go-ruleguard/dsl v0.3.19 github.com/rs/cors v1.8.2 - github.com/shirou/gopsutil/v3 v3.22.2 + github.com/shirou/gopsutil/v3 v3.22.3 github.com/spf13/cobra v1.4.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.1 @@ -61,12 +61,12 @@ require ( golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 golang.org/x/time v0.0.0-20220411224347-583f2d630306 - google.golang.org/grpc v1.45.0 + google.golang.org/grpc v1.46.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 google.golang.org/protobuf v1.28.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 - modernc.org/sqlite v1.14.2-0.20211125151325-d4ed92c0a70f + modernc.org/sqlite v1.17.0 pgregory.net/rapid v0.4.7 ) @@ -150,26 +150,26 @@ require ( github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tidwall/btree v0.7.2-0.20211211132910-4215444137fc // indirect - github.com/tklauser/go-sysconf v0.3.9 // indirect - github.com/tklauser/numcpus v0.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect + golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // 
indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.10 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect lukechampine.com/uint128 v1.1.1 // indirect - modernc.org/cc/v3 v3.35.18 // indirect - modernc.org/ccgo/v3 v3.12.73 // indirect - modernc.org/libc v1.11.82 // indirect + modernc.org/cc/v3 v3.35.26 // indirect + modernc.org/ccgo/v3 v3.16.2 // indirect + modernc.org/libc v1.15.0 // indirect modernc.org/mathutil v1.4.1 // indirect - modernc.org/memory v1.0.5 // indirect + modernc.org/memory v1.0.7 // indirect modernc.org/opt v0.1.1 // indirect modernc.org/strutil v1.1.1 // indirect modernc.org/token v1.0.0 // indirect diff --git a/go.sum b/go.sum index 3d4bcea1583..99f2fab4c33 100644 --- a/go.sum +++ b/go.sum @@ -174,8 +174,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= @@ -217,7 +217,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f h1:Y/gg/utVetS+WS6htAKCTDralkm/8hLIIUAtLFdbdQ8= @@ -454,8 +454,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220426111915-6745c226947e 
h1:ikt1sQuQBb5GFXfbUVLoPuf/IhUZk9WbPDpecuidTNk= -github.com/ledgerwatch/erigon-lib v0.0.0-20220426111915-6745c226947e/go.mod h1:0VKhW10UjEr7I6DaV+N0KNbXvMygC99qmRl2CfaN9gw= +github.com/ledgerwatch/erigon-lib v0.0.0-20220503202246-be98ee4a0a53 h1:9lOxYT4CPvefJe73NOxhWqHCWW/he+bNM3Yj01CThYU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220503202246-be98ee4a0a53/go.mod h1:Z6hOzGMPdbzDcCs+EV5CEl/a6zOpgXqXL0K5956iXUc= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -476,8 +476,8 @@ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Ky github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= -github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.12 h1:TJ1bhYJPV44phC+IMu1u2K/i5RriLTPe+yc68XDJ1Z0= +github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= @@ -656,8 +656,8 @@ github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5P github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shirou/gopsutil/v3 v3.22.2 h1:wCrArWFkHYIdDxx/FSfF5RB4dpJYW6t7rcp3+zL8uks= -github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 v3.22.3 h1:UebRzEomgMpv61e3hgD1tGooqX5trFbdU/ehphbHd00= +github.com/shirou/gopsutil/v3 v3.22.3/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -704,10 +704,10 @@ github.com/tidwall/btree v0.7.2-0.20211211132910-4215444137fc/go.mod h1:LGm8L/DZ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= -github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev3vTo= -github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= -github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ= -github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= +github.com/tklauser/go-sysconf v0.3.10 
h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/torquem-ch/mdbx-go v0.23.2 h1:7axXl0leix2v8No+mRzeTV32hJrV1817aKhh+hTEpC8= github.com/torquem-ch/mdbx-go v0.23.2/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= @@ -859,8 +859,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -929,19 +930,19 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210326220804-49726bf1d181/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210902050250-f475640dd07b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1095,8 +1096,8 @@ google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -1164,8 +1165,13 @@ modernc.org/cc/v3 v3.35.10/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g modernc.org/cc/v3 v3.35.15/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.35.16/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= modernc.org/cc/v3 v3.35.17/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= -modernc.org/cc/v3 v3.35.18 h1:rMZhRcWrba0y3nVmdiQ7kxAgOOSq2m2f2VzjHLgEs6U= modernc.org/cc/v3 v3.35.18/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.20/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.22/go.mod h1:iPJg1pkwXqAV16SNgFBVYmggfMg6xhs+2oiO0vclK3g= +modernc.org/cc/v3 v3.35.24/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.35.25/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.35.26 h1:S4B+fg6/9krLtfZ9lr7pfKiESopiv+Sm6lUUI3oc0fY= +modernc.org/cc/v3 v3.35.26/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= modernc.org/ccgo/v3 v3.9.5/go.mod h1:umuo2EP2oDSBnD3ckjaVUXMrmeAw8C8OSICVa0iFf60= modernc.org/ccgo/v3 v3.10.0/go.mod h1:c0yBmkRFi7uW4J7fwx/JiijwOjeAeR2NoSaRVFPmjMw= modernc.org/ccgo/v3 v3.11.0/go.mod h1:dGNposbDp9TOZ/1KBxghxtUp/bzErD0/0QW4hhSaBMI= @@ -1195,11 +1201,30 @@ modernc.org/ccgo/v3 v3.12.55/go.mod h1:rsXiIyJi9psOwiBkplOaHye5L4MOOaCjHg1Fxkj7I modernc.org/ccgo/v3 v3.12.56/go.mod h1:ljeFks3faDseCkr60JMpeDb2GSO3TKAmrzm7q9YOcMU= modernc.org/ccgo/v3 v3.12.57/go.mod h1:hNSF4DNVgBl8wYHpMvPqQWDQx8luqxDnNGCMM4NFNMc= modernc.org/ccgo/v3 v3.12.60/go.mod h1:k/Nn0zdO1xHVWjPYVshDeWKqbRWIfif5dtsIOCUVMqM= -modernc.org/ccgo/v3 v3.12.65/go.mod h1:D6hQtKxPNZiY6wDBtehSGKFKmyXn53F8nGTpH+POmS4= modernc.org/ccgo/v3 v3.12.66/go.mod h1:jUuxlCFZTUZLMV08s7B1ekHX5+LIAurKTTaugUr/EhQ= modernc.org/ccgo/v3 v3.12.67/go.mod h1:Bll3KwKvGROizP2Xj17GEGOTrlvB1XcVaBrC90ORO84= -modernc.org/ccgo/v3 v3.12.73 
h1:AMk4wEpzWjpODXohKvvnlwLob4Xk8tq3we6CwYh88mA= modernc.org/ccgo/v3 v3.12.73/go.mod h1:hngkB+nUUqzOf3iqsM48Gf1FZhY599qzVg1iX+BT3cQ= +modernc.org/ccgo/v3 v3.12.81/go.mod h1:p2A1duHoBBg1mFtYvnhAnQyI6vL0uw5PGYLSIgF6rYY= +modernc.org/ccgo/v3 v3.12.84/go.mod h1:ApbflUfa5BKadjHynCficldU1ghjen84tuM5jRynB7w= +modernc.org/ccgo/v3 v3.12.86/go.mod h1:dN7S26DLTgVSni1PVA3KxxHTcykyDurf3OgUzNqTSrU= +modernc.org/ccgo/v3 v3.12.90/go.mod h1:obhSc3CdivCRpYZmrvO88TXlW0NvoSVvdh/ccRjJYko= +modernc.org/ccgo/v3 v3.12.92/go.mod h1:5yDdN7ti9KWPi5bRVWPl8UNhpEAtCjuEE7ayQnzzqHA= +modernc.org/ccgo/v3 v3.13.1/go.mod h1:aBYVOUfIlcSnrsRVU8VRS35y2DIfpgkmVkYZ0tpIXi4= +modernc.org/ccgo/v3 v3.15.9/go.mod h1:md59wBwDT2LznX/OTCPoVS6KIsdRgY8xqQwBV+hkTH0= +modernc.org/ccgo/v3 v3.15.10/go.mod h1:wQKxoFn0ynxMuCLfFD09c8XPUCc8obfchoVR9Cn0fI8= +modernc.org/ccgo/v3 v3.15.12/go.mod h1:VFePOWoCd8uDGRJpq/zfJ29D0EVzMSyID8LCMWYbX6I= +modernc.org/ccgo/v3 v3.15.14/go.mod h1:144Sz2iBCKogb9OKwsu7hQEub3EVgOlyI8wMUPGKUXQ= +modernc.org/ccgo/v3 v3.15.15/go.mod h1:z5qltXjU4PJl0pE5nhYQCvA9DhPHiWsl5GWl89+NSYE= +modernc.org/ccgo/v3 v3.15.16/go.mod h1:XbKRMeMWMdq712Tr5ECgATYMrzJ+g9zAZEj2ktzBe24= +modernc.org/ccgo/v3 v3.15.17/go.mod h1:bofnFkpRFf5gLY+mBZIyTW6FEcp26xi2lgOFk2Rlvs0= +modernc.org/ccgo/v3 v3.15.19/go.mod h1:TDJj+DxR26pkDteH2E5WQDj/xlmtsX7JdzkJkaZhOVU= +modernc.org/ccgo/v3 v3.16.0/go.mod h1:w55kPTAqvRMAYS3Lwij6qhqIuBEYS3Z8QtDkjD8cnik= +modernc.org/ccgo/v3 v3.16.1/go.mod h1:w55kPTAqvRMAYS3Lwij6qhqIuBEYS3Z8QtDkjD8cnik= +modernc.org/ccgo/v3 v3.16.2 h1:FUklsEMps3Y2heuTOmn/l6mv83nQgCjW3nsU+1JXzuQ= +modernc.org/ccgo/v3 v3.16.2/go.mod h1:w55kPTAqvRMAYS3Lwij6qhqIuBEYS3Z8QtDkjD8cnik= +modernc.org/ccorpus v1.11.1/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= modernc.org/libc v1.9.8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= @@ -1231,31 +1256,47 @@ modernc.org/libc v1.11.54/go.mod h1:S/FVnskbzVUrjfBqlGFIPA5m7UwB3n9fojHhCNfSsnw= modernc.org/libc v1.11.55/go.mod h1:j2A5YBRm6HjNkoSs/fzZrSxCuwWqcMYTDPLNx0URn3M= modernc.org/libc v1.11.56/go.mod h1:pakHkg5JdMLt2OgRadpPOTnyRXm/uzu+Yyg/LSLdi18= modernc.org/libc v1.11.58/go.mod h1:ns94Rxv0OWyoQrDqMFfWwka2BcaF6/61CqJRK9LP7S8= -modernc.org/libc v1.11.70/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= modernc.org/libc v1.11.71/go.mod h1:DUOmMYe+IvKi9n6Mycyx3DbjfzSKrdr/0Vgt3j7P5gw= modernc.org/libc v1.11.75/go.mod h1:dGRVugT6edz361wmD9gk6ax1AbDSe0x5vji0dGJiPT0= -modernc.org/libc v1.11.82 h1:CSl/6n4odvPYWKKqBtFb8e0ZWVTjxDqwxTjaoee9V7E= modernc.org/libc v1.11.82/go.mod h1:NF+Ek1BOl2jeC7lw3a7Jj5PWyHPwWD4aq3wVKxqV1fI= +modernc.org/libc v1.11.86/go.mod h1:ePuYgoQLmvxdNT06RpGnaDKJmDNEkV7ZPKI2jnsvZoE= +modernc.org/libc v1.11.87/go.mod h1:Qvd5iXTeLhI5PS0XSyqMY99282y+3euapQFxM7jYnpY= +modernc.org/libc v1.11.88/go.mod h1:h3oIVe8dxmTcchcFuCcJ4nAWaoiwzKCdv82MM0oiIdQ= +modernc.org/libc v1.11.98/go.mod h1:ynK5sbjsU77AP+nn61+k+wxUGRx9rOFcIqWYYMaDZ4c= +modernc.org/libc v1.11.101/go.mod h1:wLLYgEiY2D17NbBOEp+mIJJJBGSiy7fLL4ZrGGZ+8jI= +modernc.org/libc v1.12.0/go.mod h1:2MH3DaF/gCU8i/UBiVE1VFRos4o523M7zipmwH8SIgQ= +modernc.org/libc v1.14.1/go.mod h1:npFeGWjmZTjFeWALQLrvklVmAxv4m80jnG3+xI8FdJk= +modernc.org/libc v1.14.2/go.mod h1:MX1GBLnRLNdvmK9azU9LCxZ5lMyhrbEMK8rG3X/Fe34= +modernc.org/libc 
v1.14.3/go.mod h1:GPIvQVOVPizzlqyRX3l756/3ppsAgg1QgPxjr5Q4agQ= +modernc.org/libc v1.14.6/go.mod h1:2PJHINagVxO4QW/5OQdRrvMYo+bm5ClpUFfyXCYl9ak= +modernc.org/libc v1.14.7/go.mod h1:f8xfWXW8LW41qb4X5+huVQo5dcfPlq7Cbny2TDheMv0= +modernc.org/libc v1.14.8/go.mod h1:9+JCLb1MWSY23smyOpIPbd5ED+rSS/ieiDWUpdyO3mo= +modernc.org/libc v1.14.10/go.mod h1:y1MtIWhwpJFpLYm6grAThtuXJKEsY6xkdZmXbRngIdo= +modernc.org/libc v1.14.12/go.mod h1:fJdoe23MHu2ruPQkFPPqCpToDi5cckzsbmkI6Ez0LqQ= +modernc.org/libc v1.15.0 h1:/CTHjQ1QO5mkLDeQICuA9Vh0YvhQTMqtCF2urQTaod8= +modernc.org/libc v1.15.0/go.mod h1:H1OKCu+NYa9+uQG8WsP7DndMBP61I4PWH8ivWhbdoWQ= modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8= modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= -modernc.org/memory v1.0.5 h1:XRch8trV7GgvTec2i7jc33YlUI0RKVDBvZ5eZ5m8y14= modernc.org/memory v1.0.5/go.mod h1:B7OYswTRnfGg+4tDH1t1OeUNnsy2viGTdME4tzd+IjM= +modernc.org/memory v1.0.6/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.0.7 h1:UE3cxTRFa5tfUibAV7Jqq8P7zRY0OlJg+yWVIIaluEE= +modernc.org/memory v1.0.7/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A= modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= -modernc.org/sqlite v1.14.2-0.20211125151325-d4ed92c0a70f h1:yQwkmqKCIgLzFIfjfPfZAAxLZernckpo7zGTv37Ahv0= -modernc.org/sqlite v1.14.2-0.20211125151325-d4ed92c0a70f/go.mod h1:YT5XFRKOueohjppHO4cHb54eQlnaUGsZMHoryaCpNo4= +modernc.org/sqlite v1.17.0 h1:yF5JlxCzQOn2WzyfGAPvHbMNx98ifXLno7a97qggXjE= +modernc.org/sqlite v1.17.0/go.mod h1:yMNaeEckF88G+PcfRcZRwGE+XnBkzWl/j15bPsDm4QM= modernc.org/strutil v1.1.1 h1:xv+J1BXY3Opl2ALrBwyfEikFAj8pmqcpnfmuwUwcozs= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= -modernc.org/tcl v1.8.13 h1:V0sTNBw0Re86PvXZxuCub3oO9WrSTqALgrwNZNvLFGw= -modernc.org/tcl v1.8.13/go.mod h1:V+q/Ef0IJaNUSECieLU4o+8IScapxnMyFV6i/7uQlAY= +modernc.org/tcl v1.12.0 h1:Mw2Ukszv5qZbwk/wC9HkDjxhPD4exnd/7/zVxqrB4rY= +modernc.org/tcl v1.12.0/go.mod h1:9zyAWctRV6IAkMTBeGLyYYqcBrTlVy3ubqiY3dzMfwI= modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk= modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= -modernc.org/z v1.2.19 h1:BGyRFWhDVn5LFS5OcX4Yd/MlpRTOc7hOPTdcIpCiUao= -modernc.org/z v1.2.19/go.mod h1:+ZpP0pc4zz97eukOzW3xagV/lS82IpPN9NGG5pNF9vY= +modernc.org/z v1.4.0 h1:IpbQb3bOi5Fz17UVGU/mSor8sKIu/7pdCsmGGnQHcxs= +modernc.org/z v1.4.0/go.mod h1:x6vxerH3hHCPGA3DAM5pERRzuyJEO4UGVfdQC4NZYl0= pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/migrations/txs_begin_end_test.go b/migrations/txs_begin_end_test.go index f15cc892a92..148c4bcc4ae 100644 --- a/migrations/txs_begin_end_test.go +++ b/migrations/txs_begin_end_test.go @@ -41,8 +41,8 @@ func TestTxsBeginEnd(t *testing.T) { return err } + err = rawdb.TruncateCanonicalHash(tx, 7) for i := uint64(7); i < 10; i++ { - err = 
rawdb.DeleteCanonicalHash(tx, i)
 		require.NoError(err)
 		hash := common.Hash{0xa, byte(i)}
 		err = writeRawBodyDeprecated(tx, hash, i, b)
diff --git a/node/defaults.go b/node/defaults.go
index 951571f8996..4210c7a2247 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -43,9 +43,10 @@ var DefaultConfig = Config{
 	WSPort:    DefaultWSPort,
 	WSModules: []string{"net", "web3"},
 	P2P: p2p.Config{
-		ListenAddr:   ":30303",
-		ListenAddr65: ":30304",
-		MaxPeers:     100,
-		NAT:          nat.Any(),
+		ListenAddr:      ":30303",
+		ListenAddr65:    ":30304",
+		MaxPeers:        100,
+		MaxPendingPeers: 1000,
+		NAT:             nat.Any(),
 	},
 }
diff --git a/node/node.go b/node/node.go
index 086e05cd7a4..465e11fce0c 100644
--- a/node/node.go
+++ b/node/node.go
@@ -522,12 +522,12 @@ func OpenDatabase(config *Config, logger log.Logger, label kv.Label) (kv.RwDB, e
 	var openFunc func(exclusive bool) (kv.RwDB, error)
 	log.Info("Opening Database", "label", name, "path", dbPath)
 	openFunc = func(exclusive bool) (kv.RwDB, error) {
-		opts := mdbx.NewMDBX(logger).Path(dbPath).Label(label).DBVerbosity(config.DatabaseVerbosity).MapSize(6 * datasize.TB)
+		opts := mdbx.NewMDBX(logger).Path(dbPath).Label(label).DBVerbosity(config.DatabaseVerbosity)
 		if exclusive {
 			opts = opts.Exclusive()
 		}
 		if label == kv.ChainDB {
-			opts = opts.PageSize(config.MdbxPageSize.Bytes())
+			opts = opts.PageSize(config.MdbxPageSize.Bytes()).MapSize(8 * datasize.TB)
 		}
 		return opts.Open()
 	}
diff --git a/p2p/dial.go b/p2p/dial.go
index d15458f4d35..d1ce4d57d6d 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -146,9 +146,6 @@ type dialConfig struct {
 }
 
 func (cfg dialConfig) withDefaults() dialConfig {
-	if cfg.maxActiveDials == 0 {
-		cfg.maxActiveDials = defaultMaxPendingPeers
-	}
 	if cfg.log == nil {
 		cfg.log = log.Root()
 	}
diff --git a/p2p/discover/common.go b/p2p/discover/common.go
index 63cbcd091c0..c8a96b0c068 100644
--- a/p2p/discover/common.go
+++ b/p2p/discover/common.go
@@ -19,7 +19,9 @@ package discover
 import (
 	"context"
 	"crypto/ecdsa"
 	"net"
+	"time"
 
 	"github.com/ledgerwatch/erigon/common/mclock"
+	"github.com/ledgerwatch/erigon/crypto"
 	"github.com/ledgerwatch/erigon/p2p/enode"
@@ -48,6 +50,9 @@ type Config struct {
 	Log          log.Logger         // if set, log messages go here
 	ValidSchemes enr.IdentityScheme // allowed identity schemes
 	Clock        mclock.Clock
+	ReplyTimeout time.Duration
+
+	PrivateKeyGenerator func() (*ecdsa.PrivateKey, error)
 }
 
 func (cfg Config) withDefaults() Config {
@@ -60,6 +65,12 @@ func (cfg Config) withDefaults() Config {
 	if cfg.Clock == nil {
 		cfg.Clock = mclock.System{}
 	}
+	if cfg.ReplyTimeout == 0 {
+		cfg.ReplyTimeout = respTimeout
+	}
+	if cfg.PrivateKeyGenerator == nil {
+		cfg.PrivateKeyGenerator = crypto.GenerateKey
+	}
 	return cfg
 }
diff --git a/p2p/discover/table_integration_test.go b/p2p/discover/table_integration_test.go
new file mode 100644
index 00000000000..6bb9fa3ad98
--- /dev/null
+++ b/p2p/discover/table_integration_test.go
@@ -0,0 +1,28 @@
+//go:build integration
+// +build integration
+
+package discover
+
+import (
+	"math/rand"
+	"testing"
+	"testing/quick"
+	"time"
+)
+
+func TestTable_findNodeByID_quickCheck(t *testing.T) {
+	t.Parallel()
+
+	config := quick.Config{
+		MaxCount: 1000,
+		Rand:     rand.New(rand.NewSource(time.Now().Unix())),
+	}
+
+	test := func(nodesCount uint16, resultsCount byte) bool {
+		return testTableFindNodeByIDRun(t, nodesCount, resultsCount, config.Rand)
+	}
+
+	if err := quick.Check(test, &config); err != nil {
+		t.Error(err)
+	}
+}
diff --git a/p2p/discover/table_test.go b/p2p/discover/table_test.go
index 0241ed84779..0e61ffb92b9 100644
--- 
a/p2p/discover/table_test.go +++ b/p2p/discover/table_test.go @@ -189,31 +189,35 @@ func checkIPLimitInvariant(t *testing.T, tab *Table) { } } -func TestTable_findnodeByID(t *testing.T) { - t.Parallel() - - test := func(test *closeTest) bool { +func testTableFindNodeByIDRun(t *testing.T, nodesCountGen uint16, resultsCountGen byte, rand *rand.Rand) bool { + if !t.Skipped() { // for any node table, Target and N transport := newPingRecorder() tab, db := newTestTable(transport) defer db.Close() defer tab.close() - fillTable(tab, test.All) + + nodesCount := int(nodesCountGen) % (bucketSize*nBuckets + 1) + testNodes := generateNodes(rand, nodesCount) + fillTable(tab, testNodes) + + target := enode.ID{} + resultsCount := int(resultsCountGen) % (bucketSize + 1) // check that closest(Target, N) returns nodes - result := tab.findnodeByID(test.Target, test.N, false).entries + result := tab.findnodeByID(target, resultsCount, false).entries if hasDuplicates(result) { t.Errorf("result contains duplicates") return false } - if !sortedByDistanceTo(test.Target, result) { + if !sortedByDistanceTo(target, result) { t.Errorf("result is not sorted by distance to target") return false } // check that the number of results is min(N, tablen) - wantN := test.N - if tlen := tab.len(); tlen < test.N { + wantN := resultsCount + if tlen := tab.len(); tlen < resultsCount { wantN = tlen } if len(result) != wantN { @@ -230,9 +234,9 @@ func TestTable_findnodeByID(t *testing.T) { continue // don't run the check below for nodes in result } farthestResult := result[len(result)-1].ID() - if enode.DistCmp(test.Target, n.ID(), farthestResult) < 0 { + if enode.DistCmp(target, n.ID(), farthestResult) < 0 { t.Errorf("table contains node that is closer to target but it's not in result") - t.Logf(" Target: %v", test.Target) + t.Logf(" Target: %v", target) t.Logf(" Farthest Result: %v", farthestResult) t.Logf(" ID: %v", n.ID()) return false @@ -241,9 +245,50 @@ func TestTable_findnodeByID(t *testing.T) { } return true } - if err := quick.Check(test, quickcfg()); err != nil { - t.Error(err) - } + return true +} + +func TestTable_findNodeByID_examples(t *testing.T) { + t.Parallel() + + randGen := rand.New(rand.NewSource(time.Now().Unix())) + + t.Run("n0r1", func(t *testing.T) { + testTableFindNodeByIDRun(t, 0, 1, randGen) + }) + t.Run("n1r1", func(t *testing.T) { + testTableFindNodeByIDRun(t, 1, 1, randGen) + }) + t.Run("n16r1", func(t *testing.T) { + testTableFindNodeByIDRun(t, bucketSize, 1, randGen) + }) + t.Run("nMr1", func(t *testing.T) { + testTableFindNodeByIDRun(t, uint16(bucketSize*nBuckets), 1, randGen) + }) + t.Run("n0r2", func(t *testing.T) { + testTableFindNodeByIDRun(t, 0, 2, randGen) + }) + t.Run("n1r2", func(t *testing.T) { + testTableFindNodeByIDRun(t, 1, 2, randGen) + }) + t.Run("n16r2", func(t *testing.T) { + testTableFindNodeByIDRun(t, bucketSize, 2, randGen) + }) + t.Run("nMr2", func(t *testing.T) { + testTableFindNodeByIDRun(t, uint16(bucketSize*nBuckets), 2, randGen) + }) + t.Run("n0rM", func(t *testing.T) { + testTableFindNodeByIDRun(t, 0, bucketSize, randGen) + }) + t.Run("n1rM", func(t *testing.T) { + testTableFindNodeByIDRun(t, 1, bucketSize, randGen) + }) + t.Run("n16rM", func(t *testing.T) { + testTableFindNodeByIDRun(t, bucketSize, bucketSize, randGen) + }) + t.Run("nMrM", func(t *testing.T) { + testTableFindNodeByIDRun(t, uint16(bucketSize*nBuckets), bucketSize, randGen) + }) } func TestTable_ReadRandomNodesGetAll(t *testing.T) { @@ -281,27 +326,24 @@ func TestTable_ReadRandomNodesGetAll(t *testing.T) { } 
} -type closeTest struct { - Self enode.ID - Target enode.ID - All []*node - N int +func generateNodes(rand *rand.Rand, count int) []*node { + nodes := make([]*node, 0, count) + for i := 0; i < count; i++ { + nodes = append(nodes, generateNode(rand)) + } + return nodes } -func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value { - t := &closeTest{ - Self: gen(enode.ID{}, rand).(enode.ID), - Target: gen(enode.ID{}, rand).(enode.ID), - N: rand.Intn(bucketSize), - } - for _, id := range gen([]enode.ID{}, rand).([]enode.ID) { - r := new(enr.Record) - r.Set(enr.IP(genIP(rand))) - n := wrapNode(enode.SignNull(r, id)) - n.livenessChecks = 1 - t.All = append(t.All, n) - } - return reflect.ValueOf(t) +func generateNode(rand *rand.Rand) *node { + var id enode.ID + rand.Read(id[:]) + + r := new(enr.Record) + r.Set(enr.IP(genIP(rand))) + + n := wrapNode(enode.SignNull(r, id)) + n.livenessChecks = 1 + return n } func TestTable_addVerifiedNode(t *testing.T) { @@ -395,29 +437,12 @@ func TestTable_revalidateSyncRecord(t *testing.T) { } } -// gen wraps quick.Value so it's easier to use. -// it generates a random value of the given value's type. -func gen(typ interface{}, rand *rand.Rand) interface{} { - v, ok := quick.Value(reflect.TypeOf(typ), rand) - if !ok { - panic(fmt.Sprintf("couldn't generate random value of type %T", typ)) - } - return v.Interface() -} - func genIP(rand *rand.Rand) net.IP { ip := make(net.IP, 4) rand.Read(ip) return ip } -func quickcfg() *quick.Config { - return &quick.Config{ - MaxCount: 1000, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - } -} - func newkey() *ecdsa.PrivateKey { key, err := crypto.GenerateKey() if err != nil { diff --git a/p2p/discover/v4_lookup_test.go b/p2p/discover/v4_lookup_test.go index 1eb38de14aa..4e71dd32776 100644 --- a/p2p/discover/v4_lookup_test.go +++ b/p2p/discover/v4_lookup_test.go @@ -17,6 +17,7 @@ package discover import ( + "context" "crypto/ecdsa" "fmt" "net" @@ -74,7 +75,18 @@ func TestUDPv4_LookupIterator(t *testing.T) { t.Skip("fix me on win please") } t.Parallel() - test := newUDPTest(t) + + // Set up RandomNodes() to use expected keys instead of generating random ones. + testNetPrivateKeys := lookupTestnet.privateKeys() + testNetPrivateKeyIndex := -1 + privateKeyGenerator := func() (*ecdsa.PrivateKey, error) { + testNetPrivateKeyIndex = (testNetPrivateKeyIndex + 1) % len(testNetPrivateKeys) + return testNetPrivateKeys[testNetPrivateKeyIndex], nil + } + ctx := context.Background() + ctx = contextWithPrivateKeyGenerator(ctx, privateKeyGenerator) + + test := newUDPTestContext(ctx, t) defer test.close() // Seed table with initial nodes. @@ -316,6 +328,16 @@ func (tn *preminedTestnet) closest(n int) (nodes []*enode.Node) { return nodes[:n] } +func (tn *preminedTestnet) privateKeys() []*ecdsa.PrivateKey { + var keys []*ecdsa.PrivateKey + for d := range tn.dists { + for _, key := range tn.dists[d] { + keys = append(keys, key) + } + } + return keys +} + var _ = (*preminedTestnet).mine // avoid linter warning about mine being dead code. // mine generates a testnet struct literal with nodes at diff --git a/p2p/discover/v4_udp.go b/p2p/discover/v4_udp.go index e823bca9705..6f213407948 100644 --- a/p2p/discover/v4_udp.go +++ b/p2p/discover/v4_udp.go @@ -77,8 +77,11 @@ type UDPv4 struct { addReplyMatcher chan *replyMatcher gotreply chan reply + replyTimeout time.Duration closeCtx context.Context cancelCloseCtx context.CancelFunc + + privateKeyGenerator func() (*ecdsa.PrivateKey, error) } // replyMatcher represents a pending reply. 
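Note (illustrative sketch, not part of this patch): the new replyTimeout and privateKeyGenerator fields are test seams wired through Config — zero/nil values fall back to the production defaults (respTimeout, crypto.GenerateKey) in withDefaults, while tests inject short timeouts or deterministic generators. A minimal sketch of the pattern, with hypothetical names:

package main

import (
	"fmt"
	"time"
)

type config struct {
	replyTimeout time.Duration       // zero means "use the production default"
	keyGen       func() (int, error) // hypothetical stand-in for a key generator
}

func (cfg config) withDefaults() config {
	if cfg.replyTimeout == 0 {
		cfg.replyTimeout = 500 * time.Millisecond // production default
	}
	if cfg.keyGen == nil {
		cfg.keyGen = func() (int, error) { return 42, nil } // production generator
	}
	return cfg
}

func main() {
	// A test shrinks the timeout and injects a deterministic generator.
	cfg := config{
		replyTimeout: 50 * time.Millisecond,
		keyGen:       func() (int, error) { return 1, nil },
	}.withDefaults()
	key, _ := cfg.keyGen()
	fmt.Println(cfg.replyTimeout, key) // 50ms 1
}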
@@ -137,9 +140,12 @@ func ListenV4(ctx context.Context, c UDPConn, ln *enode.LocalNode, cfg Config) ( db: ln.Database(), gotreply: make(chan reply), addReplyMatcher: make(chan *replyMatcher), + replyTimeout: cfg.ReplyTimeout, closeCtx: closeCtx, cancelCloseCtx: cancel, log: cfg.Log, + + privateKeyGenerator: cfg.PrivateKeyGenerator, } tab, err := newTable(t, ln.Database(), cfg.Bootnodes, t.log) @@ -282,7 +288,7 @@ func (t *UDPv4) lookupSelf() []*enode.Node { } func (t *UDPv4) newRandomLookup(ctx context.Context) *lookup { - key, err := crypto.GenerateKey() + key, err := t.privateKeyGenerator() if err != nil { t.log.Warn("Failed to generate a random node key for newRandomLookup", "err", err) key = t.priv @@ -447,7 +453,7 @@ func (t *UDPv4) loop() { now := time.Now() for el := plist.Front(); el != nil; el = el.Next() { nextTimeout = el.Value.(*replyMatcher) - if dist := nextTimeout.deadline.Sub(now); dist < 2*respTimeout { + if dist := nextTimeout.deadline.Sub(now); dist < 2*t.replyTimeout { timeout.Reset(dist) return } @@ -472,7 +478,7 @@ func (t *UDPv4) loop() { return case p := <-t.addReplyMatcher: - p.deadline = time.Now().Add(respTimeout) + p.deadline = time.Now().Add(t.replyTimeout) plist.PushBack(p) case r := <-t.gotreply: @@ -595,7 +601,7 @@ func (t *UDPv4) ensureBond(toid enode.ID, toaddr *net.UDPAddr) { rm := t.sendPing(toid, toaddr, nil) <-rm.errc // Wait for them to ping back and process our pong. - time.Sleep(respTimeout) + time.Sleep(t.replyTimeout) } } diff --git a/p2p/discover/v4_udp_test.go b/p2p/discover/v4_udp_test.go index 8e13fa95766..6b007bbd324 100644 --- a/p2p/discover/v4_udp_test.go +++ b/p2p/discover/v4_udp_test.go @@ -61,6 +61,17 @@ type udpTest struct { } func newUDPTest(t *testing.T) *udpTest { + return newUDPTestContext(context.Background(), t) +} + +func newUDPTestContext(ctx context.Context, t *testing.T) *udpTest { + ctx = disableLookupSlowdown(ctx) + + replyTimeout := contextGetReplyTimeout(ctx) + if replyTimeout == 0 { + replyTimeout = 50 * time.Millisecond + } + test := &udpTest{ t: t, pipe: newpipe(), @@ -75,11 +86,13 @@ func newUDPTest(t *testing.T) *udpTest { panic(err) } ln := enode.NewLocalNode(test.db, test.localkey) - ctx := context.Background() - ctx = disableLookupSlowdown(ctx) test.udp, err = ListenV4(ctx, test.pipe, ln, Config{ PrivateKey: test.localkey, Log: testlog.Logger(t, log.LvlError), + + ReplyTimeout: replyTimeout, + + PrivateKeyGenerator: contextGetPrivateKeyGenerator(ctx), }) if err != nil { panic(err) @@ -175,9 +188,12 @@ func TestUDPv4_responseTimeouts(t *testing.T) { if runtime.GOOS == `darwin` { t.Skip("unstable test on darwin") } - t.Parallel() - test := newUDPTest(t) + + ctx := context.Background() + ctx = contextWithReplyTimeout(ctx, respTimeout) + + test := newUDPTestContext(ctx, t) defer test.close() rand.Seed(time.Now().UnixNano()) @@ -604,6 +620,24 @@ func startLocalhostV4(t *testing.T, cfg Config) *UDPv4 { return udp } +func contextWithReplyTimeout(ctx context.Context, value time.Duration) context.Context { + return context.WithValue(ctx, "p2p.discover.Config.ReplyTimeout", value) +} + +func contextGetReplyTimeout(ctx context.Context) time.Duration { + value, _ := ctx.Value("p2p.discover.Config.ReplyTimeout").(time.Duration) + return value +} + +func contextWithPrivateKeyGenerator(ctx context.Context, value func() (*ecdsa.PrivateKey, error)) context.Context { + return context.WithValue(ctx, "p2p.discover.Config.PrivateKeyGenerator", value) +} + +func contextGetPrivateKeyGenerator(ctx context.Context) func() 
(*ecdsa.PrivateKey, error) { + value, _ := ctx.Value("p2p.discover.Config.PrivateKeyGenerator").(func() (*ecdsa.PrivateKey, error)) + return value +} + // dgramPipe is a fake UDP socket. It queues all sent datagrams. type dgramPipe struct { queue chan dgram diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index 0fb214527ea..6485c1d6d69 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -46,18 +46,26 @@ func TestUDPv5_lookupE2E(t *testing.T) { t.Skip("fix me on win please") } t.Parallel() + + bootNode := startLocalhostV5(t, Config{}) + bootNodeRec := bootNode.Self() + const N = 5 - var nodes []*UDPv5 - for i := 0; i < N; i++ { - var cfg Config - if len(nodes) > 0 { - bn := nodes[0].Self() - cfg.Bootnodes = []*enode.Node{bn} + nodes := []*UDPv5{bootNode} + for len(nodes) < N { + cfg := Config{ + Bootnodes: []*enode.Node{bootNodeRec}, } node := startLocalhostV5(t, cfg) nodes = append(nodes, node) - defer node.Close() } + + defer func() { + for _, node := range nodes { + node.Close() + } + }() + last := nodes[N-1] target := nodes[rand.Intn(N-2)].Self() @@ -102,7 +110,7 @@ func startLocalhostV5(t *testing.T, cfg Config) *UDPv5 { } realaddr := socket.LocalAddr().(*net.UDPAddr) ln.SetStaticIP(realaddr.IP) - ln.Set(enr.UDP(realaddr.Port)) + ln.SetFallbackUDP(realaddr.Port) ctx := context.Background() ctx = disableLookupSlowdown(ctx) udp, err := ListenV5(ctx, socket, ln, cfg) diff --git a/p2p/server.go b/p2p/server.go index b11e8a182f8..b7b152b8547 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -24,6 +24,7 @@ import ( "encoding/hex" "errors" "fmt" + "golang.org/x/sync/semaphore" "net" "sort" "sync" @@ -52,8 +53,7 @@ const ( discmixTimeout = 5 * time.Second // Connectivity defaults. - defaultMaxPendingPeers = 50 - defaultDialRatio = 3 + defaultDialRatio = 3 // This time limits inbound connection attempts per source IP. inboundThrottleTime = 30 * time.Second @@ -79,7 +79,7 @@ type Config struct { // MaxPendingPeers is the maximum number of peers that can be pending in the // handshake phase, counted separately for inbound and outbound connections. - // Zero defaults to preset values. + // It must be greater than zero. MaxPendingPeers int `toml:",omitempty"` // DialRatio controls the ratio of inbound to dialed connections. @@ -191,7 +191,9 @@ type Server struct { dialsched *dialScheduler // Channels into the run loop. 
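Note (illustrative sketch, not part of this patch): the hunk below replaces the Server's manually closed quit channel with a cancellable context; quitCtx.Done() yields an equivalent channel, so existing select statements keep working while shutdown becomes a cancel call. A minimal sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

type server struct {
	quitCtx  context.Context
	quitFunc context.CancelFunc
	quit     <-chan struct{} // same channel as quitCtx.Done()
}

func (s *server) start() {
	s.quitCtx, s.quitFunc = context.WithCancel(context.Background())
	s.quit = s.quitCtx.Done()
	go func() {
		<-s.quit // code that used to select on the closed channel is unchanged
		fmt.Println("run loop stopped")
	}()
}

func main() {
	s := &server{}
	s.start()
	s.quitFunc() // Stop() cancels instead of close(quit)
	time.Sleep(10 * time.Millisecond)
}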
- quit chan struct{} + quitCtx context.Context + quitFunc context.CancelFunc + quit <-chan struct{} addtrusted chan *enode.Node removetrusted chan *enode.Node peerOp chan peerOpFunc @@ -409,10 +411,10 @@ func (srv *Server) Stop() { return } srv.running = false - close(srv.quit) + srv.quitFunc() if srv.listener != nil { // this unblocks listener Accept - srv.listener.Close() + _ = srv.listener.Close() } if srv.nodedb != nil { srv.nodedb.Close() @@ -476,13 +478,17 @@ func (srv *Server) Start(ctx context.Context) error { if srv.PrivateKey == nil { return errors.New("Server.PrivateKey must be set to a non-nil key") } + if srv.MaxPendingPeers <= 0 { + return errors.New("MaxPendingPeers must be greater than zero") + } if srv.newTransport == nil { srv.newTransport = newRLPX } if srv.listenFunc == nil { srv.listenFunc = net.Listen } - srv.quit = make(chan struct{}) + srv.quitCtx, srv.quitFunc = context.WithCancel(ctx) + srv.quit = srv.quitCtx.Done() srv.delpeer = make(chan peerDrop) srv.checkpointPostHandshake = make(chan *conn) srv.checkpointAddPeer = make(chan *conn) @@ -495,11 +501,11 @@ func (srv *Server) Start(ctx context.Context) error { return err } if srv.ListenAddr != "" { - if err := srv.setupListening(); err != nil { + if err := srv.setupListening(srv.quitCtx); err != nil { return err } } - if err := srv.setupDiscovery(ctx); err != nil { + if err := srv.setupDiscovery(srv.quitCtx); err != nil { return err } srv.setupDialScheduler() @@ -586,8 +592,8 @@ func (srv *Server) setupDiscovery(ctx context.Context) error { srv.loopWG.Add(1) go func() { defer debug.LogPanic() + defer srv.loopWG.Done() nat.Map(srv.NAT, srv.quit, "udp", realaddr.Port, realaddr.Port, "ethereum discovery") - srv.loopWG.Done() }() } } @@ -682,7 +688,7 @@ func (srv *Server) maxDialedConns() (limit int) { return limit } -func (srv *Server) setupListening() error { +func (srv *Server) setupListening(ctx context.Context) error { // Launch the listener. listener, err := srv.listenFunc("tcp", srv.ListenAddr) if err != nil { @@ -698,14 +704,18 @@ func (srv *Server) setupListening() error { srv.loopWG.Add(1) go func() { defer debug.LogPanic() + defer srv.loopWG.Done() nat.Map(srv.NAT, srv.quit, "tcp", tcp.Port, tcp.Port, "ethereum p2p") - srv.loopWG.Done() }() } } srv.loopWG.Add(1) - go srv.listenLoop() + go func() { + defer debug.LogPanic() + defer srv.loopWG.Done() + srv.listenLoop(ctx) + }() return nil } @@ -776,13 +786,12 @@ running: // Ensure that the trusted flag is set before checking against MaxPeers. c.flags |= trustedConn } - // TODO: track in-progress inbound node IDs (pre-Peer) to avoid dialing them. - c.cont <- srv.postHandshakeChecks(peers, inboundCount, c) + c.cont <- nil case c := <-srv.checkpointAddPeer: // At this point the connection is past the protocol handshake. // Its capabilities are known and the remote identity is verified. - err := srv.addPeerChecks(peers, inboundCount, c) + err := srv.postHandshakeChecks(peers, inboundCount, c) if err == nil { // The handshakes are done and it passed all checks. p := srv.launchPeer(c, c.pubkey) @@ -840,49 +849,35 @@ func (srv *Server) postHandshakeChecks(peers map[enode.ID]*Peer, inboundCount in return DiscAlreadyConnected case c.node.ID() == srv.localnode.ID(): return DiscSelf + case (len(srv.Protocols) > 0) && (countMatchingProtocols(srv.Protocols, c.caps) == 0): + return DiscUselessPeer default: return nil } } -func (srv *Server) addPeerChecks(peers map[enode.ID]*Peer, inboundCount int, c *conn) error { - // Drop connections with no matching protocols. 
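addPeerChecks is removed below and its checks folded into postHandshakeChecks above, so the capability match now runs once, at the add-peer checkpoint, together with the peer-count and duplicate checks. A simplified sketch of the uselessness test (the real countMatchingProtocols lives in the p2p package; the types here are stand-ins):

package p2psketch

// capability is a stand-in for p2p.Cap: a protocol name/version pair
// advertised during the protocol handshake.
type capability struct {
	Name    string
	Version uint
}

// countMatching returns how many of our protocols the peer also
// speaks; zero means the peer is useless to us (DiscUselessPeer).
func countMatching(ours, theirs []capability) (n int) {
	for _, p := range ours {
		for _, c := range theirs {
			if p.Name == c.Name && p.Version == c.Version {
				n++
			}
		}
	}
	return n
}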
- if len(srv.Protocols) > 0 && countMatchingProtocols(srv.Protocols, c.caps) == 0 { - return DiscUselessPeer - } - // Repeat the post-handshake checks because the - // peer set might have changed since those checks were performed. - return srv.postHandshakeChecks(peers, inboundCount, c) -} - // listenLoop runs in its own goroutine and accepts // inbound connections. -func (srv *Server) listenLoop() { - defer debug.LogPanic() +func (srv *Server) listenLoop(ctx context.Context) { srv.log.Trace("TCP listener up", "addr", srv.listener.Addr()) - // The slots channel limits accepts of new connections. - tokens := defaultMaxPendingPeers - if srv.MaxPendingPeers > 0 { - tokens = srv.MaxPendingPeers - } - slots := make(chan struct{}, tokens) - for i := 0; i < tokens; i++ { - slots <- struct{}{} - } + // The slots semaphore limits concurrent accepts of new connections. + slots := semaphore.NewWeighted(int64(srv.MaxPendingPeers)) // Wait for slots to be returned on exit. This ensures all connection goroutines // are down before listenLoop returns. - defer srv.loopWG.Done() defer func() { - for i := 0; i < cap(slots); i++ { - <-slots - } + _ = slots.Acquire(ctx, int64(srv.MaxPendingPeers)) }() for { // Wait for a free slot before accepting. - <-slots + if slotErr := slots.Acquire(ctx, 1); slotErr != nil { + if !errors.Is(slotErr, context.Canceled) { + srv.log.Error("Failed to get a peer connection slot", "err", slotErr) + } + return + } var ( fd net.Conn @@ -899,8 +894,13 @@ func (srv *Server) listenLoop() { time.Sleep(time.Millisecond * 200) continue } else if err != nil { - srv.log.Trace("Read error", "err", err) - slots <- struct{}{} + // Log the error unless the server is shutting down. + select { + case <-srv.quit: + default: + srv.log.Error("Server listener failed to accept a connection", "err", err) + } + slots.Release(1) return } break @@ -908,9 +908,9 @@ func (srv *Server) listenLoop() { remoteIP := netutil.AddrIP(fd.RemoteAddr()) if err := srv.checkInboundConn(fd, remoteIP); err != nil { - srv.log.Trace("Rejected inbound connnection", "addr", fd.RemoteAddr(), "err", err) - fd.Close() - slots <- struct{}{} + srv.log.Trace("Rejected inbound connection", "addr", fd.RemoteAddr(), "err", err) + _ = fd.Close() + slots.Release(1) continue } if remoteIP != nil { @@ -923,8 +923,9 @@ func (srv *Server) listenLoop() { } go func() { defer debug.LogPanic() - srv.SetupConn(fd, inboundConn, nil) - slots <- struct{}{} + defer slots.Release(1) + // The error is logged in Server.setupConn(). + _ = srv.SetupConn(fd, inboundConn, nil) }() } } diff --git a/p2p/server_test.go b/p2p/server_test.go index ac5dcb7fd73..40ef5c2c631 100644 --- a/p2p/server_test.go +++ b/p2p/server_test.go @@ -69,12 +69,13 @@ func (c *testTransport) close(err error) { func startTestServer(t *testing.T, remoteKey *ecdsa.PublicKey, pf func(*Peer)) *Server { config := Config{ - Name: "test", - MaxPeers: 10, - ListenAddr: "127.0.0.1:0", - NoDiscovery: true, - PrivateKey: newkey(), - Log: testlog.Logger(t, log.LvlError), + Name: "test", + MaxPeers: 10, + MaxPendingPeers: 10, + ListenAddr: "127.0.0.1:0", + NoDiscovery: true, + PrivateKey: newkey(), + Log: testlog.Logger(t, log.LvlError), } server := &Server{ Config: config, @@ -211,18 +212,20 @@ func TestServerDial(t *testing.T) { // This test checks that RemovePeer disconnects the peer if it is connected.
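Stepping back to listenLoop above: it now takes accept slots from a golang.org/x/sync/semaphore.Weighted instead of a hand-rolled token channel, so a cancelled context can unblock the wait during shutdown. A condensed, self-contained sketch of the pattern (acceptLoop, maxPending, and handle are placeholders):

package p2psketch

import (
	"context"
	"net"

	"golang.org/x/sync/semaphore"
)

// acceptLoop caps the number of in-flight connection setups with a
// weighted semaphore; cancelling ctx unblocks Acquire so the loop can
// exit promptly during shutdown.
func acceptLoop(ctx context.Context, ln net.Listener, maxPending int64, handle func(net.Conn)) {
	slots := semaphore.NewWeighted(maxPending)
	// On exit, try to reacquire every slot so in-flight handlers finish
	// first (a no-op if ctx is already cancelled).
	defer func() { _ = slots.Acquire(ctx, maxPending) }()
	for {
		if err := slots.Acquire(ctx, 1); err != nil {
			return // context cancelled: stop accepting
		}
		conn, err := ln.Accept()
		if err != nil {
			slots.Release(1)
			return
		}
		go func() {
			defer slots.Release(1)
			handle(conn)
		}()
	}
}

The server tests that follow set MaxPendingPeers explicitly because the preset default was removed along with the token channel.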
func TestServerRemovePeerDisconnect(t *testing.T) { srv1 := &Server{Config: Config{ - PrivateKey: newkey(), - MaxPeers: 1, - NoDiscovery: true, - Log: testlog.Logger(t, log.LvlTrace).New("server", "1"), + PrivateKey: newkey(), + MaxPeers: 1, + MaxPendingPeers: 1, + NoDiscovery: true, + Log: testlog.Logger(t, log.LvlTrace).New("server", "1"), }} srv2 := &Server{Config: Config{ - PrivateKey: newkey(), - MaxPeers: 1, - NoDiscovery: true, - NoDial: true, - ListenAddr: "127.0.0.1:0", - Log: testlog.Logger(t, log.LvlTrace).New("server", "2"), + PrivateKey: newkey(), + MaxPeers: 1, + MaxPendingPeers: 1, + NoDiscovery: true, + NoDial: true, + ListenAddr: "127.0.0.1:0", + Log: testlog.Logger(t, log.LvlTrace).New("server", "2"), }} if err := srv1.TestStart(); err != nil { t.Fatal("cant start srv1") @@ -249,12 +252,13 @@ func TestServerAtCap(t *testing.T) { trustedID := enode.PubkeyToIDV4(&trustedNode.PublicKey) srv := &Server{ Config: Config{ - PrivateKey: newkey(), - MaxPeers: 10, - NoDial: true, - NoDiscovery: true, - TrustedNodes: []*enode.Node{newNode(trustedID, "")}, - Log: testlog.Logger(t, log.LvlTrace), + PrivateKey: newkey(), + MaxPeers: 10, + MaxPendingPeers: 10, + NoDial: true, + NoDiscovery: true, + TrustedNodes: []*enode.Node{newNode(trustedID, "")}, + Log: testlog.Logger(t, log.LvlTrace), }, } if err := srv.TestStart(); err != nil { @@ -270,22 +274,30 @@ func TestServerAtCap(t *testing.T) { } // Inject a few connections to fill up the peer set. - for i := 0; i < 10; i++ { + for i := 0; i < srv.Config.MaxPeers; i++ { c := newconn(randomID()) if err := srv.checkpoint(c, srv.checkpointAddPeer); err != nil { t.Fatalf("could not add conn %d: %v", i, err) } } + // Try inserting a non-trusted connection. anotherID := randomID() c := newconn(anotherID) - if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != DiscTooManyPeers { + if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != nil { + t.Error("unexpected error @ checkpointPostHandshake:", err) + } + if err := srv.checkpoint(c, srv.checkpointAddPeer); err != DiscTooManyPeers { t.Error("wrong error for insert:", err) } + // Try inserting a trusted connection. 
c = newconn(trustedID) if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != nil { - t.Error("unexpected error for trusted conn @posthandshake:", err) + t.Error("unexpected error @ checkpointPostHandshake:", err) + } + if err := srv.checkpoint(c, srv.checkpointAddPeer); err != nil { + t.Error("unexpected error for trusted conn:", err) } if !c.is(trustedConn) { t.Error("Server did not set trusted flag") @@ -294,7 +306,10 @@ func TestServerAtCap(t *testing.T) { // Remove from trusted set and try again srv.RemoveTrustedPeer(newNode(trustedID, "")) c = newconn(trustedID) - if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != DiscTooManyPeers { + if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != nil { + t.Error("unexpected error @ checkpointPostHandshake:", err) + } + if err := srv.checkpoint(c, srv.checkpointAddPeer); err != DiscTooManyPeers { t.Error("wrong error for insert:", err) } @@ -302,7 +317,10 @@ func TestServerAtCap(t *testing.T) { srv.AddTrustedPeer(newNode(anotherID, "")) c = newconn(anotherID) if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != nil { - t.Error("unexpected error for trusted conn @posthandshake:", err) + t.Error("unexpected error @ checkpointPostHandshake:", err) + } + if err := srv.checkpoint(c, srv.checkpointAddPeer); err != nil { + t.Error("unexpected error for trusted conn:", err) } if !c.is(trustedConn) { t.Error("Server did not set trusted flag") @@ -318,19 +336,19 @@ func TestServerPeerLimits(t *testing.T) { pubkey: &clientkey.PublicKey, phs: protoHandshake{ Pubkey: crypto.MarshalPubkey(&clientkey.PublicKey), - // Force "DiscUselessPeer" due to unmatching caps - // Caps: []Cap{discard.cap()}, + Caps: []Cap{discard.cap()}, }, } srv := &Server{ Config: Config{ - PrivateKey: srvkey, - MaxPeers: 0, - NoDial: true, - NoDiscovery: true, - Protocols: []Protocol{discard}, - Log: testlog.Logger(t, log.LvlTrace), + PrivateKey: srvkey, + MaxPeers: 0, + MaxPendingPeers: 50, + NoDial: true, + NoDiscovery: true, + Protocols: []Protocol{discard}, + Log: testlog.Logger(t, log.LvlTrace), }, newTransport: func(fd net.Conn, dialDest *ecdsa.PublicKey) transport { return tp }, } @@ -343,35 +361,31 @@ func TestServerPeerLimits(t *testing.T) { flags := dynDialedConn dialDest := clientnode conn, _ := net.Pipe() - srv.SetupConn(conn, flags, dialDest) - if tp.closeErr != DiscTooManyPeers { - t.Errorf("unexpected close error: %q", tp.closeErr) + err := srv.SetupConn(conn, flags, dialDest) + _ = conn.Close() + if !errors.Is(err, DiscTooManyPeers) { + t.Fatalf("expected DiscTooManyPeers, but got error: %q", err) } - conn.Close() srv.AddTrustedPeer(clientnode) // Check that server allows a trusted peer despite being full. conn, _ = net.Pipe() - srv.SetupConn(conn, flags, dialDest) - if tp.closeErr == DiscTooManyPeers { - t.Errorf("failed to bypass MaxPeers with trusted node: %q", tp.closeErr) + err = srv.SetupConn(conn, flags, dialDest) + _ = conn.Close() + if err != nil { + t.Fatalf("failed to bypass MaxPeers with trusted node: %q", err) } - if tp.closeErr != DiscUselessPeer { - t.Errorf("unexpected close error: %q", tp.closeErr) - } - conn.Close() - srv.RemoveTrustedPeer(clientnode) // Check that server is full again. 
conn, _ = net.Pipe() - srv.SetupConn(conn, flags, dialDest) - if tp.closeErr != DiscTooManyPeers { - t.Errorf("unexpected close error: %q", tp.closeErr) + err = srv.SetupConn(conn, flags, dialDest) + _ = conn.Close() + if !errors.Is(err, DiscTooManyPeers) { + t.Fatalf("expected DiscTooManyPeers, but got error: %q", err) } - conn.Close() } func TestServerSetupConn(t *testing.T) { @@ -418,7 +432,7 @@ func TestServerSetupConn(t *testing.T) { { tt: &setupTransport{pubkey: srvpub, phs: protoHandshake{Pubkey: crypto.MarshalPubkey(srvpub)}}, flags: inboundConn, - wantCalls: "doEncHandshake,close,", + wantCalls: "doEncHandshake,doProtoHandshake,close,", wantCloseErr: DiscSelf, }, { @@ -432,12 +446,13 @@ func TestServerSetupConn(t *testing.T) { for i, test := range tests { t.Run(test.wantCalls, func(t *testing.T) { cfg := Config{ - PrivateKey: srvkey, - MaxPeers: 10, - NoDial: true, - NoDiscovery: true, - Protocols: []Protocol{discard}, - Log: testlog.Logger(t, log.LvlTrace), + PrivateKey: srvkey, + MaxPeers: 10, + MaxPendingPeers: 10, + NoDial: true, + NoDiscovery: true, + Protocols: []Protocol{discard}, + Log: testlog.Logger(t, log.LvlTrace), } srv := &Server{ Config: cfg, @@ -484,6 +499,7 @@ func (c *setupTransport) doProtoHandshake(our *protoHandshake) (*protoHandshake, } return &c.phs, nil } + func (c *setupTransport) close(err error) { c.calls += "close," c.closeErr = err @@ -491,10 +507,11 @@ func (c *setupTransport) close(err error) { // setupConn shouldn't write to/read from the connection. func (c *setupTransport) WriteMsg(Msg) error { - panic("WriteMsg called on setupTransport") + return errors.New("WriteMsg called on setupTransport") } + func (c *setupTransport) ReadMsg() (Msg, error) { - panic("ReadMsg called on setupTransport") + return Msg{}, errors.New("ReadMsg called on setupTransport") } func newkey() *ecdsa.PrivateKey { @@ -518,13 +535,14 @@ func TestServerInboundThrottle(t *testing.T) { newTransportCalled := make(chan struct{}) srv := &Server{ Config: Config{ - PrivateKey: newkey(), - ListenAddr: "127.0.0.1:0", - MaxPeers: 10, - NoDial: true, - NoDiscovery: true, - Protocols: []Protocol{discard}, - Log: testlog.Logger(t, log.LvlTrace), + PrivateKey: newkey(), + ListenAddr: "127.0.0.1:0", + MaxPeers: 10, + MaxPendingPeers: 10, + NoDial: true, + NoDiscovery: true, + Protocols: []Protocol{discard}, + Log: testlog.Logger(t, log.LvlTrace), }, newTransport: func(fd net.Conn, dialDest *ecdsa.PublicKey) transport { newTransportCalled <- struct{}{} diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 8f505fc7606..68a22002cca 100644 --- a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -96,6 +96,7 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) { P2P: p2p.Config{ PrivateKey: config.PrivateKey, MaxPeers: math.MaxInt32, + MaxPendingPeers: 50, NoDiscovery: true, Dialer: s, EnableMsgEvents: config.EnableMsgEvents, diff --git a/params/networkname/network_name.go b/params/networkname/network_name.go index 58932199389..f6bb6887a5f 100644 --- a/params/networkname/network_name.go +++ b/params/networkname/network_name.go @@ -17,3 +17,21 @@ const ( MumbaiChainName = "mumbai" BorMainnetChainName = "bor-mainnet" ) + +var All = []string{ + MainnetChainName, + SepoliaChainName, + RopstenChainName, + RinkebyChainName, + GoerliChainName, + KilnDevnetChainName, + //DevChainName, + ErigonMineName, + SokolChainName, + FermionChainName, + BSCChainName, + ChapelChainName, + //RialtoChainName, + MumbaiChainName, + 
BorMainnetChainName, +} diff --git a/params/version.go b/params/version.go index b133e553cfa..b88b835ed9e 100644 --- a/params/version.go +++ b/params/version.go @@ -32,8 +32,8 @@ var ( // see https://calver.org const ( VersionMajor = 2022 // Major version component of the current release - VersionMinor = 4 // Minor version component of the current release - VersionMicro = 4 // Patch version component of the current release + VersionMinor = 5 // Minor version component of the current release + VersionMicro = 1 // Patch version component of the current release VersionModifier = "alpha" // Modifier component of the current release VersionKeyCreated = "ErigonVersionCreated" VersionKeyFinished = "ErigonVersionFinished" diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index b21aff1e525..384947150e2 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -26,6 +26,7 @@ import ( "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli" ) @@ -106,7 +107,7 @@ var ( SnapshotSegmentSizeFlag = cli.Uint64Flag{ Name: "segment.size", Usage: "Amount of blocks in each segment", - Value: snapshotsync.DEFAULT_SEGMENT_SIZE, + Value: snap.DEFAULT_SEGMENT_SIZE, } SnapshotRebuildFlag = cli.BoolFlag{ Name: "rebuild", diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index a252294ba9f..533f4eff42b 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -76,6 +76,7 @@ var DefaultFlags = []cli.Flag{ utils.TorrentPortFlag, utils.TorrentMaxPeersFlag, utils.TorrentConnsPerFileFlag, + utils.TorrentDownloadSlotsFlag, utils.TorrentUploadRateFlag, utils.TorrentDownloadRateFlag, utils.TorrentVerbosityFlag, @@ -123,4 +124,7 @@ var DefaultFlags = []cli.Flag{ utils.HeimdallURLFlag, utils.WithoutHeimdallFlag, utils.EthStatsURLFlag, + + utils.OverrideTerminalTotalDifficulty, + utils.OverrideMergeForkBlock, } diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index ed979e84c84..c5f8bf1a00d 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -565,6 +565,7 @@ func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmoun } func (back *BlockReaderWithSnapshots) txnByHash(txnHash common.Hash, segments []*TxnSegment, buf []byte) (txn types.Transaction, blockNum, txnID uint64, err error) { + for i := len(segments) - 1; i >= 0; i-- { sn := segments[i] if sn.IdxTxnHash == nil || sn.IdxTxnHash2BlockNum == nil { @@ -577,14 +578,19 @@ func (back *BlockReaderWithSnapshots) txnByHash(txnHash common.Hash, segments [] gg := sn.Seg.MakeGetter() gg.Reset(offset) buf, _ = gg.Next(buf[:0]) + if len(buf) == 0 { // system-txn + continue + } + // first byte txnHash check - reduces false positives 256x; lets us avoid storing and computing the full entity hash when checking many snapshots.
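That single byte is the whole filter: each stored transaction record begins with the first byte of its hash (followed by the 20-byte sender address and the RLP payload), so comparing it against the queried hash rejects roughly 255 of every 256 random non-matching candidates before any decoding or full-hash work. A toy sketch of the idea, outside the diff:

package snapsketch

// filterCandidates keeps only candidates whose stored first byte
// matches the queried hash; on random non-matches, about 1 in 256
// survives to the expensive full comparison.
func filterCandidates(storedFirstBytes []byte, txnHash [32]byte) (survivors []int) {
	for i, b := range storedFirstBytes {
		if b != txnHash[0] {
			continue // cheap rejection: no decode, no full hash
		}
		survivors = append(survivors, i)
	}
	return survivors
}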
if len(buf) > 1 && txnHash[0] != buf[0] { continue } + sender := buf[1 : 1+20] + reader2 := recsplit.NewIndexReader(sn.IdxTxnHash2BlockNum) blockNum = reader2.Lookup(txnHash[:]) - sender := buf[1 : 1+20] txn, err = types.DecodeTransaction(rlp.NewStream(bytes.NewReader(buf[1+20:]), uint64(len(buf)))) if err != nil { return diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 9123648e454..1e918854733 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -6,13 +6,11 @@ import ( "encoding/binary" "errors" "fmt" - "io/fs" "os" "path" "path/filepath" "runtime" "sort" - "strconv" "strings" "sync" "time" @@ -34,6 +32,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" "github.com/ledgerwatch/log/v3" "go.uber.org/atomic" @@ -52,62 +51,6 @@ type BlocksSnapshot struct { From, To uint64 // [from,to) } -type Type int - -const ( - Headers Type = iota - Bodies - Transactions - NumberOfTypes -) - -func (ft Type) String() string { - switch ft { - case Headers: - return "headers" - case Bodies: - return "bodies" - case Transactions: - return "transactions" - default: - panic(fmt.Sprintf("unknown file type: %d", ft)) - } -} - -func ParseFileType(s string) (Type, bool) { - switch s { - case "headers": - return Headers, true - case "bodies": - return Bodies, true - case "transactions": - return Transactions, true - default: - return NumberOfTypes, false - } -} - -type IdxType string - -const ( - Transactions2Block IdxType = "transactions-to-block" -) - -func (it IdxType) String() string { return string(it) } - -var AllSnapshotTypes = []Type{Headers, Bodies, Transactions} - -var ( - ErrInvalidFileName = fmt.Errorf("invalid compressed file name") -) - -func FileName(from, to uint64, fileType string) string { - return fmt.Sprintf("v1-%06d-%06d-%s", from/1_000, to/1_000, fileType) -} -func SegmentFileName(from, to uint64, t Type) string { return FileName(from, to, t.String()) + ".seg" } -func DatFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".dat" } -func IdxFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".idx" } - func (s BlocksSnapshot) Has(block uint64) bool { return block >= s.From && block < s.To } type HeaderSegment struct { @@ -128,12 +71,12 @@ func (sn *HeaderSegment) close() { } func (sn *HeaderSegment) reopen(dir string) (err error) { sn.close() - fileName := SegmentFileName(sn.From, sn.To, Headers) + fileName := snap.SegmentFileName(sn.From, sn.To, snap.Headers) sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, IdxFileName(sn.From, sn.To, Headers.String()))) + sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Headers.String()))) if err != nil { return err } @@ -158,12 +101,12 @@ func (sn *BodySegment) close() { } func (sn *BodySegment) reopen(dir string) (err error) { sn.close() - fileName := SegmentFileName(sn.From, sn.To, Bodies) + fileName := snap.SegmentFileName(sn.From, sn.To, snap.Bodies) sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, IdxFileName(sn.From, sn.To, Bodies.String()))) + 
sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Bodies.String()))) if err != nil { return err } @@ -193,16 +136,16 @@ func (sn *TxnSegment) close() { } func (sn *TxnSegment) reopen(dir string) (err error) { sn.close() - fileName := SegmentFileName(sn.From, sn.To, Transactions) + fileName := snap.SegmentFileName(sn.From, sn.To, snap.Transactions) sn.Seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, IdxFileName(sn.From, sn.To, Transactions.String()))) + sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions.String()))) if err != nil { return err } - sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, IdxFileName(sn.From, sn.To, Transactions2Block.String()))) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions2Block.String()))) if err != nil { return err } @@ -394,10 +337,10 @@ func (s *RoSnapshots) idxAvailability() uint64 { } func (s *RoSnapshots) ReopenIndices() error { - return s.ReopenSomeIndices(AllSnapshotTypes...) + return s.ReopenSomeIndices(snap.AllSnapshotTypes...) } -func (s *RoSnapshots) ReopenSomeIndices(types ...Type) (err error) { +func (s *RoSnapshots) ReopenSomeIndices(types ...snap.Type) (err error) { s.Headers.lock.Lock() defer s.Headers.lock.Unlock() s.Bodies.lock.Lock() @@ -407,15 +350,15 @@ func (s *RoSnapshots) ReopenSomeIndices(types ...Type) (err error) { for _, t := range types { switch t { - case Headers: + case snap.Headers: if err := s.Headers.reopen(s.dir); err != nil { return err } - case Bodies: + case snap.Bodies: if err := s.Bodies.reopen(s.dir); err != nil { return err } - case Transactions: + case snap.Transactions: if err := s.Txs.reopen(s.dir); err != nil { return err } @@ -437,7 +380,7 @@ func (s *RoSnapshots) AsyncOpenAll(ctx context.Context) { return default: } - if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, ErrSnapshotMissed) { + if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, snap.ErrSnapshotMissed) { log.Error("AsyncOpenAll", "err", err) } time.Sleep(15 * time.Second) @@ -460,7 +403,7 @@ func (s *RoSnapshots) Reopen() error { for _, f := range files { { seg := &BodySegment{From: f.From, To: f.To} - fileName := SegmentFileName(f.From, f.To, Bodies) + fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -472,7 +415,7 @@ func (s *RoSnapshots) Reopen() error { } { seg := &HeaderSegment{From: f.From, To: f.To} - fileName := SegmentFileName(f.From, f.To, Headers) + fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -484,7 +427,7 @@ func (s *RoSnapshots) Reopen() error { } { seg := &TxnSegment{From: f.From, To: f.To} - fileName := SegmentFileName(f.From, f.To, Transactions) + fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -504,23 +447,23 @@ func (s *RoSnapshots) Reopen() error { s.segmentsReady.Store(true) for _, sn := range s.Headers.segments { - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(s.dir, 
IdxFileName(sn.From, sn.To, Headers.String()))) + sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Headers.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } } for _, sn := range s.Bodies.segments { - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(s.dir, IdxFileName(sn.From, sn.To, Bodies.String()))) + sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Bodies.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } } for _, sn := range s.Txs.segments { - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(s.dir, IdxFileName(sn.From, sn.To, Transactions.String()))) + sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(s.dir, IdxFileName(sn.From, sn.To, Transactions2Block.String()))) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions2Block.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -546,7 +489,7 @@ func (s *RoSnapshots) ReopenSegments() error { for _, f := range files { { seg := &BodySegment{From: f.From, To: f.To} - fileName := SegmentFileName(f.From, f.To, Bodies) + fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -559,7 +502,7 @@ func (s *RoSnapshots) ReopenSegments() error { { fmt.Printf("reopen segment: %d-%d\n", f.From, f.To) seg := &HeaderSegment{From: f.From, To: f.To} - fileName := SegmentFileName(f.From, f.To, Headers) + fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -571,7 +514,7 @@ func (s *RoSnapshots) ReopenSegments() error { } { seg := &TxnSegment{From: f.From, To: f.To} - fileName := SegmentFileName(f.From, f.To, Transactions) + fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { if errors.Is(err, os.ErrNotExist) { @@ -676,7 +619,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, snapshotDir *dir.Rw, chai <-workersCh }() - f := filepath.Join(snapshotDir.Path, SegmentFileName(blockFrom, blockTo, Headers)) + f := filepath.Join(snapshotDir.Path, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) errs <- HeadersIdx(ctx, f, blockFrom, tmpDir, lvl) select { case <-ctx.Done(): @@ -723,7 +666,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, snapshotDir *dir.Rw, chai <-workersCh }() - f := filepath.Join(snapshotDir.Path, SegmentFileName(blockFrom, blockTo, Bodies)) + f := filepath.Join(snapshotDir.Path, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) errs <- BodiesIdx(ctx, f, blockFrom, tmpDir, lvl) select { case <-ctx.Done(): @@ -753,7 +696,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, snapshotDir *dir.Rw, chai return err } // hack to read first block body - to get baseTxId from there - if err := s.ReopenSomeIndices(Headers, Bodies); err != nil { + if err := s.ReopenSomeIndices(snap.Headers, snap.Bodies); err != nil { return err } if err := s.Txs.View(func(segments []*TxnSegment) error { @@ -812,100 +755,29 @@ func BuildIndices(ctx 
context.Context, s *RoSnapshots, snapshotDir *dir.Rw, chai return nil } -// FileInfo - parsed file metadata -type FileInfo struct { - _ fs.FileInfo - Version uint8 - From, To uint64 - Path, Ext string - T Type -} - -func IdxFiles(dir string) (res []FileInfo, err error) { return filesWithExt(dir, ".idx") } -func Segments(dir string) (res []FileInfo, err error) { return filesWithExt(dir, ".seg") } -func TmpFiles(dir string) (res []string, err error) { - files, err := os.ReadDir(dir) - if err != nil { - return nil, err - } - for _, f := range files { - if f.IsDir() || len(f.Name()) < 3 { - continue - } - if filepath.Ext(f.Name()) != ".tmp" { - continue - } - res = append(res, filepath.Join(dir, f.Name())) - } - return res, nil -} - -var ErrSnapshotMissed = fmt.Errorf("snapshot missed") - -func noGaps(in []FileInfo) (out []FileInfo, err error) { +func noGaps(in []snap.FileInfo) (out []snap.FileInfo, err error) { var prevTo uint64 for _, f := range in { if f.To <= prevTo { continue } if f.From != prevTo { // no gaps - return nil, fmt.Errorf("%w: from %d to %d", ErrSnapshotMissed, prevTo, f.From) + return nil, fmt.Errorf("%w: from %d to %d", snap.ErrSnapshotMissed, prevTo, f.From) } prevTo = f.To out = append(out, f) } return out, nil } -func parseDir(dir string) (res []FileInfo, err error) { - files, err := os.ReadDir(dir) - if err != nil { - return nil, err - } - for _, f := range files { - fileInfo, err := f.Info() - if err != nil { - return nil, err - } - if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 { - continue - } - - meta, err := ParseFileName(dir, f.Name()) - if err != nil { - if errors.Is(err, ErrInvalidFileName) { - continue - } - return nil, err - } - res = append(res, meta) - } - sort.Slice(res, func(i, j int) bool { - if res[i].Version != res[j].Version { - return res[i].Version < res[j].Version - } - if res[i].From != res[j].From { - return res[i].From < res[j].From - } - if res[i].To != res[j].To { - return res[i].To < res[j].To - } - if res[i].T != res[j].T { - return res[i].T < res[j].T - } - return res[i].Ext < res[j].Ext - }) - - return res, nil -} -func allTypeOfSegmentsMustExist(dir string, in []FileInfo) (res []FileInfo) { +func allTypeOfSegmentsMustExist(dir string, in []snap.FileInfo) (res []snap.FileInfo) { MainLoop: for _, f := range in { if f.From == f.To { continue } - for _, t := range AllSnapshotTypes { - p := filepath.Join(dir, SegmentFileName(f.From, f.To, t)) + for _, t := range snap.AllSnapshotTypes { + p := filepath.Join(dir, snap.SegmentFileName(f.From, f.To, t)) if _, err := os.Stat(p); err != nil { if errors.Is(err, os.ErrNotExist) { continue MainLoop @@ -919,7 +791,7 @@ MainLoop: } // noOverlaps - keep largest ranges and avoid overlap -func noOverlaps(in []FileInfo) (res []FileInfo) { +func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) { for i := range in { f := in[i] if f.From == f.To { @@ -943,13 +815,13 @@ func noOverlaps(in []FileInfo) (res []FileInfo) { return res } -func segments2(dir string) (res []FileInfo, err error) { - list, err := Segments(dir) +func segments2(dir string) (res []snap.FileInfo, err error) { + list, err := snap.Segments(dir) if err != nil { return nil, err } for _, f := range list { - if f.T != Headers { + if f.T != snap.Headers { continue } res = append(res, f) @@ -957,72 +829,10 @@ func segments2(dir string) (res []FileInfo, err error) { return noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, res))) } -func filterExt(in []FileInfo, expectExt string) (out []FileInfo) { - for _, f := range in { - if f.Ext != 
expectExt { // filter out only compressed files - continue - } - out = append(out, f) - } - return out -} -func filesWithExt(dir, expectExt string) ([]FileInfo, error) { - files, err := parseDir(dir) - if err != nil { - return nil, err - } - return filterExt(files, expectExt), nil -} - -func IsCorrectFileName(name string) bool { - parts := strings.Split(name, "-") - return len(parts) == 4 && parts[3] != "v1" -} - -func ParseFileName(dir, fileName string) (res FileInfo, err error) { - ext := filepath.Ext(fileName) - onlyName := fileName[:len(fileName)-len(ext)] - parts := strings.Split(onlyName, "-") - if len(parts) < 4 { - return res, fmt.Errorf("expected format: v1-001500-002000-bodies.seg got: %s. %w", fileName, ErrInvalidFileName) - } - if parts[0] != "v1" { - return res, fmt.Errorf("version: %s. %w", parts[0], ErrInvalidFileName) - } - from, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return - } - to, err := strconv.ParseUint(parts[2], 10, 64) - if err != nil { - return - } - var snapshotType Type - ft, ok := ParseFileType(parts[3]) - if !ok { - return res, fmt.Errorf("unexpected snapshot suffix: %s,%w", parts[2], ErrInvalidFileName) - } - switch ft { - case Headers: - snapshotType = Headers - case Bodies: - snapshotType = Bodies - case Transactions: - snapshotType = Transactions - default: - return res, fmt.Errorf("unexpected snapshot suffix: %s,%w", parts[2], ErrInvalidFileName) - } - return FileInfo{From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: snapshotType, Ext: ext}, nil -} - -const MERGE_THRESHOLD = 2 // don't trigger merge if have too small amount of partial segments -const DEFAULT_SEGMENT_SIZE = 500_000 -const MIN_SEGMENT_SIZE = 1_000 - func chooseSegmentEnd(from, to, blocksPerFile uint64) uint64 { next := (from/blocksPerFile + 1) * blocksPerFile to = min(next, to) - return to - (to % MIN_SEGMENT_SIZE) // round down to the nearest 1k + return to - (to % snap.MIN_SEGMENT_SIZE) // round down to the nearest 1k } func min(a, b uint64) uint64 { @@ -1126,7 +936,7 @@ type DBEventNotifier interface { func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint256.Int, tmpDir string, snapshots *RoSnapshots, rwSnapshotDir *dir.Rw, db kv.RoDB, workers int, downloader proto_downloader.DownloaderClient, lvl log.Lvl, notifier DBEventNotifier) error { log.Log(lvl, "[snapshots] Retire Blocks", "range", fmt.Sprintf("%dk-%dk", blockFrom/1000, blockTo/1000)) // in future we will do it in background - if err := DumpBlocks(ctx, blockFrom, blockTo, DEFAULT_SEGMENT_SIZE, tmpDir, snapshots.Dir(), db, workers, lvl); err != nil { + if err := DumpBlocks(ctx, blockFrom, blockTo, snap.DEFAULT_SEGMENT_SIZE, tmpDir, snapshots.Dir(), db, workers, lvl); err != nil { return fmt.Errorf("DumpBlocks: %w", err) } if err := snapshots.Reopen(); err != nil { @@ -1149,14 +959,14 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return err } // start seed large .seg of large size - req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(AllSnapshotTypes))} + req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(snap.AllSnapshotTypes))} for _, r := range ranges { - if r.to-r.from != DEFAULT_SEGMENT_SIZE { + if r.to-r.from != snap.DEFAULT_SEGMENT_SIZE { continue } - for _, t := range AllSnapshotTypes { + for _, t := range snap.AllSnapshotTypes { req.Items = append(req.Items, &proto_downloader.DownloadItem{ - Path: SegmentFileName(r.from, r.to, t), 
+ Path: snap.SegmentFileName(r.from, r.to, t), }) } } @@ -1180,17 +990,17 @@ func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, t return nil } func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapshotDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { - segmentFile := filepath.Join(snapshotDir, SegmentFileName(blockFrom, blockTo, Transactions)) + segmentFile := filepath.Join(snapshotDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) if _, err := DumpTxs(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpTxs: %w", err) } - segmentFile = filepath.Join(snapshotDir, SegmentFileName(blockFrom, blockTo, Bodies)) + segmentFile = filepath.Join(snapshotDir, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) if err := DumpBodies(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpBodies: %w", err) } - segmentFile = filepath.Join(snapshotDir, SegmentFileName(blockFrom, blockTo, Headers)) + segmentFile = filepath.Join(snapshotDir, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) if err := DumpHeaders(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpHeaders: %w", err) } @@ -1344,7 +1154,7 @@ func DumpTxs(ctx context.Context, db kv.RoDB, segmentFile, tmpDir string, blockF var m runtime.MemStats runtime.ReadMemStats(&m) log.Log(lvl, "[snapshots] Dumping txs", "block num", blockNum, - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys), + "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys), ) default: } @@ -1412,7 +1222,7 @@ func DumpHeaders(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string var m runtime.MemStats runtime.ReadMemStats(&m) log.Log(lvl, "[snapshots] Dumping headers", "block num", blockNum, - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys), + "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys), ) default: } @@ -1467,7 +1277,7 @@ func DumpBodies(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string, var m runtime.MemStats runtime.ReadMemStats(&m) log.Log(lvl, "[snapshots] Wrote into file", "block num", blockNum, - "alloc", common.StorageSize(m.Alloc), "sys", common.StorageSize(m.Sys), + "alloc", common2.ByteCount(m.Alloc), "sys", common2.ByteCount(m.Sys), ) default: } @@ -1493,7 +1303,7 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT var expectedCount, firstTxID uint64 firstBlockNum := blockFrom - bodySegmentPath := filepath.Join(snapshotDir.Path, SegmentFileName(blockFrom, blockTo, Bodies)) + bodySegmentPath := filepath.Join(snapshotDir.Path, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) bodiesSegment, err := compress.NewDecompressor(bodySegmentPath) if err != nil { return err @@ -1509,7 +1319,7 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT } firstTxID = firstBody.BaseTxId - bodyIdxPath := filepath.Join(snapshotDir.Path, IdxFileName(blockFrom, blockTo, Bodies.String())) + bodyIdxPath := filepath.Join(snapshotDir.Path, snap.IdxFileName(blockFrom, blockTo, snap.Bodies.String())) idx, err := recsplit.OpenIndex(bodyIdxPath) if err != nil { return err @@ -1529,7 +1339,7 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT idx.Close() } - segmentFilePath := filepath.Join(snapshotDir.Path, SegmentFileName(blockFrom, blockTo, 
Transactions)) + segmentFilePath := filepath.Join(snapshotDir.Path, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) d, err := compress.NewDecompressor(segmentFilePath) if err != nil { return err @@ -1542,7 +1352,7 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT BucketSize: 2000, LeafSize: 8, TmpDir: tmpDir, - IndexFile: filepath.Join(snapshotDir.Path, IdxFileName(blockFrom, blockTo, Transactions.String())), + IndexFile: filepath.Join(snapshotDir.Path, snap.IdxFileName(blockFrom, blockTo, snap.Transactions.String())), BaseDataID: firstTxID, }) if err != nil { @@ -1554,7 +1364,7 @@ func TransactionsIdx(ctx context.Context, chainID uint256.Int, blockFrom, blockT BucketSize: 2000, LeafSize: 8, TmpDir: tmpDir, - IndexFile: filepath.Join(snapshotDir.Path, IdxFileName(blockFrom, blockTo, Transactions2Block.String())), + IndexFile: filepath.Join(snapshotDir.Path, snap.IdxFileName(blockFrom, blockTo, snap.Transactions2Block.String())), BaseDataID: firstBlockNum, }) if err != nil { @@ -1834,7 +1644,7 @@ func (r mergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []mergeRange) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] - if sn.To-sn.From >= DEFAULT_SEGMENT_SIZE { // is complete .seg + if sn.To-sn.From >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg continue } @@ -1894,7 +1704,7 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges return err } { - segFilePath := filepath.Join(snapshotDir.Path, SegmentFileName(r.from, r.to, Bodies)) + segFilePath := filepath.Join(snapshotDir.Path, snap.SegmentFileName(r.from, r.to, snap.Bodies)) if err := m.merge(ctx, toMergeBodies, segFilePath, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } @@ -1906,7 +1716,7 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges } { - segFilePath := filepath.Join(snapshotDir.Path, SegmentFileName(r.from, r.to, Headers)) + segFilePath := filepath.Join(snapshotDir.Path, snap.SegmentFileName(r.from, r.to, snap.Headers)) if err := m.merge(ctx, toMergeHeaders, segFilePath, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } @@ -1918,7 +1728,7 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges } { - segFilePath := filepath.Join(snapshotDir.Path, SegmentFileName(r.from, r.to, Transactions)) + segFilePath := filepath.Join(snapshotDir.Path, snap.SegmentFileName(r.from, r.to, snap.Transactions)) if err := m.merge(ctx, toMergeTxs, segFilePath, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } @@ -2008,12 +1818,12 @@ func (m *Merger) removeOldFiles(toDel []string, snapshotsDir *dir.Rw) error { ext := filepath.Ext(f) withoutExt := f[:len(f)-len(ext)] _ = os.Remove(withoutExt + ".idx") - if strings.HasSuffix(withoutExt, Transactions.String()) { + if strings.HasSuffix(withoutExt, snap.Transactions.String()) { _ = os.Remove(withoutExt + "-to-block.idx") _ = os.Remove(withoutExt + "-id.idx") } } - tmpFiles, err := TmpFiles(snapshotsDir.Path) + tmpFiles, err := snap.TmpFiles(snapshotsDir.Path) if err != nil { return err } @@ -2030,11 +1840,11 @@ func assertAllSegments(blocks []*BlocksSnapshot, root string) { wg.Add(1) go func(sn *BlocksSnapshot) { defer wg.Done() - f := filepath.Join(root, SegmentFileName(sn.From, sn.To, Headers)) + f := filepath.Join(root, snap.SegmentFileName(sn.From, sn.To, 
snap.Headers)) assertSegment(f) - f = filepath.Join(root, SegmentFileName(sn.From, sn.To, Bodies)) + f = filepath.Join(root, snap.SegmentFileName(sn.From, sn.To, snap.Bodies)) assertSegment(f) - f = filepath.Join(root, SegmentFileName(sn.From, sn.To, Transactions)) + f = filepath.Join(root, snap.SegmentFileName(sn.From, sn.To, snap.Transactions)) assertSegment(f) fmt.Printf("done:%s\n", f) }(sn) diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index 5f6e8122fc8..2929f898c5d 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -13,13 +13,14 @@ import ( "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params/networkname" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" ) -func createTestSegmentFile(t *testing.T, from, to uint64, name Type, dir string) { - c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, SegmentFileName(from, to, name)), dir, 100, 1, log.LvlDebug) +func createTestSegmentFile(t *testing.T, from, to uint64, name snap.Type, dir string) { + c, err := compress.NewCompressor(context.Background(), "test", filepath.Join(dir, snap.SegmentFileName(from, to, name)), dir, 100, 1, log.LvlDebug) require.NoError(t, err) defer c.Close() err = c.AddWord([]byte{1}) @@ -30,7 +31,7 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name Type, dir string) KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, IdxFileName(from, to, name.String())), + IndexFile: filepath.Join(dir, snap.IdxFileName(from, to, name.String())), LeafSize: 8, }) require.NoError(t, err) @@ -39,12 +40,12 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name Type, dir string) require.NoError(t, err) err = idx.Build() require.NoError(t, err) - if name == Transactions { + if name == snap.Transactions { idx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{ KeyCount: 1, BucketSize: 10, TmpDir: dir, - IndexFile: filepath.Join(dir, IdxFileName(from, to, Transactions2Block.String())), + IndexFile: filepath.Join(dir, snap.IdxFileName(from, to, snap.Transactions2Block.String())), LeafSize: 8, }) require.NoError(t, err) @@ -59,7 +60,7 @@ func createTestSegmentFile(t *testing.T, from, to uint64, name Type, dir string) func TestMergeSnapshots(t *testing.T) { dir, require := t.TempDir(), require.New(t) createFile := func(from, to uint64) { - for _, snT := range AllSnapshotTypes { + for _, snT := range snap.AllSnapshotTypes { createTestSegmentFile(t, from, to, snT, dir) } } @@ -82,7 +83,7 @@ func TestMergeSnapshots(t *testing.T) { require.NoError(err) } - expectedFileName := SegmentFileName(500_000, 1_000_000, Transactions) + expectedFileName := snap.SegmentFileName(500_000, 1_000_000, snap.Transactions) d, err := compress.NewDecompressor(filepath.Join(dir, expectedFileName)) require.NoError(err) defer d.Close() @@ -97,7 +98,7 @@ func TestMergeSnapshots(t *testing.T) { require.NoError(err) } - expectedFileName = SegmentFileName(1_100_000, 1_200_000, Transactions) + expectedFileName = snap.SegmentFileName(1_100_000, 1_200_000, snap.Transactions) d, err = compress.NewDecompressor(filepath.Join(dir, expectedFileName)) require.NoError(err) defer d.Close() @@ -129,7 +130,7 @@ func TestOpenAllSnapshot(t *testing.T) { chainSnapshotCfg := 
snapshothashes.KnownConfig(networkname.MainnetChainName) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 cfg := ethconfig.Snapshot{Enabled: true} - createFile := func(from, to uint64, name Type) { createTestSegmentFile(t, from, to, name, dir) } + createFile := func(from, to uint64, name snap.Type) { createTestSegmentFile(t, from, to, name, dir) } s := NewRoSnapshots(cfg, dir) defer s.Close() err := s.Reopen() @@ -137,23 +138,23 @@ func TestOpenAllSnapshot(t *testing.T) { require.Equal(0, len(s.Headers.segments)) s.Close() - createFile(500_000, 1_000_000, Bodies) + createFile(500_000, 1_000_000, snap.Bodies) s = NewRoSnapshots(cfg, dir) defer s.Close() require.Equal(0, len(s.Bodies.segments)) //because, no headers and transactions snapshot files are created s.Close() - createFile(500_000, 1_000_000, Headers) - createFile(500_000, 1_000_000, Transactions) + createFile(500_000, 1_000_000, snap.Headers) + createFile(500_000, 1_000_000, snap.Transactions) s = NewRoSnapshots(cfg, dir) err = s.Reopen() require.Error(err) require.Equal(0, len(s.Headers.segments)) //because, no gaps are allowed (expect snapshots from block 0) s.Close() - createFile(0, 500_000, Bodies) - createFile(0, 500_000, Headers) - createFile(0, 500_000, Transactions) + createFile(0, 500_000, snap.Bodies) + createFile(0, 500_000, snap.Headers) + createFile(0, 500_000, snap.Transactions) s = NewRoSnapshots(cfg, dir) defer s.Close() @@ -190,9 +191,9 @@ func TestOpenAllSnapshot(t *testing.T) { defer s.Close() require.Equal(2, len(s.Headers.segments)) - createFile(500_000, 900_000, Headers) - createFile(500_000, 900_000, Bodies) - createFile(500_000, 900_000, Transactions) + createFile(500_000, 900_000, snap.Headers) + createFile(500_000, 900_000, snap.Bodies) + createFile(500_000, 900_000, snap.Transactions) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 s = NewRoSnapshots(cfg, dir) defer s.Close() @@ -217,24 +218,24 @@ func TestParseCompressedFileName(t *testing.T) { require.NoError(err) return s.Name() } - _, err := ParseFileName("", stat("a")) + _, err := snap.ParseFileName("", stat("a")) require.Error(err) - _, err = ParseFileName("", stat("1-a")) + _, err = snap.ParseFileName("", stat("1-a")) require.Error(err) - _, err = ParseFileName("", stat("1-2-a")) + _, err = snap.ParseFileName("", stat("1-2-a")) require.Error(err) - _, err = ParseFileName("", stat("1-2-bodies.info")) + _, err = snap.ParseFileName("", stat("1-2-bodies.info")) require.Error(err) - _, err = ParseFileName("", stat("1-2-bodies.seg")) + _, err = snap.ParseFileName("", stat("1-2-bodies.seg")) require.Error(err) - _, err = ParseFileName("", stat("v2-1-2-bodies.seg")) + _, err = snap.ParseFileName("", stat("v2-1-2-bodies.seg")) require.Error(err) - _, err = ParseFileName("", stat("v0-1-2-bodies.seg")) + _, err = snap.ParseFileName("", stat("v0-1-2-bodies.seg")) require.Error(err) - f, err := ParseFileName("", stat("v1-1-2-bodies.seg")) + f, err := snap.ParseFileName("", stat("v1-1-2-bodies.seg")) require.NoError(err) - require.Equal(f.T, Bodies) + require.Equal(f.T, snap.Bodies) require.Equal(1_000, int(f.From)) require.Equal(2_000, int(f.To)) } diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go new file mode 100644 index 00000000000..0e8f90a7f53 --- /dev/null +++ b/turbo/snapshotsync/snap/files.go @@ -0,0 +1,202 @@ +package snap + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + "sort" + "strconv" + "strings" +) + +type Type int + +const ( + Headers Type = iota + Bodies + Transactions + NumberOfTypes +) + +func 
(ft Type) String() string { + switch ft { + case Headers: + return "headers" + case Bodies: + return "bodies" + case Transactions: + return "transactions" + default: + panic(fmt.Sprintf("unknown file type: %d", ft)) + } +} + +func ParseFileType(s string) (Type, bool) { + switch s { + case "headers": + return Headers, true + case "bodies": + return Bodies, true + case "transactions": + return Transactions, true + default: + return NumberOfTypes, false + } +} + +type IdxType string + +const ( + Transactions2Block IdxType = "transactions-to-block" +) + +func (it IdxType) String() string { return string(it) } + +var AllSnapshotTypes = []Type{Headers, Bodies, Transactions} + +var ( + ErrInvalidFileName = fmt.Errorf("invalid compressed file name") +) + +func FileName(from, to uint64, fileType string) string { + return fmt.Sprintf("v1-%06d-%06d-%s", from/1_000, to/1_000, fileType) +} +func SegmentFileName(from, to uint64, t Type) string { return FileName(from, to, t.String()) + ".seg" } +func DatFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".dat" } +func IdxFileName(from, to uint64, fType string) string { return FileName(from, to, fType) + ".idx" } + +func FilterExt(in []FileInfo, expectExt string) (out []FileInfo) { + for _, f := range in { + if f.Ext != expectExt { // filter out only compressed files + continue + } + out = append(out, f) + } + return out +} +func FilesWithExt(dir, expectExt string) ([]FileInfo, error) { + files, err := ParseDir(dir) + if err != nil { + return nil, err + } + return FilterExt(files, expectExt), nil +} + +func IsCorrectFileName(name string) bool { + parts := strings.Split(name, "-") + return len(parts) == 4 && parts[3] != "v1" +} + +func ParseFileName(dir, fileName string) (res FileInfo, err error) { + ext := filepath.Ext(fileName) + onlyName := fileName[:len(fileName)-len(ext)] + parts := strings.Split(onlyName, "-") + if len(parts) < 4 { + return res, fmt.Errorf("expected format: v1-001500-002000-bodies.seg got: %s. %w", fileName, ErrInvalidFileName) + } + if parts[0] != "v1" { + return res, fmt.Errorf("version: %s. 
%w", parts[0], ErrInvalidFileName) + } + from, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return + } + to, err := strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return + } + var snapshotType Type + ft, ok := ParseFileType(parts[3]) + if !ok { + return res, fmt.Errorf("unexpected snapshot suffix: %s, %w", parts[3], ErrInvalidFileName) + } + switch ft { + case Headers: + snapshotType = Headers + case Bodies: + snapshotType = Bodies + case Transactions: + snapshotType = Transactions + default: + return res, fmt.Errorf("unexpected snapshot suffix: %s, %w", parts[3], ErrInvalidFileName) + } + return FileInfo{From: from * 1_000, To: to * 1_000, Path: filepath.Join(dir, fileName), T: snapshotType, Ext: ext}, nil +} + +const MERGE_THRESHOLD = 2 // don't trigger a merge if there are too few partial segments +const DEFAULT_SEGMENT_SIZE = 500_000 +const MIN_SEGMENT_SIZE = 1_000 + +// FileInfo - parsed file metadata +type FileInfo struct { + _ fs.FileInfo + Version uint8 + From, To uint64 + Path, Ext string + T Type +} + +func IdxFiles(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".idx") } +func Segments(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".seg") } +func TmpFiles(dir string) (res []string, err error) { + files, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + for _, f := range files { + if f.IsDir() || len(f.Name()) < 3 { + continue + } + if filepath.Ext(f.Name()) != ".tmp" { + continue + } + res = append(res, filepath.Join(dir, f.Name())) + } + return res, nil +} + +var ErrSnapshotMissed = fmt.Errorf("snapshot missed") + +func ParseDir(dir string) (res []FileInfo, err error) { + files, err := os.ReadDir(dir) + if err != nil { + return nil, err + } + for _, f := range files { + fileInfo, err := f.Info() + if err != nil { + return nil, err + } + if f.IsDir() || fileInfo.Size() == 0 || len(f.Name()) < 3 { + continue + } + + meta, err := ParseFileName(dir, f.Name()) + if err != nil { + if errors.Is(err, ErrInvalidFileName) { + continue + } + return nil, err + } + res = append(res, meta) + } + sort.Slice(res, func(i, j int) bool { + if res[i].Version != res[j].Version { + return res[i].Version < res[j].Version + } + if res[i].From != res[j].From { + return res[i].From < res[j].From + } + if res[i].To != res[j].To { + return res[i].To < res[j].To + } + if res[i].T != res[j].T { + return res[i].T < res[j].T + } + return res[i].Ext < res[j].Ext + }) + + return res, nil +} diff --git a/turbo/snapshotsync/snapshotsynccli/flags.go b/turbo/snapshotsync/snap/flags.go similarity index 97% rename from turbo/snapshotsync/snapshotsynccli/flags.go rename to turbo/snapshotsync/snap/flags.go index eac39949f70..e0f23e9078c 100644 --- a/turbo/snapshotsync/snapshotsynccli/flags.go +++ b/turbo/snapshotsync/snap/flags.go @@ -1,4 +1,4 @@ -package snapshotsynccli +package snap import ( "fmt" diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index f8b4cac250f..30a1caf523f 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -133,12 +133,13 @@ func (bd *BodyDownload) RequestMoreBodies(tx kv.RwTx, blockReader interfaces.Ful bd.deliveriesB[blockNum-bd.requestedLow] = block.RawBody() // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) - var td *big.Int if parent, err := rawdb.ReadTd(tx, block.ParentHash(), block.NumberU64()-1); err != nil { log.Error("Failed to ReadTd", "err", err, "number",
block.NumberU64()-1, "hash", block.ParentHash()) } else if parent != nil { - td = new(big.Int).Add(block.Difficulty(), parent) - go blockPropagator(context.Background(), block, td) + if block.Difficulty().Sign() != 0 { // don't propagate proof-of-stake blocks + td := new(big.Int).Add(block.Difficulty(), parent) + go blockPropagator(context.Background(), block, td) + } } else { log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 9deab0df1b6..68bbc557d9d 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -131,7 +131,7 @@ func (hd *HeaderDownload) childParentValid(child, parent *types.Header) (bool, P } // SingleHeaderAsSegment converts message containing 1 header into one singleton chain segment -func (hd *HeaderDownload) SingleHeaderAsSegment(headerRaw []byte, header *types.Header) ([]ChainSegment, Penalty, error) { +func (hd *HeaderDownload) SingleHeaderAsSegment(headerRaw []byte, header *types.Header, penalizePoSBlocks bool) ([]ChainSegment, Penalty, error) { hd.lock.RLock() defer hd.lock.RUnlock() @@ -139,6 +139,9 @@ func (hd *HeaderDownload) SingleHeaderAsSegment(headerRaw []byte, header *types. if _, bad := hd.badHeaders[headerHash]; bad { return nil, BadBlockPenalty, nil } + if penalizePoSBlocks && header.Difficulty.Sign() == 0 { + return nil, NewBlockGossipAfterMergePenalty, nil + } h := ChainSegmentHeader{ Header: header, HeaderRaw: headerRaw, @@ -234,12 +237,12 @@ func (hd *HeaderDownload) removeUpwards(toRemove []*Link) { func (hd *HeaderDownload) MarkPreverified(link *Link) { // Go through all parent links that are not preverified and mark them too - for link != nil && !link.persisted { - if !link.verified { + for link != nil && !link.verified { + if !link.persisted { link.verified = true hd.moveLinkToQueue(link, InsertQueueID) + link = hd.links[link.header.ParentHash] } - link = hd.links[link.header.ParentHash] } } @@ -272,13 +275,11 @@ func (hd *HeaderDownload) extendUp(segment ChainSegment, attachmentLink *Link) { prevLink := attachmentLink for i := len(segment) - 1; i >= 0; i-- { link := hd.addHeaderAsLink(segment[i], false /* persisted */) - if prevLink.persisted { - // If we are attching to already persisted link, schedule for insertion (persistence) - if link.verified { - hd.moveLinkToQueue(link, InsertQueueID) - } else { - hd.moveLinkToQueue(link, VerifyQueueID) - } + // If we are attaching to an already persisted link, schedule for insertion (persistence) + if link.verified { + hd.moveLinkToQueue(link, InsertQueueID) + } else { + hd.moveLinkToQueue(link, VerifyQueueID) } prevLink.next = append(prevLink.next, link) prevLink = link @@ -362,12 +363,10 @@ func (hd *HeaderDownload) connect(segment ChainSegment, attachmentLink *Link, an for i := len(segment) - 1; i >= 0; i-- { link := hd.addHeaderAsLink(segment[i], false /* persisted */) // If we attach to already persisted link, mark this one for insertion - if prevLink.persisted { - if link.verified { - hd.moveLinkToQueue(link, InsertQueueID) - } else { - hd.moveLinkToQueue(link, VerifyQueueID) - } + if link.verified { + hd.moveLinkToQueue(link, InsertQueueID) + } else { + hd.moveLinkToQueue(link, VerifyQueueID) } prevLink.next = append(prevLink.next, link) prevLink = link @@ -757,6 +756,18 @@ func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficul checkInsert := true + if hd.trace { + var iStrs []string + for
i := 0; i < hd.insertQueue.Len(); i++ { + iStrs = append(iStrs, fmt.Sprintf("%d=>%x", hd.insertQueue[i].blockHeight, hd.insertQueue[i].hash)) + } + var vStrs []string + for i := 0; i < hd.verifyQueue.Len(); i++ { + vStrs = append(vStrs, fmt.Sprintf("%d=>%x", hd.verifyQueue[i].blockHeight, hd.verifyQueue[i].hash)) + } + log.Info("InsertHeaders", "highestInDb", hd.highestInDb, "insertQueue", strings.Join(iStrs, ", "), "verifyQueue", strings.Join(vStrs, ", ")) + } + for checkInsert { checkInsert = false // Check what we can insert without verification @@ -795,6 +806,9 @@ func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficul } if link.blockHeight > hd.highestInDb { + if hd.trace { + log.Info("Highest in DB change", "number", link.blockHeight, "hash", link.hash) + } hd.highestInDb = link.blockHeight } link.persisted = true @@ -1356,7 +1370,7 @@ func (hd *HeaderDownload) AddMinedHeader(header *types.Header) error { if err := header.EncodeRLP(buf); err != nil { return err } - segments, _, err := hd.SingleHeaderAsSegment(buf.Bytes(), header) + segments, _, err := hd.SingleHeaderAsSegment(buf.Bytes(), header, false /* penalizePoSBlocks */) if err != nil { return err } diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index d2da1eebfac..c76b65eda6b 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -180,6 +180,7 @@ const ( TooFarFuturePenalty TooFarPastPenalty AbandonedAnchorPenalty + NewBlockGossipAfterMergePenalty ) type PeerPenalty struct { @@ -278,6 +279,7 @@ type HeaderDownload struct { requestChaining bool // Whether the downloader is allowed to issue more requests when previous responses created or moved an anchor fetchingNew bool // Set when the stage that is actively fetching the headers is in progress topSeenHeightPoW uint64 + trace bool consensusHeaderReader consensus.ChainHeaderReader headerReader interfaces.HeaderReader @@ -355,6 +357,8 @@ func (p Penalty) String() string { return "TooFarFuture" case TooFarPastPenalty: return "TooFarPast" + case NewBlockGossipAfterMergePenalty: + return "NewBlockGossipAfterMerge" default: return fmt.Sprintf("Unknown(%d)", p) } diff --git a/turbo/stages/headerdownload/header_test.go b/turbo/stages/headerdownload/header_test.go index 8c16ebac71a..59aec92e160 100644 --- a/turbo/stages/headerdownload/header_test.go +++ b/turbo/stages/headerdownload/header_test.go @@ -166,7 +166,7 @@ func TestSingleHeaderAsSegment(t *testing.T) { var h types.Header h.Number = big.NewInt(5) headerRaw, _ := rlp.EncodeToBytes(h) - if chainSegments, penalty, err := hd.SingleHeaderAsSegment(headerRaw, &h); err == nil { + if chainSegments, penalty, err := hd.SingleHeaderAsSegment(headerRaw, &h, false /* penalizePoSBlocks */); err == nil { if penalty != NoPenalty { t.Errorf("unexpected penalty: %s", penalty) } @@ -185,7 +185,7 @@ func TestSingleHeaderAsSegment(t *testing.T) { // Same header with a bad hash hd.ReportBadHeader(h.Hash()) - if chainSegments, penalty, err := hd.SingleHeaderAsSegment(headerRaw, &h); err == nil { + if chainSegments, penalty, err := hd.SingleHeaderAsSegment(headerRaw, &h, false /* penalizePoSBlocks */); err == nil { if penalty != BadBlockPenalty { t.Errorf("expected BadBlock penalty, got %s", penalty) }
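A closing note on the proof-of-stake guards introduced in the last hunks: after the Merge a valid header carries zero difficulty, so both the body-download propagation path and SingleHeaderAsSegment gate on Difficulty.Sign(). A compact sketch of the two rules (hypothetical helper names):

package headersketch

import "math/big"

// shouldPropagate mirrors the body-download rule above: blocks with
// zero difficulty are proof-of-stake and must not be re-gossiped.
func shouldPropagate(difficulty *big.Int) bool {
	return difficulty.Sign() != 0
}

// gossipPenalty mirrors SingleHeaderAsSegment: when penalizePoSBlocks
// is set, a peer announcing a zero-difficulty header earns the new
// NewBlockGossipAfterMergePenalty.
func gossipPenalty(difficulty *big.Int, penalizePoSBlocks bool) string {
	if penalizePoSBlocks && difficulty.Sign() == 0 {
		return "NewBlockGossipAfterMerge"
	}
	return "NoPenalty"
}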