diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000000..5d5d221eb385 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,31 @@ +# This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time. +# +# You can adjust the behavior by modifying this file. +# For more information, see: +# https://github.com/actions/stale +name: Mark stale issues and pull requests + +on: + schedule: + - cron: '0 0 * * *' + +jobs: + stale: + + runs-on: ubuntu-latest + permissions: + issues: write + pull-requests: write + + steps: + - uses: actions/stale@v5 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: 'This issue is stale because it has been open 14 days with no activity. Remove stale label or comment or this will be closed in 14 days.' + stale-pr-message: 'This PR is stale because it has been open 21 days with no activity. Remove stale label or comment or this will be closed in 14 days.' + close-issue-message: 'This issue was closed because it has been stalled for 28 days with no activity.' + close-pr-message: 'This PR was closed because it has been stalled for 35 days with no activity.' + days-before-issue-stale: 14 + days-before-pr-stale: 21 + days-before-issue-close: 14 + days-before-pr-close: 14 diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 2f087af787cc..87770f49ab79 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -1107,6 +1107,8 @@ func (c *Bor) CommitStates( lastStateID++ } + log.Info("StateSyncData", "Gas", totalGas, "Block-number", number, "LastStateID", lastStateID, "TotalRecords", len(eventRecords)) + return stateSyncs, nil } diff --git a/consensus/bor/heimdall.go b/consensus/bor/heimdall.go index c97a3cda11b6..217de13fe947 100644 --- a/consensus/bor/heimdall.go +++ b/consensus/bor/heimdall.go @@ -2,6 +2,7 @@ package bor import ( "github.com/ethereum/go-ethereum/consensus/bor/clerk" + "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint" "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span" ) @@ -9,5 +10,6 @@ import ( type IHeimdallClient interface { StateSyncEvents(fromID uint64, to int64) ([]*clerk.EventRecordWithTime, error) Span(spanID uint64) (*span.HeimdallSpan, error) + FetchLatestCheckpoint() (*checkpoint.Checkpoint, error) Close() } diff --git a/consensus/bor/heimdall/checkpoint/checkpoint.go b/consensus/bor/heimdall/checkpoint/checkpoint.go new file mode 100644 index 000000000000..77569293adb9 --- /dev/null +++ b/consensus/bor/heimdall/checkpoint/checkpoint.go @@ -0,0 +1,22 @@ +package checkpoint + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// Checkpoint defines a response object type of bor checkpoint +type Checkpoint struct { + Proposer common.Address `json:"proposer"` + StartBlock *big.Int `json:"start_block"` + EndBlock *big.Int `json:"end_block"` + RootHash common.Hash `json:"root_hash"` + BorChainID string `json:"bor_chain_id"` + Timestamp uint64 `json:"timestamp"` +} + +type CheckpointResponse struct { + Height string `json:"height"` + Result Checkpoint `json:"result"` +} diff --git a/consensus/bor/heimdall/client.go b/consensus/bor/heimdall/client.go index c5e1dfb4cff3..2d42cfc31b21 100644 --- a/consensus/bor/heimdall/client.go +++ b/consensus/bor/heimdall/client.go @@ -12,6 +12,7 @@ import ( "time" "github.com/ethereum/go-ethereum/consensus/bor/clerk" + "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint" 
"github.com/ethereum/go-ethereum/consensus/bor/heimdall/span" "github.com/ethereum/go-ethereum/log" ) @@ -53,6 +54,7 @@ func NewHeimdallClient(urlString string) *HeimdallClient { const ( fetchStateSyncEventsFormat = "from-id=%d&to-time=%d&limit=%d" fetchStateSyncEventsPath = "clerk/event-record/list" + fetchLatestCheckpoint = "/checkpoints/latest" fetchSpanFormat = "bor/span/%d" ) @@ -108,6 +110,21 @@ func (h *HeimdallClient) Span(spanID uint64) (*span.HeimdallSpan, error) { return &response.Result, nil } +// FetchLatestCheckpoint fetches the latest bor submitted checkpoint from heimdall +func (h *HeimdallClient) FetchLatestCheckpoint() (*checkpoint.Checkpoint, error) { + url, err := latestCheckpointURL(h.urlString) + if err != nil { + return nil, err + } + + response, err := FetchWithRetry[checkpoint.CheckpointResponse](h.client, url, h.closeCh) + if err != nil { + return nil, err + } + + return &response.Result, nil +} + // FetchWithRetry returns data from heimdall with retry func FetchWithRetry[T any](client http.Client, url *url.URL, closeCh chan struct{}) (*T, error) { // attempt counter @@ -171,6 +188,10 @@ func stateSyncURL(urlString string, fromID uint64, to int64) (*url.URL, error) { return makeURL(urlString, fetchStateSyncEventsPath, queryParams) } +func latestCheckpointURL(urlString string) (*url.URL, error) { + return makeURL(urlString, fetchLatestCheckpoint, "") +} + func makeURL(urlString, rawPath, rawQuery string) (*url.URL, error) { u, err := url.Parse(urlString) if err != nil { diff --git a/eth/api_backend.go b/eth/api_backend.go index 6577ac1e1af4..c33f3cf6f282 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -359,3 +359,11 @@ func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, re func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { return b.eth.stateAtTransaction(block, txIndex, reexec) } + +func (b *EthAPIBackend) GetCheckpointWhitelist() map[uint64]common.Hash { + return b.eth.Downloader().ChainValidator.GetCheckpointWhitelist() +} + +func (b *EthAPIBackend) PurgeCheckpointWhitelist() { + b.eth.Downloader().ChainValidator.PurgeCheckpointWhitelist() +} diff --git a/eth/backend.go b/eth/backend.go index 03419cde450c..0b8a956cfa40 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -101,6 +101,8 @@ type Ethereum struct { lock sync.RWMutex // Protects the variadic fields (e.g. 
gas price and etherbase) + closeCh chan struct{} // Channel to signal the background processes to exit + shutdownTracker *shutdowncheck.ShutdownTracker // Tracks if and when the node has shutdown ungracefully } @@ -163,6 +165,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { bloomRequests: make(chan chan *bloombits.Retrieval), bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), p2pServer: stack.Server(), + closeCh: make(chan struct{}), shutdownTracker: shutdowncheck.NewShutdownTracker(chainDb), } @@ -254,6 +257,7 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { BloomCache: uint64(cacheLimit), EventMux: eth.eventMux, Checkpoint: checkpoint, + EthAPI: ethAPI, PeerRequiredBlocks: config.PeerRequiredBlocks, }); err != nil { return nil, err @@ -608,9 +612,77 @@ func (s *Ethereum) Start() error { } maxPeers -= s.config.LightPeers } + // Start the networking layer and the light server if requested s.handler.Start(maxPeers) + go s.startCheckpointWhitelistService() + + return nil +} + +// StartCheckpointWhitelistService starts the goroutine to fetch checkpoints and update the +// checkpoint whitelist map. +func (s *Ethereum) startCheckpointWhitelistService() { + // a shortcut helps with tests and early exit + select { + case <-s.closeCh: + return + default: + } + + // first run the checkpoint whitelist + err := s.handleWhitelistCheckpoint() + if err != nil { + if errors.Is(err, ErrBorConsensusWithoutHeimdall) || errors.Is(err, ErrNotBorConsensus) { + return + } + + log.Warn("unable to whitelist checkpoint - first run", "err", err) + } + + ticker := time.NewTicker(100 * time.Second) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + err := s.handleWhitelistCheckpoint() + if err != nil { + log.Warn("unable to whitelist checkpoint", "err", err) + } + case <-s.closeCh: + return + } + } +} + +var ( + ErrNotBorConsensus = errors.New("not bor consensus was given") + ErrBorConsensusWithoutHeimdall = errors.New("bor consensus without heimdall") +) + +// handleWhitelistCheckpoint handles the checkpoint whitelist mechanism. +func (s *Ethereum) handleWhitelistCheckpoint() error { + ethHandler := (*ethHandler)(s.handler) + + bor, ok := ethHandler.chain.Engine().(*bor.Bor) + if !ok { + return ErrNotBorConsensus + } + + if bor.HeimdallClient == nil { + return ErrBorConsensusWithoutHeimdall + } + + endBlockNum, endBlockHash, err := ethHandler.fetchWhitelistCheckpoint(bor) + if err != nil { + return err + } + + // Update the checkpoint whitelist map. 
+ ethHandler.downloader.ProcessCheckpoint(endBlockNum, endBlockHash) + return nil } @@ -626,6 +698,9 @@ func (s *Ethereum) Stop() error { s.bloomIndexer.Close() close(s.closeBloomHandler) + // Close all bg processes + close(s.closeCh) + // closing consensus engine first, as miner has deps on it s.engine.Close() s.txPool.Stop() diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 1e68746f97ed..1a9d815ccd3c 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -30,6 +30,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/downloader/whitelist" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" @@ -143,6 +144,8 @@ type Downloader struct { quitCh chan struct{} // Quit channel to signal termination quitLock sync.Mutex // Lock to prevent double closes + ChainValidator + // Testing hooks syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch @@ -150,6 +153,14 @@ type Downloader struct { chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) } +// interface for whitelist service +type ChainValidator interface { + IsValidChain(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) + ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) + GetCheckpointWhitelist() map[uint64]common.Hash + PurgeCheckpointWhitelist() +} + // LightChain encapsulates functions required to synchronise a light chain. type LightChain interface { // HasHeader verifies a header's presence in the local chain. @@ -204,7 +215,8 @@ type BlockChain interface { } // New creates a new downloader to fetch hashes and blocks from remote peers. 
-func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { +//nolint: staticcheck +func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func(), whitelistService ChainValidator) *Downloader { if lightchain == nil { lightchain = chain } @@ -221,6 +233,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain Bl quitCh: make(chan struct{}), SnapSyncer: snap.NewSyncer(stateDb), stateSyncStart: make(chan *stateSync), + ChainValidator: whitelistService, } dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) @@ -332,9 +345,11 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, m case nil, errBusy, errCanceled: return err } + if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || - errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { + errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) || + errors.Is(err, whitelist.ErrCheckpointMismatch) { log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) if d.dropPeer == nil { // The dropPeer method is nil when `--copydb` is used for a local copy. @@ -345,10 +360,17 @@ func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, m } return err } + if errors.Is(err, ErrMergeTransition) { return err // This is an expected fault, don't keep printing it in a spin-loop } - log.Warn("Synchronisation failed, retrying", "err", err) + + if errors.Is(err, whitelist.ErrNoRemoteCheckoint) { + log.Warn("Doesn't have remote checkpoint yet", "peer", id, "err", err) + } + + log.Warn("Synchronisation failed, retrying", "peer", id, "err", err) + return err } @@ -764,12 +786,24 @@ func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, ui return int64(from), count, span - 1, uint64(max) } +// curried fetchHeadersByNumber +func (d *Downloader) getFetchHeadersByNumber(p *peerConnection) func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + return func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + return d.fetchHeadersByNumber(p, number, amount, skip, reverse) + } +} + // findAncestor tries to locate the common ancestor link of the local chain and // a remote peers blockchain. In the general case when our node was in sync and // on the correct chain, checking the top N links should already get us a match. // In the rare scenario when we ended up on a long reorganisation (i.e. none of // the head links match), we do a binary search to find the common ancestor. 
func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { + // Check the validity of chain to be downloaded + if _, err := d.IsValidChain(remoteHeader, d.getFetchHeadersByNumber(p)); err != nil { + return 0, err + } + // Figure out the valid ancestor range to prevent rewrite attacks var ( floor = int64(-1) @@ -1346,6 +1380,7 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { frequency = 1 } + // Although the received headers might be all valid, a legacy // PoW/PoA sync must not accept post-merge headers. Make sure // that any transition is rejected at this point. @@ -1353,13 +1388,16 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode rejected []*types.Header td *big.Int ) + if !beaconMode && ttd != nil { td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) if td == nil { // This should never really happen, but handle gracefully for now log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) + return fmt.Errorf("%w: parent TD missing", errInvalidChain) } + for i, header := range chunkHeaders { td = new(big.Int).Add(td, header.Difficulty) if td.Cmp(ttd) >= 0 { @@ -1373,10 +1411,12 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } else { chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] } + break } } } + if len(chunkHeaders) > 0 { if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil { rollbackErr = err @@ -1385,12 +1425,15 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 { rollback = chunkHeaders[0].Number.Uint64() } + log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) + return fmt.Errorf("%w: %v", errInvalidChain, err) } // All verifications passed, track all headers within the allowed limits if mode == SnapSync { head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() + if head-rollback > uint64(fsHeaderSafetyNet) { rollback = head - uint64(fsHeaderSafetyNet) } else { @@ -1398,14 +1441,26 @@ func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode } } } + + if len(rejected) != 0 { + // Merge threshold reached, stop importing, but don't roll back + rollback = 0 + + log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) + + return ErrMergeTransition + } + if len(rejected) != 0 { // Merge threshold reached, stop importing, but don't roll back rollback = 0 log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) + return ErrMergeTransition } } + // Unless we're doing light chains, schedule the headers for associated content retrieval if mode == FullSync || mode == SnapSync { // If we've reached the allowed number of pending headers, stall a bit diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 6989252c11ac..37b07424dd30 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" 
"github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/downloader/whitelist" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/event" @@ -42,6 +43,8 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + + "github.com/stretchr/testify/assert" ) // downloadTester is a test simulator for mocking out local block chain. @@ -60,25 +63,35 @@ func newTester() *downloadTester { if err != nil { panic(err) } + db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false) if err != nil { panic(err) } + core.GenesisBlockForTesting(db, testAddress, big.NewInt(1000000000000000)) chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) if err != nil { panic(err) } + tester := &downloadTester{ freezer: freezer, chain: chain, peers: make(map[string]*downloadTesterPeer), } - tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil) + + //nolint: staticcheck + tester.downloader = New(0, db, new(event.TypeMux), tester.chain, nil, tester.dropPeer, nil, whitelist.NewService(10)) + return tester } +func (dl *downloadTester) setWhitelist(w ChainValidator) { + dl.downloader.ChainValidator = w +} + // terminate aborts any operations on the embedded downloader and releases all // held resources. func (dl *downloadTester) terminate() { @@ -155,7 +168,7 @@ func (dlp *downloadTesterPeer) Head() (common.Hash, *big.Int) { } func unmarshalRlpHeaders(rlpdata []rlp.RawValue) []*types.Header { - var headers = make([]*types.Header, len(rlpdata)) + headers := make([]*types.Header, len(rlpdata)) for i, data := range rlpdata { var h types.Header if err := rlp.DecodeBytes(data, &h); err != nil { @@ -620,6 +633,7 @@ func TestBoundedHeavyForkedSync66Full(t *testing.T) { func TestBoundedHeavyForkedSync66Snap(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH66, SnapSync) } + func TestBoundedHeavyForkedSync66Light(t *testing.T) { testBoundedHeavyForkedSync(t, eth.ETH66, LightSync) } @@ -711,7 +725,7 @@ func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { // Create peers of every type tester.newPeer("peer 66", eth.ETH66, chain.blocks[1:]) - //tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:) + // tester.newPeer("peer 65", eth.ETH67, chain.blocks[1:) // Synchronise with the requested peer and make sure all blocks were retrieved if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { @@ -916,6 +930,7 @@ func TestHighTDStarvationAttack66Full(t *testing.T) { func TestHighTDStarvationAttack66Snap(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH66, SnapSync) } + func TestHighTDStarvationAttack66Light(t *testing.T) { testHighTDStarvationAttack(t, eth.ETH66, LightSync) } @@ -1268,36 +1283,45 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { expected []int }{ // Remote is way higher. We should ask for the remote head and go backwards - {1500, 1000, + { + 1500, 1000, []int{1323, 1339, 1355, 1371, 1387, 1403, 1419, 1435, 1451, 1467, 1483, 1499}, }, - {15000, 13006, + { + 15000, 13006, []int{14823, 14839, 14855, 14871, 14887, 14903, 14919, 14935, 14951, 14967, 14983, 14999}, }, // Remote is pretty close to us. 
We don't have to fetch as many - {1200, 1150, + { + 1200, 1150, []int{1149, 1154, 1159, 1164, 1169, 1174, 1179, 1184, 1189, 1194, 1199}, }, // Remote is equal to us (so on a fork with higher td) // We should get the closest couple of ancestors - {1500, 1500, + { + 1500, 1500, []int{1497, 1499}, }, // We're higher than the remote! Odd - {1000, 1500, + { + 1000, 1500, []int{997, 999}, }, // Check some weird edgecases that it behaves somewhat rationally - {0, 1500, + { + 0, 1500, []int{0, 2}, }, - {6000000, 0, + { + 6000000, 0, []int{5999823, 5999839, 5999855, 5999871, 5999887, 5999903, 5999919, 5999935, 5999951, 5999967, 5999983, 5999999}, }, - {0, 0, + { + 0, 0, []int{0, 2}, }, } + reqs := func(from, count, span int) []int { var r []int num := from @@ -1307,32 +1331,39 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { } return r } + for i, tt := range testCases { - from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) - data := reqs(int(from), count, span) + i := i + tt := tt - if max != uint64(data[len(data)-1]) { - t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) - } - failed := false - if len(data) != len(tt.expected) { - failed = true - t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) - } else { - for j, n := range data { - if n != tt.expected[j] { - failed = true - break + t.Run("", func(t *testing.T) { + from, count, span, max := calculateRequestSpan(tt.remoteHeight, tt.localHeight) + data := reqs(int(from), count, span) + + if max != uint64(data[len(data)-1]) { + t.Errorf("test %d: wrong last value %d != %d", i, data[len(data)-1], max) + } + failed := false + if len(data) != len(tt.expected) { + failed = true + t.Errorf("test %d: length wrong, expected %d got %d", i, len(tt.expected), len(data)) + } else { + for j, n := range data { + if n != tt.expected[j] { + failed = true + break + } } } - } - if failed { - res := strings.Replace(fmt.Sprint(data), " ", ",", -1) - exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1) - t.Logf("got: %v\n", res) - t.Logf("exp: %v\n", exp) - t.Errorf("test %d: wrong values", i) - } + + if failed { + res := strings.Replace(fmt.Sprint(data), " ", ",", -1) + exp := strings.Replace(fmt.Sprint(tt.expected), " ", ",", -1) + t.Logf("got: %v\n", res) + t.Logf("exp: %v\n", exp) + t.Errorf("test %d: wrong values", i) + } + }) } } @@ -1359,12 +1390,134 @@ func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { if mode == SnapSync || mode == LightSync { expect = errUnsyncedPeer } + if err := tester.sync("peer", nil, mode); !errors.Is(err, expect) { t.Fatalf("block sync error mismatch: have %v, want %v", err, expect) } + if mode == SnapSync || mode == LightSync { assertOwnChain(t, tester, 1) } else { assertOwnChain(t, tester, len(chain.blocks)) } } + +// whitelistFake is a mock for the chain validator service +type whitelistFake struct { + // count denotes the number of times the validate function was called + count int + + // validate is the dynamic function to be called while syncing + validate func(count int) (bool, error) +} + +// newWhitelistFake returns a new mock whitelist +func newWhitelistFake(validate func(count int) (bool, error)) *whitelistFake { + return &whitelistFake{0, validate} +} + +// IsValidChain is the mock function which the downloader will use to validate the chain +// to be received from a peer. 
+func (w *whitelistFake) IsValidChain(_ *types.Header, _ func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) { + defer func() { + w.count++ + }() + + return w.validate(w.count) +} + +func (w *whitelistFake) ProcessCheckpoint(_ uint64, _ common.Hash) {} + +func (w *whitelistFake) GetCheckpointWhitelist() map[uint64]common.Hash { + return nil +} + +func (w *whitelistFake) PurgeCheckpointWhitelist() {} + +// TestFakedSyncProgress66WhitelistMismatch tests that the sync fails when the +// whitelisted checkpoint does not match the one reported by the remote peer. +func TestFakedSyncProgress66WhitelistMismatch(t *testing.T) { + t.Parallel() + + protocol := uint(eth.ETH66) + mode := FullSync + + tester := newTester() + validate := func(count int) (bool, error) { + return false, whitelist.ErrCheckpointMismatch + } + tester.downloader.ChainValidator = newWhitelistFake(validate) + + defer tester.terminate() + + chainA := testChainForkLightA.blocks + tester.newPeer("light", protocol, chainA[1:]) + + // Synchronise with the peer and expect the sync to fail on the checkpoint mismatch + if err := tester.sync("light", nil, mode); err == nil { + t.Fatal("succeeded attacker synchronisation") + } +} + +// TestFakedSyncProgress66WhitelistMatch tests that the sync succeeds when the +// whitelisted checkpoint matches the one reported by the remote peer. +func TestFakedSyncProgress66WhitelistMatch(t *testing.T) { + t.Parallel() + + protocol := uint(eth.ETH66) + mode := FullSync + + tester := newTester() + validate := func(count int) (bool, error) { + return true, nil + } + tester.downloader.ChainValidator = newWhitelistFake(validate) + + defer tester.terminate() + + chainA := testChainForkLightA.blocks + tester.newPeer("light", protocol, chainA[1:]) + + // Synchronise with the peer and make sure all blocks were retrieved + if err := tester.sync("light", nil, mode); err != nil { + t.Fatal("failed synchronisation") + } +} + +// TestFakedSyncProgress66NoRemoteCheckpoint tests that when the remote peer is +// missing the checkpointed blocks, the sync fails initially but succeeds +// eventually through the retry mechanism.
+func TestFakedSyncProgress66NoRemoteCheckpoint(t *testing.T) { + t.Parallel() + + protocol := uint(eth.ETH66) + mode := FullSync + + tester := newTester() + validate := func(count int) (bool, error) { + // only return the `ErrNoRemoteCheckoint` error for the first call + if count == 0 { + return false, whitelist.ErrNoRemoteCheckoint + } + + return true, nil + } + + tester.downloader.ChainValidator = newWhitelistFake(validate) + + defer tester.terminate() + + chainA := testChainForkLightA.blocks + tester.newPeer("light", protocol, chainA[1:]) + + // Synchronise with the peer; the first attempt should fail because the + // remote checkpoint is unavailable + if err := tester.sync("light", nil, mode); err != nil { + assert.Equal(t, whitelist.ErrNoRemoteCheckoint, err, "failed synchronisation") + } + + // Try syncing again, should succeed + if err := tester.sync("light", nil, mode); err != nil { + t.Fatal("failed synchronisation") + } +} diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index f729def6712c..872bfcd1a99a 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -64,8 +64,11 @@ type chainData struct { offset int } -var chain *chainData -var emptyChain *chainData +var ( + chain *chainData + chainLongerFork *chainData + emptyChain *chainData +) func init() { // Create a chain of blocks to import @@ -75,6 +78,9 @@ func init() { blocks, _ = makeChain(targetBlocks, 0, genesis, true) emptyChain = &chainData{blocks, 0} + + chainLongerForkBlocks, _ := makeChain(1024, 0, blocks[len(blocks)-1], false) + chainLongerFork = &chainData{chainLongerForkBlocks, 0} } func (chain *chainData) headers() []*types.Header { diff --git a/eth/downloader/whitelist/service.go b/eth/downloader/whitelist/service.go new file mode 100644 index 000000000000..7036f24a8f96 --- /dev/null +++ b/eth/downloader/whitelist/service.go @@ -0,0 +1,126 @@ +package whitelist + +import ( + "errors" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" +) + +// Service maintains the checkpoint whitelist +type Service struct { + m sync.Mutex + checkpointWhitelist map[uint64]common.Hash // Checkpoint whitelist, populated by reaching out to heimdall + checkpointOrder []uint64 // Checkpoint order, populated by reaching out to heimdall + maxCapacity uint +} + +func NewService(maxCapacity uint) *Service { + return &Service{ + checkpointWhitelist: make(map[uint64]common.Hash), + checkpointOrder: []uint64{}, + maxCapacity: maxCapacity, + } +} + +var ( + ErrCheckpointMismatch = errors.New("checkpoint mismatch") + ErrNoRemoteCheckoint = errors.New("remote peer doesn't have a checkpoint") +) + +// IsValidChain checks if the chain we're about to receive from this peer is valid or not +// in terms of reorgs. We won't reorg beyond the last bor checkpoint submitted to mainchain. +func (w *Service) IsValidChain(remoteHeader *types.Header, fetchHeadersByNumber func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error)) (bool, error) { + // We want to validate the chain by comparing the last checkpointed block + // we're storing in `checkpointWhitelist` with the peer's block. + // + // Check for availability of the last checkpointed block. + // This can also be empty if our heimdall is not responding + // or we're running without it.
+ if len(w.checkpointWhitelist) == 0 { + // worst case, we don't have the checkpoints in memory + return true, nil + } + + // Fetch the last checkpoint entry + lastCheckpointBlockNum := w.checkpointOrder[len(w.checkpointOrder)-1] + lastCheckpointBlockHash := w.checkpointWhitelist[lastCheckpointBlockNum] + + // todo: we can extract this as an interface and mock it, or just test IsValidChain in isolation from the downloader by passing fake fetchHeadersByNumber functions + headers, hashes, err := fetchHeadersByNumber(lastCheckpointBlockNum, 1, 0, false) + if err != nil { + return false, fmt.Errorf("%w: last checkpoint %d, err %v", ErrNoRemoteCheckoint, lastCheckpointBlockNum, err) + } + + if len(headers) == 0 { + return false, fmt.Errorf("%w: last checkpoint %d", ErrNoRemoteCheckoint, lastCheckpointBlockNum) + } + + reqBlockNum := headers[0].Number.Uint64() + reqBlockHash := hashes[0] + + // Check against the checkpointed blocks + if reqBlockNum == lastCheckpointBlockNum && reqBlockHash == lastCheckpointBlockHash { + return true, nil + } + + return false, ErrCheckpointMismatch +} + +func (w *Service) ProcessCheckpoint(endBlockNum uint64, endBlockHash common.Hash) { + w.m.Lock() + defer w.m.Unlock() + + w.enqueueCheckpointWhitelist(endBlockNum, endBlockHash) + // If the checkpoint whitelist map has grown beyond the max capacity, remove the oldest entry. + + if w.length() > int(w.maxCapacity) { + w.dequeueCheckpointWhitelist() + } +} + +// GetCheckpointWhitelist returns the existing whitelisted +// checkpoint entries of the form block number -> block hash. +func (w *Service) GetCheckpointWhitelist() map[uint64]common.Hash { + w.m.Lock() + defer w.m.Unlock() + + return w.checkpointWhitelist +} + +// PurgeCheckpointWhitelist purges all entries from the checkpoint whitelist map +func (w *Service) PurgeCheckpointWhitelist() { + w.m.Lock() + defer w.m.Unlock() + + w.checkpointWhitelist = make(map[uint64]common.Hash) + w.checkpointOrder = make([]uint64, 0) +} + +// enqueueCheckpointWhitelist enqueues a blockNumber, blockHash pair into the checkpoint whitelist map +func (w *Service) enqueueCheckpointWhitelist(key uint64, val common.Hash) { + if _, ok := w.checkpointWhitelist[key]; !ok { + log.Debug("Enqueuing new checkpoint whitelist entry", "block number", key, "block hash", val) + + w.checkpointWhitelist[key] = val + w.checkpointOrder = append(w.checkpointOrder, key) + } +} + +// dequeueCheckpointWhitelist dequeues the oldest blockNumber, blockHash pair from the checkpoint whitelist map +func (w *Service) dequeueCheckpointWhitelist() { + if len(w.checkpointOrder) > 0 { + log.Debug("Dequeuing checkpoint whitelist entry", "block number", w.checkpointOrder[0], "block hash", w.checkpointWhitelist[w.checkpointOrder[0]]) + + delete(w.checkpointWhitelist, w.checkpointOrder[0]) + w.checkpointOrder = w.checkpointOrder[1:] + } +} + +// length returns the current length of the whitelist.
+func (w *Service) length() int { + return len(w.checkpointWhitelist) +} diff --git a/eth/downloader/whitelist/service_test.go b/eth/downloader/whitelist/service_test.go new file mode 100644 index 000000000000..ca202cc3ad4a --- /dev/null +++ b/eth/downloader/whitelist/service_test.go @@ -0,0 +1,107 @@ +package whitelist + +import ( + "errors" + "math/big" + "testing" + + "gotest.tools/assert" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +// NewMockService creates a new mock whitelist service +func NewMockService(maxCapacity uint) *Service { + return &Service{ + checkpointWhitelist: make(map[uint64]common.Hash), + checkpointOrder: []uint64{}, + maxCapacity: maxCapacity, + } +} + +// TestWhitelistCheckpoint checks the checkpoint whitelist map queue mechanism +func TestWhitelistCheckpoint(t *testing.T) { + t.Parallel() + + s := NewMockService(10) + for i := 0; i < 10; i++ { + s.enqueueCheckpointWhitelist(uint64(i), common.Hash{}) + } + assert.Equal(t, s.length(), 10, "expected 10 items in whitelist") + + s.enqueueCheckpointWhitelist(11, common.Hash{}) + s.dequeueCheckpointWhitelist() + assert.Equal(t, s.length(), 10, "expected 10 items in whitelist") +} + +// TestIsValidChain checks the IsValidChain function in isolation +// for different cases by providing a mock fetchHeadersByNumber function +func TestIsValidChain(t *testing.T) { + t.Parallel() + + s := NewMockService(10) + + // case1: no checkpoint whitelist, should consider the chain as valid + res, err := s.IsValidChain(nil, nil) + assert.NilError(t, err, "expected no error") + assert.Equal(t, res, true, "expected chain to be valid") + + // add checkpoint entries and mock fetchHeadersByNumber function + s.ProcessCheckpoint(uint64(0), common.Hash{}) + s.ProcessCheckpoint(uint64(1), common.Hash{}) + + assert.Equal(t, s.length(), 2, "expected 2 items in whitelist") + + // create a false function, returning absolutely nothing + falseFetchHeadersByNumber := func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) { + return nil, nil, nil + } + + // case2: false fetchHeadersByNumber function provided, should consider the chain as invalid + // and throw `ErrNoRemoteCheckoint` error + res, err = s.IsValidChain(nil, falseFetchHeadersByNumber) + if err == nil { + t.Fatal("expected error, got nil") + } + + if !errors.Is(err, ErrNoRemoteCheckoint) { + t.Fatalf("expected error ErrNoRemoteCheckoint, got %v", err) + } + + assert.Equal(t, res, false, "expected chain to be invalid") + + // case3: correct fetchHeadersByNumber function provided, should consider the chain as valid + // create a mock function, returning the required header + fetchHeadersByNumber := func(number uint64, _ int, _ int, _ bool) ([]*types.Header, []common.Hash, error) { + hash := common.Hash{} + header := types.Header{Number: big.NewInt(0)} + + switch number { + case 0: + return []*types.Header{&header}, []common.Hash{hash}, nil + case 1: + header.Number = big.NewInt(1) + return []*types.Header{&header}, []common.Hash{hash}, nil + case 2: + header.Number = big.NewInt(1) // sending wrong header to force a mismatch + return []*types.Header{&header}, []common.Hash{hash}, nil + default: + return nil, nil, errors.New("invalid number") + } + } + + res, err = s.IsValidChain(nil, fetchHeadersByNumber) + assert.NilError(t, err, "expected no error") + assert.Equal(t, res, true, "expected chain to be valid") + + // add one more checkpoint whitelist entry + s.ProcessCheckpoint(uint64(2), common.Hash{}) +
assert.Equal(t, s.length(), 3, "expected 3 items in whitelist") + + // case4: correct fetchHeadersByNumber function provided with wrong header + // for block number 2. Should consider the chain as invalid and throw an error + res, err = s.IsValidChain(nil, fetchHeadersByNumber) + assert.Equal(t, err, ErrCheckpointMismatch, "expected checkpoint mismatch error") + assert.Equal(t, res, false, "expected chain to be invalid") +} diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 904fb2cbc217..a3c7f49e3984 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -91,13 +91,13 @@ var Defaults = Config{ Miner: miner.Config{ GasCeil: 8000000, GasPrice: big.NewInt(params.GWei), - Recommit: 3 * time.Second, + Recommit: 125 * time.Second, }, TxPool: core.DefaultTxPoolConfig, RPCGasCap: 50000000, RPCEVMTimeout: 5 * time.Second, GPO: FullNodeGPO, - RPCTxFeeCap: 1, // 1 ether + RPCTxFeeCap: 5, // 5 matic } func init() { diff --git a/eth/handler.go b/eth/handler.go index 40edfa2d1758..ab95f5f769ae 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -31,11 +31,13 @@ import ( "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/downloader/whitelist" "github.com/ethereum/go-ethereum/eth/fetcher" "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" @@ -77,15 +79,16 @@ type txPool interface { // handlerConfig is the collection of initialization parameters to create a full // node network handler. 
type handlerConfig struct { - Database ethdb.Database // Database for direct sync insertions - Chain *core.BlockChain // Blockchain to serve data from - TxPool txPool // Transaction pool to propagate from - Merger *consensus.Merger // The manager for eth1/2 transition - Network uint64 // Network identifier to adfvertise - Sync downloader.SyncMode // Whether to snap or full sync - BloomCache uint64 // Megabytes to alloc for snap sync bloom - EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` - Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges + Database ethdb.Database // Database for direct sync insertions + Chain *core.BlockChain // Blockchain to serve data from + TxPool txPool // Transaction pool to propagate from + Merger *consensus.Merger // The manager for eth1/2 transition + Network uint64 // Network identifier to adfvertise + Sync downloader.SyncMode // Whether to snap or full sync + BloomCache uint64 // Megabytes to alloc for snap sync bloom + EventMux *event.TypeMux //nolint:staticcheck // Legacy event mux, deprecate for `feed` + Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges + EthAPI *ethapi.PublicBlockChainAPI // EthAPI to interact PeerRequiredBlocks map[uint64]common.Hash // Hard coded map of required block hashes for sync challenges } @@ -111,6 +114,8 @@ type handler struct { peers *peerSet merger *consensus.Merger + ethAPI *ethapi.PublicBlockChainAPI // EthAPI to interact + eventMux *event.TypeMux txsCh chan core.NewTxsEvent txsSub event.Subscription @@ -141,6 +146,7 @@ func newHandler(config *handlerConfig) (*handler, error) { chain: config.Chain, peers: newPeerSet(), merger: config.Merger, + ethAPI: config.EthAPI, peerRequiredBlocks: config.PeerRequiredBlocks, quitSync: make(chan struct{}), } @@ -195,7 +201,8 @@ func newHandler(config *handlerConfig) (*handler, error) { // Construct the downloader (long sync) and its backing state bloom if snap // sync is requested. The downloader is responsible for deallocating the state // bloom when it's done. - h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success) + // todo: it'd better to extract maxCapacity into config + h.downloader = downloader.New(h.checkpointNumber, config.Database, h.eventMux, h.chain, nil, h.removePeer, success, whitelist.NewService(10)) // Construct the fetcher (short sync) validator := func(header *types.Header) error { diff --git a/eth/handler_bor.go b/eth/handler_bor.go new file mode 100644 index 000000000000..11896f3c47ea --- /dev/null +++ b/eth/handler_bor.go @@ -0,0 +1,75 @@ +package eth + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/bor" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" +) + +var ( + // errCheckpoint is returned when we are unable to fetch the + // latest checkpoint from the local heimdall. + errCheckpoint = errors.New("failed to fetch latest checkpoint") + + // errMissingCheckpoint is returned when we don't have the + // checkpoint blocks locally, yet. + errMissingCheckpoint = errors.New("missing checkpoint blocks") + + // errRootHash is returned when we aren't able to calculate the root hash + // locally for a range of blocks. 
+ errRootHash = errors.New("failed to get local root hash") + + // errCheckpointRootHashMismatch is returned when the local root hash + // doesn't match the root hash in the checkpoint. + errCheckpointRootHashMismatch = errors.New("checkpoint root hash mismatch") + + // errEndBlock is returned when we're unable to fetch a block locally. + errEndBlock = errors.New("failed to get end block") +) + +// fetchWhitelistCheckpoint fetches the latest checkpoint from its local heimdall +// and verifies the data against bor data. +func (h *ethHandler) fetchWhitelistCheckpoint(bor *bor.Bor) (uint64, common.Hash, error) { + // check for checkpoint whitelisting: bor + checkpoint, err := bor.HeimdallClient.FetchLatestCheckpoint() + if err != nil { + log.Debug("Failed to fetch latest checkpoint for whitelisting") + return 0, common.Hash{}, errCheckpoint + } + + // check if we have the checkpoint blocks + head := h.ethAPI.BlockNumber() + if head < hexutil.Uint64(checkpoint.EndBlock.Uint64()) { + log.Debug("Head block behind checkpoint block", "head", head, "checkpoint end block", checkpoint.EndBlock) + return 0, common.Hash{}, errMissingCheckpoint + } + + // verify the root hash of the checkpoint + roothash, err := h.ethAPI.GetRootHash(context.Background(), checkpoint.StartBlock.Uint64(), checkpoint.EndBlock.Uint64()) + if err != nil { + log.Debug("Failed to get root hash of checkpoint while whitelisting") + return 0, common.Hash{}, errRootHash + } + + if roothash != checkpoint.RootHash.String()[2:] { + log.Warn("Checkpoint root hash mismatch while whitelisting", "expected", checkpoint.RootHash.String()[2:], "got", roothash) + return 0, common.Hash{}, errCheckpointRootHashMismatch + } + + // fetch the end checkpoint block hash + block, err := h.ethAPI.GetBlockByNumber(context.Background(), rpc.BlockNumber(checkpoint.EndBlock.Uint64()), false) + if err != nil { + log.Debug("Failed to get end block hash of checkpoint while whitelisting") + return 0, common.Hash{}, errEndBlock + } + + hash := fmt.Sprintf("%v", block["hash"]) + + return checkpoint.EndBlock.Uint64(), common.HexToHash(hash), nil +} diff --git a/go.mod b/go.mod index 852c895e3b49..1b452a67a1af 100644 --- a/go.mod +++ b/go.mod @@ -76,6 +76,7 @@ require ( gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 gopkg.in/urfave/cli.v1 v1.20.0 + gotest.tools v2.2.0+incompatible pgregory.net/rapid v0.4.7 ) @@ -135,5 +136,4 @@ require ( google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - gotest.tools v2.2.0+incompatible // indirect ) diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 15536d5d72e8..59b6feba52cf 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2156,6 +2156,17 @@ func (api *PrivateDebugAPI) SetHead(number hexutil.Uint64) { api.b.SetHead(uint64(number)) } +// GetCheckpointWhitelist retrieves the current checkpoint whitelist +// entries (of the form block number -> block hash) +func (api *PrivateDebugAPI) GetCheckpointWhitelist() map[uint64]common.Hash { + return api.b.GetCheckpointWhitelist() +} + +// PurgeCheckpointWhitelist purges the current checkpoint whitelist entries +func (api *PrivateDebugAPI) PurgeCheckpointWhitelist() { + api.b.PurgeCheckpointWhitelist() +} + // PublicNetAPI offers network related RPC methods type PublicNetAPI struct { net *p2p.Server diff --git a/internal/ethapi/backend.go
b/internal/ethapi/backend.go index 97ce7c24cf68..1287640b83b7 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -99,6 +99,8 @@ type Backend interface { GetBorBlockTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) GetBorBlockTransactionWithBlockHash(ctx context.Context, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) SubscribeChain2HeadEvent(ch chan<- core.Chain2HeadEvent) event.Subscription + GetCheckpointWhitelist() map[uint64]common.Hash + PurgeCheckpointWhitelist() ChainConfig() *params.ChainConfig Engine() consensus.Engine diff --git a/internal/ethapi/transaction_args.go b/internal/ethapi/transaction_args.go index 9c5950af58fe..aa2596fe81e3 100644 --- a/internal/ethapi/transaction_args.go +++ b/internal/ethapi/transaction_args.go @@ -59,6 +59,7 @@ func (args *TransactionArgs) from() common.Address { if args.From == nil { return common.Address{} } + return *args.From } @@ -67,9 +68,11 @@ func (args *TransactionArgs) data() []byte { if args.Input != nil { return *args.Input } + if args.Data != nil { return *args.Data } + return nil } @@ -78,8 +81,10 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { return errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") } + // After london, default to 1559 unless gasPrice is set head := b.CurrentHeader() + // If user specifies both maxPriorityfee and maxFee, then we do not // need to consult the chain for defaults. It's definitely a London tx. if args.MaxPriorityFeePerGas == nil || args.MaxFeePerGas == nil { @@ -90,6 +95,7 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { if err != nil { return err } + args.MaxPriorityFeePerGas = (*hexutil.Big)(tip) } if args.MaxFeePerGas == nil { @@ -97,8 +103,10 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { (*big.Int)(args.MaxPriorityFeePerGas), new(big.Int).Mul(head.BaseFee, big.NewInt(2)), ) + args.MaxFeePerGas = (*hexutil.Big)(gasFeeCap) } + if args.MaxFeePerGas.ToInt().Cmp(args.MaxPriorityFeePerGas.ToInt()) < 0 { return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas) } @@ -106,17 +114,20 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { if args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil { return errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet") } + if args.GasPrice == nil { price, err := b.SuggestGasTipCap(ctx) if err != nil { return err } + if b.ChainConfig().IsLondon(head.Number) { // The legacy tx gas price suggestion should not add 2x base fee // because all fees are consumed, so it would result in a spiral // upwards. 
price.Add(price, head.BaseFee) } + args.GasPrice = (*hexutil.Big)(price) } } @@ -126,22 +137,28 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { return fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", args.MaxFeePerGas, args.MaxPriorityFeePerGas) } } + if args.Value == nil { args.Value = new(hexutil.Big) } + if args.Nonce == nil { nonce, err := b.GetPoolNonce(ctx, args.from()) if err != nil { return err } + args.Nonce = (*hexutil.Uint64)(&nonce) } + if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) { return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`) } + if args.To == nil && len(args.data()) == 0 { return errors.New(`contract creation without any data provided`) } + // Estimate the gas usage if necessary. if args.Gas == nil { // These fields are immutable during the estimation, safe to @@ -157,18 +174,23 @@ func (args *TransactionArgs) setDefaults(ctx context.Context, b Backend) error { Data: (*hexutil.Bytes)(&data), AccessList: args.AccessList, } + pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) if err != nil { return err } + args.Gas = &estimated log.Trace("Estimate gas usage automatically", "gas", args.Gas) } + if args.ChainID == nil { id := (*hexutil.Big)(b.ChainConfig().ChainID) args.ChainID = id } + return nil } @@ -180,32 +202,41 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { return types.Message{}, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") } + // Set sender address or use zero address if none specified. 
addr := args.from() + // Gas set for system calls + systemCallGas := (hexutil.Uint64)(uint64(math.MaxUint64 / 2)) + // Set default gas & gas price if none were set gas := globalGasCap if gas == 0 { gas = uint64(math.MaxUint64 / 2) } - if args.Gas != nil { + + if args.Gas != nil && *args.Gas != systemCallGas { gas = uint64(*args.Gas) } + if globalGasCap != 0 && globalGasCap < gas { log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) gas = globalGasCap } + var ( gasPrice *big.Int gasFeeCap *big.Int gasTipCap *big.Int ) + if baseFee == nil { // If there's no basefee, then it must be a non-1559 execution gasPrice = new(big.Int) if args.GasPrice != nil { gasPrice = args.GasPrice.ToInt() } + gasFeeCap, gasTipCap = gasPrice, gasPrice } else { // A basefee is provided, necessitating 1559-type execution @@ -216,30 +247,40 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t } else { // User specified 1559 gas feilds (or none), use those gasFeeCap = new(big.Int) + if args.MaxFeePerGas != nil { gasFeeCap = args.MaxFeePerGas.ToInt() } + gasTipCap = new(big.Int) + if args.MaxPriorityFeePerGas != nil { gasTipCap = args.MaxPriorityFeePerGas.ToInt() } + // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes gasPrice = new(big.Int) + if gasFeeCap.BitLen() > 0 || gasTipCap.BitLen() > 0 { gasPrice = math.BigMin(new(big.Int).Add(gasTipCap, baseFee), gasFeeCap) } } } + value := new(big.Int) if args.Value != nil { value = args.Value.ToInt() } + data := args.data() + var accessList types.AccessList if args.AccessList != nil { accessList = *args.AccessList } + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, true) + return msg, nil } @@ -247,6 +288,7 @@ func (args *TransactionArgs) ToMessage(globalGasCap uint64, baseFee *big.Int) (t // This assumes that setDefaults has been called. 
func (args *TransactionArgs) toTransaction() *types.Transaction { var data types.TxData + switch { case args.MaxFeePerGas != nil: al := types.AccessList{} @@ -285,6 +327,7 @@ func (args *TransactionArgs) toTransaction() *types.Transaction { Data: args.data(), } } + return types.NewTx(data) } diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index dd8b34e02593..dcdd5baf234b 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -474,6 +474,16 @@ web3._extend({ params: 2, inputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter], }), + new web3._extend.Method({ + name: 'getCheckpointWhitelist', + call: 'debug_getCheckpointWhitelist', + params: 0, + }), + new web3._extend.Method({ + name: 'purgeCheckpointWhitelist', + call: 'debug_purgeCheckpointWhitelist', + params: 0, + }), ], properties: [] }); diff --git a/les/api_backend.go b/les/api_backend.go index e9e51c4ecca1..c716a3967f79 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -337,17 +337,24 @@ func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Blo // func (b *LesApiBackend) GetBorBlockReceipt(ctx context.Context, hash common.Hash) (*types.Receipt, error) { - return nil, errors.New("Not implemented") + return nil, errors.New("not implemented") } func (b *LesApiBackend) GetBorBlockLogs(ctx context.Context, hash common.Hash) ([]*types.Log, error) { - return nil, errors.New("Not implemented") + return nil, errors.New("not implemented") } func (b *LesApiBackend) GetBorBlockTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - return nil, common.Hash{}, 0, 0, errors.New("Not implemented") + return nil, common.Hash{}, 0, 0, errors.New("not implemented") } func (b *LesApiBackend) GetBorBlockTransactionWithBlockHash(ctx context.Context, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - return nil, common.Hash{}, 0, 0, errors.New("Not implemented") + return nil, common.Hash{}, 0, 0, errors.New("not implemented") +} + +func (b *LesApiBackend) GetCheckpointWhitelist() map[uint64]common.Hash { + return nil +} + +func (b *LesApiBackend) PurgeCheckpointWhitelist() { } diff --git a/miner/worker.go b/miner/worker.go index 14c5ccd1a358..9fcb2140cad0 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -390,27 +390,31 @@ func (w *worker) close() { // recalcRecommit recalculates the resubmitting interval upon feedback. 
func recalcRecommit(minRecommit, prev time.Duration, target float64, inc bool) time.Duration { - var ( - prevF = float64(prev.Nanoseconds()) - next float64 - ) - - if inc { - next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) - - max := float64(maxRecommitInterval.Nanoseconds()) - if next > max { - next = max - } - } else { - next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) - min := float64(minRecommit.Nanoseconds()) - if next < min { - next = min - } - } - - return time.Duration(int64(next)) + // var ( + // prevF = float64(prev.Nanoseconds()) + // next float64 + // ) + // + // if inc { + // next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target+intervalAdjustBias) + // max := float64(maxRecommitInterval.Nanoseconds()) + // + // if next > max { + // next = max + // } + // } else { + // next = prevF*(1-intervalAdjustRatio) + intervalAdjustRatio*(target-intervalAdjustBias) + // min := float64(minRecommit.Nanoseconds()) + // + // if next < min { + // next = min + // } + // } + // + // log.Info("Recalc Commit", "Prev", prev, "Next", next) + + //returning the Same prev value to keep the recommit interval constant + return prev } // newWorkLoop is a standalone goroutine to submit new sealing work upon received events. diff --git a/miner/worker_test.go b/miner/worker_test.go index 00739844f2ef..6dae391c8863 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -489,10 +489,15 @@ func testRegenerateMiningBlock(t *testing.T, chainConfig *params.ChainConfig, en } func TestAdjustIntervalEthash(t *testing.T) { + // Skipping this test as recommit interval would remain constant + t.Skip() testAdjustInterval(t, ethashChainConfig, ethash.NewFaker()) } func TestAdjustIntervalClique(t *testing.T) { + + // Skipping this test as recommit interval would remain constant + t.Skip() testAdjustInterval(t, cliqueChainConfig, clique.New(cliqueChainConfig.Clique, rawdb.NewMemoryDatabase())) } diff --git a/tests/bor/bor_test.go b/tests/bor/bor_test.go index 434ad805e167..36d515c557d7 100644 --- a/tests/bor/bor_test.go +++ b/tests/bor/bor_test.go @@ -11,10 +11,13 @@ import ( "time" "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + "golang.org/x/crypto/sha3" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/bor" "github.com/ethereum/go-ethereum/consensus/bor/clerk" + "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint" "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -25,15 +28,6 @@ import ( "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests/bor/mocks" - - "github.com/stretchr/testify/assert" - "golang.org/x/crypto/sha3" -) - -const ( - spanPath = "bor/span/1" - clerkPath = "clerk/event-record/list" - clerkQueryParams = "from-time=%d&to-time=%d&page=%d&limit=50" ) func TestInsertingSpanSizeBlocks(t *testing.T) { @@ -42,9 +36,20 @@ func TestInsertingSpanSizeBlocks(t *testing.T) { engine := init.ethereum.Engine() _bor := engine.(*bor.Bor) + defer _bor.Close() + h, heimdallSpan, ctrl := getMockedHeimdallClient(t) defer ctrl.Finish() + _, span := loadSpanFromFile(t) + + h.EXPECT().Close().AnyTimes() + h.EXPECT().FetchLatestCheckpoint().Return(&checkpoint.Checkpoint{ + Proposer: span.SelectedProducers[0].Address, + StartBlock: big.NewInt(0), + EndBlock: big.NewInt(int64(spanSize)), + }, 
nil).AnyTimes() + _bor.SetHeimdallClient(h) db := init.ethereum.ChainDb() @@ -75,6 +80,8 @@ func TestFetchStateSyncEvents(t *testing.T) { engine := init.ethereum.Engine() _bor := engine.(*bor.Bor) + defer _bor.Close() + // A. Insert blocks for 0th sprint db := init.ethereum.ChainDb() block := init.genesis.ToBlock(db) @@ -92,6 +99,7 @@ func TestFetchStateSyncEvents(t *testing.T) { defer ctrl.Finish() h := mocks.NewMockIHeimdallClient(ctrl) + h.EXPECT().Close().AnyTimes() h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes() // B.2 Mock State Sync events @@ -117,6 +125,8 @@ func TestFetchStateSyncEvents_2(t *testing.T) { engine := init.ethereum.Engine() _bor := engine.(*bor.Bor) + defer _bor.Close() + // Mock /bor/span/1 res, _ := loadSpanFromFile(t) @@ -124,6 +134,7 @@ func TestFetchStateSyncEvents_2(t *testing.T) { defer ctrl.Finish() h := mocks.NewMockIHeimdallClient(ctrl) + h.EXPECT().Close().AnyTimes() h.EXPECT().Span(uint64(1)).Return(&res.Result, nil).AnyTimes() // Mock State Sync events @@ -184,9 +195,12 @@ func TestOutOfTurnSigning(t *testing.T) { engine := init.ethereum.Engine() _bor := engine.(*bor.Bor) + defer _bor.Close() + h, _, ctrl := getMockedHeimdallClient(t) defer ctrl.Finish() + h.EXPECT().Close().AnyTimes() _bor.SetHeimdallClient(h) db := init.ethereum.ChainDb() @@ -217,6 +231,7 @@ func TestOutOfTurnSigning(t *testing.T) { bor.CalcProducerDelay(header.Number.Uint64(), 0, init.genesis.Config.Bor)) sign(t, header, signerKey, init.genesis.Config.Bor) block = types.NewBlockWithHeader(header) + _, err = chain.InsertChain([]*types.Block{block}) assert.Equal(t, *err.(*bor.WrongDifficultyError), @@ -225,6 +240,7 @@ func TestOutOfTurnSigning(t *testing.T) { header.Difficulty = new(big.Int).SetUint64(expectedDifficulty) sign(t, header, signerKey, init.genesis.Config.Bor) block = types.NewBlockWithHeader(header) + _, err = chain.InsertChain([]*types.Block{block}) assert.Nil(t, err) } @@ -235,9 +251,13 @@ func TestSignerNotFound(t *testing.T) { engine := init.ethereum.Engine() _bor := engine.(*bor.Bor) + defer _bor.Close() + h, _, ctrl := getMockedHeimdallClient(t) defer ctrl.Finish() + h.EXPECT().Close().AnyTimes() + _bor.SetHeimdallClient(h) db := init.ethereum.ChainDb() @@ -298,6 +318,10 @@ func getSampleEventRecord(t *testing.T) *clerk.EventRecordWithTime { return eventRecords.Result[0] } +func getEventRecords(t *testing.T) []*clerk.EventRecordWithTime { + return stateSyncEventsPayload(t).Result +} + // TestEIP1559Transition tests the following: // // 1. A transaction whose gasFeeCap is greater than the baseFee is valid. diff --git a/tests/bor/mocks/IHeimdallClient.go b/tests/bor/mocks/IHeimdallClient.go index 4e4d7a37768c..f770ed9fa807 100644 --- a/tests/bor/mocks/IHeimdallClient.go +++ b/tests/bor/mocks/IHeimdallClient.go @@ -8,6 +8,7 @@ import ( reflect "reflect" clerk "github.com/ethereum/go-ethereum/consensus/bor/clerk" + checkpoint "github.com/ethereum/go-ethereum/consensus/bor/heimdall/checkpoint" span "github.com/ethereum/go-ethereum/consensus/bor/heimdall/span" gomock "github.com/golang/mock/gomock" ) @@ -47,6 +48,21 @@ func (mr *MockIHeimdallClientMockRecorder) Close() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockIHeimdallClient)(nil).Close)) } +// FetchLatestCheckpoint mocks base method. 
+func (m *MockIHeimdallClient) FetchLatestCheckpoint() (*checkpoint.Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FetchLatestCheckpoint") + ret0, _ := ret[0].(*checkpoint.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FetchLatestCheckpoint indicates an expected call of FetchLatestCheckpoint. +func (mr *MockIHeimdallClientMockRecorder) FetchLatestCheckpoint() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchLatestCheckpoint", reflect.TypeOf((*MockIHeimdallClient)(nil).FetchLatestCheckpoint)) +} + // Span mocks base method. func (m *MockIHeimdallClient) Span(arg0 uint64) (*span.HeimdallSpan, error) { m.ctrl.T.Helper()
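
For reference, the sketch below strings together the pieces introduced in this change — whitelist.NewService, Service.ProcessCheckpoint and Service.IsValidChain — roughly the way eth/backend.go and the downloader wire them up during sync. It is a minimal illustration only; the block number, the hash and the fetchHeadersByNumber stub are made-up stand-ins, not values taken from this change.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader/whitelist"
)

func main() {
	// Capacity of 10 mirrors the value hard-coded in eth/handler.go.
	svc := whitelist.NewService(10)

	// Whitelist the end block of the latest checkpoint, as the background
	// service started from eth/backend.go does after querying heimdall.
	// The number and hash here are illustrative only.
	endBlock := uint64(1024)
	endHash := common.HexToHash("0x01")
	svc.ProcessCheckpoint(endBlock, endHash)

	// Stand-in for the curried fetchHeadersByNumber the downloader builds
	// from a peer connection; here it simply agrees with the whitelisted block.
	fetch := func(number uint64, amount int, skip int, reverse bool) ([]*types.Header, []common.Hash, error) {
		header := &types.Header{Number: new(big.Int).SetUint64(number)}
		return []*types.Header{header}, []common.Hash{endHash}, nil
	}

	// The downloader calls IsValidChain from findAncestor before fetching
	// headers; a mismatch aborts the sync with ErrCheckpointMismatch.
	ok, err := svc.IsValidChain(nil, fetch)
	fmt.Println(ok, err) // prints "true <nil>" when the peer agrees on the checkpointed block
}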