diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 1314ea9861..1c2efebe3f 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -55,6 +55,17 @@ OK: 5/5 Fail: 0/5 Skip: 0/5 + basics OK ``` OK: 1/1 Fail: 0/1 Skip: 0/1 +## Block clearance (light client) [Preset: mainnet] +```diff ++ Delayed finality update OK ++ Error conditions OK ++ Incremental sync OK ++ Initial sync OK ++ Low slot numbers OK ++ Reorg OK ++ Reverse incremental sync OK +``` +OK: 7/7 Fail: 0/7 Skip: 0/7 ## Block pool altair processing [Preset: mainnet] ```diff + Invalid signatures [Preset: mainnet] OK @@ -544,4 +555,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 OK: 1/1 Fail: 0/1 Skip: 0/1 ---TOTAL--- -OK: 301/306 Fail: 0/306 Skip: 5/306 +OK: 308/313 Fail: 0/313 Skip: 5/313 diff --git a/Makefile b/Makefile index 4d620f2ab1..92e676108b 100644 --- a/Makefile +++ b/Makefile @@ -309,7 +309,9 @@ define CONNECT_TO_NETWORK_IN_DEV_MODE --network=$(1) $(3) $(GOERLI_TESTNETS_PARAMS) \ --log-level="DEBUG; TRACE:discv5,networking; REQUIRED:none; DISABLED:none" \ --data-dir=build/data/shared_$(1)_$(NODE_ID) \ - --serve-light-client-data=1 --import-light-client-data=only-new \ + --light-client-enable=on \ + --serve-light-client-data=on \ + --import-light-client-data=only-new \ --dump $(NODE_PARAMS) endef diff --git a/beacon_chain/beacon_chain_db_immutable.nim b/beacon_chain/beacon_chain_db_immutable.nim index 9aa13d5047..01a2dd01c1 100644 --- a/beacon_chain/beacon_chain_db_immutable.nim +++ b/beacon_chain/beacon_chain_db_immutable.nim @@ -8,9 +8,6 @@ {.push raises: [Defect].} import - stew/results, - serialization, - eth/db/kvstore, ./spec/datatypes/[base, altair, bellatrix], ./spec/[eth2_ssz_serialization, eth2_merkleization] diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 0175903b6c..4cb9ace1cd 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -22,7 +22,7 @@ import blockchain_dag, block_quarantine, exit_pool, attestation_pool, sync_committee_msg_pool], ./spec/datatypes/base, - ./sync/[sync_manager, request_manager], + ./sync/[optimistic_sync_light_client, sync_manager, request_manager], ./validators/[action_tracker, validator_monitor, validator_pool], ./rpc/state_ttl_cache @@ -30,9 +30,9 @@ export osproc, chronos, httpserver, presto, action_tracker, beacon_clock, beacon_chain_db, conf, light_client, attestation_pool, sync_committee_msg_pool, validator_pool, - eth2_network, eth1_monitor, request_manager, sync_manager, - eth2_processor, blockchain_dag, block_quarantine, base, exit_pool, - validator_monitor, consensus_manager + eth2_network, eth1_monitor, optimistic_sync_light_client, + request_manager, sync_manager, eth2_processor, blockchain_dag, + block_quarantine, base, exit_pool, validator_monitor, consensus_manager type RpcServer* = RpcHttpServer @@ -45,6 +45,7 @@ type db*: BeaconChainDB config*: BeaconNodeConf attachedValidators*: ref ValidatorPool + lcOptSync*: LCOptimisticSync lightClient*: LightClient dag*: ChainDAGRef quarantine*: ref Quarantine diff --git a/beacon_chain/beacon_node_light_client.nim b/beacon_chain/beacon_node_light_client.nim index 8aec9c586e..fa4a505917 100644 --- a/beacon_chain/beacon_node_light_client.nim +++ b/beacon_chain/beacon_node_light_client.nim @@ -29,11 +29,56 @@ proc initLightClient*( # because the light client module also handles gossip subscriptions # for broadcasting light client data as a server. 
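The replacement hunk just below also gates optimistic sync on two slot thresholds before acting on a new light-client header. As a self-contained sketch (with `Slot` simplified to `uint64` and mainnet's `SLOTS_PER_EPOCH = 32` assumed), the check behaves roughly like this:

```nim
# Illustrative only: mirrors the shouldSyncOptimistically predicate added in
# beacon_node_light_client.nim below, with simplified types.
const
  SLOTS_PER_EPOCH = 32'u64            # mainnet preset
  minProgress = 8 * SLOTS_PER_EPOCH   # LC head must be well ahead of the DAG head
  maxAge = 2 * SLOTS_PER_EPOCH        # ... but the LC header must not be stale

func shouldSyncOptimistically(lcSlot, dagSlot, wallSlot: uint64): bool =
  if lcSlot < dagSlot + minProgress:
    false   # DAG is close enough; the regular sync path is sufficient
  elif wallSlot > lcSlot + maxAge:
    false   # optimistic header too old relative to wall time
  else:
    true

doAssert not shouldSyncOptimistically(lcSlot = 100, dagSlot = 0, wallSlot = 100)
doAssert shouldSyncOptimistically(lcSlot = 1000, dagSlot = 700, wallSlot = 1010)
```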
- let lightClient = createLightClient( - node.network, rng, config, cfg, - forkDigests, getBeaconTime, genesis_validators_root) + let + optimisticProcessor = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): + Future[void] {.async.} = + debug "New LC optimistic block", + opt = signedBlock.toBlockId(), + dag = node.dag.head.bid, + wallSlot = node.currentSlot + return + optSync = initLCOptimisticSync( + node.network, getBeaconTime, optimisticProcessor, + config.safeSlotsToImportOptimistically) + + lightClient = createLightClient( + node.network, rng, config, cfg, + forkDigests, getBeaconTime, genesis_validators_root) if config.lightClientEnable.get: + proc shouldSyncOptimistically(slot: Slot): bool = + const + # Minimum number of slots to be ahead of DAG to use optimistic sync + minProgress = 8 * SLOTS_PER_EPOCH + # Maximum age of light client optimistic header to use optimistic sync + maxAge = 2 * SLOTS_PER_EPOCH + + if slot < getStateField(node.dag.headState, slot) + minProgress: + false + elif getBeaconTime().slotOrZero > slot + maxAge: + false + else: + true + + proc onFinalizedHeader(lightClient: LightClient) = + let optimisticHeader = lightClient.optimisticHeader.valueOr: + return + if not shouldSyncOptimistically(optimisticHeader.slot): + return + let finalizedHeader = lightClient.finalizedHeader.valueOr: + return + optSync.setOptimisticHeader(optimisticHeader) + optSync.setFinalizedHeader(finalizedHeader) + + proc onOptimisticHeader(lightClient: LightClient) = + let optimisticHeader = lightClient.optimisticHeader.valueOr: + return + if not shouldSyncOptimistically(optimisticHeader.slot): + return + optSync.setOptimisticHeader(optimisticHeader) + + lightClient.onFinalizedHeader = onFinalizedHeader + lightClient.onOptimisticHeader = onOptimisticHeader lightClient.trustedBlockRoot = config.lightClientTrustedBlockRoot elif config.lightClientTrustedBlockRoot.isSome: @@ -41,12 +86,14 @@ proc initLightClient*( lightClientEnable = config.lightClientEnable.get, lightClientTrustedBlockRoot = config.lightClientTrustedBlockRoot + node.lcOptSync = optSync node.lightClient = lightClient proc startLightClient*(node: BeaconNode) = if not node.config.lightClientEnable.get: return + node.lcOptSync.start() node.lightClient.start() proc installLightClientMessageValidators*(node: BeaconNode) = diff --git a/beacon_chain/conf.nim b/beacon_chain/conf.nim index 7b6e8f891b..ca458e61cc 100644 --- a/beacon_chain/conf.nim +++ b/beacon_chain/conf.nim @@ -517,7 +517,7 @@ type desc: "Modify SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY" # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#constants defaultValue: 128 - name: "safe-slots-to-import-optimistically" }: uint64 + name: "safe-slots-to-import-optimistically" }: uint16 # Same option as appears in Lighthouse and Prysm # https://lighthouse-book.sigmaprime.io/suggested-fee-recipient.html diff --git a/beacon_chain/consensus_object_pools/attestation_pool.nim b/beacon_chain/consensus_object_pools/attestation_pool.nim index acc4e495f1..eb08a52cc9 100644 --- a/beacon_chain/consensus_object_pools/attestation_pool.nim +++ b/beacon_chain/consensus_object_pools/attestation_pool.nim @@ -490,7 +490,7 @@ func init( if participation_bitmap[validator_index] != 0: # If any flag got set, there was an attestation from this validator. 
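The `result[(slot, committee_index.uint64)] = validator_bits` line that follows, like the block-quarantine hunks further down, replaces `Table.add` with the `[]=` assignment. In Nim's `std/tables`, `add` inserts a new entry even when the key already exists (and has been deprecated for that reason), while `[]=` inserts or overwrites. A minimal, self-contained illustration:

```nim
import std/tables

var cache = initTable[uint64, string]()
cache[7] = "first"
cache[7] = "second"   # same key: the value is overwritten, no duplicate entry
doAssert cache.len == 1
doAssert cache[7] == "second"
```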
validator_bits[index_in_committee] = true - result.add((slot, committee_index.uint64), validator_bits) + result[(slot, committee_index.uint64)] = validator_bits # This treats all types of rewards as equivalent, which isn't ideal update_attestation_pool_cache( diff --git a/beacon_chain/consensus_object_pools/block_clearance_light_client.nim b/beacon_chain/consensus_object_pools/block_clearance_light_client.nim new file mode 100644 index 0000000000..4b72078285 --- /dev/null +++ b/beacon_chain/consensus_object_pools/block_clearance_light_client.nim @@ -0,0 +1,306 @@ +# beacon_chain +# Copyright (c) 2019-2022 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [Defect].} + +import + std/[deques, math], + chronicles, + ../spec/forks, + ../beacon_chain_db, + ./block_pools_types + +export forks, block_pools_types + +logScope: + topics = "clearance" + +# Clearance (light client) +# --------------------------------------------- +# +# This module validates blocks obtained using the light client sync protocol. +# Those blocks are considered trusted by delegating the full verification to a +# supermajority (> 2/3) of the corresponding sync committee (512 members). +# The validated blocks are downloaded in backwards order into a `deque`. +# +# If the sync committee is trusted, expensive verification already done by the +# sync committee may be skipped: +# - BLS signatures (except the outer block signature not covered by `root`) +# - Verifying whether the state transition function applies +# - `ExecutionPayload` verification +# - `state_root` computation and verification + +type LCBlocks* = object + maxSlots: int # max cache.len + cache: Deque[ref ForkedMsgTrustedSignedBeaconBlock] # by slots descending + headSlot: Slot # matches cache[0].slot once block is downloaded + backfill: BeaconBlockSummary # next expected block + finalizedBid: BlockId + +func initLCBlocks*(maxSlots: int): LCBlocks = + LCBlocks( + maxSlots: maxSlots, + cache: initDeque[ref ForkedMsgTrustedSignedBeaconBlock]( + nextPowerOfTwo(maxSlots)), + headSlot: FAR_FUTURE_SLOT) + +func getHeadSlot*(lcBlocks: LCBlocks): Slot = + lcBlocks.headSlot + +func getFinalizedSlot*(lcBlocks: LCBlocks): Slot = + lcBlocks.finalizedBid.slot + +func getFrontfillSlot*(lcBlocks: LCBlocks): Slot = + lcBlocks.headSlot + 1 - lcBlocks.cache.lenu64 + +func getBackfillSlot*(lcBlocks: LCBlocks): Slot = + if lcBlocks.backfill.slot != FAR_FUTURE_SLOT: + max(lcBlocks.backfill.slot, lcBlocks.getFrontfillSlot()) + else: + lcBlocks.headSlot + 1 + +func getBackfillRoot*(lcBlocks: LCBlocks): Option[Eth2Digest] = + if lcBlocks.headSlot == FAR_FUTURE_SLOT: + none(Eth2Digest) + elif lcBlocks.backfill.slot < lcBlocks.getFrontfillSlot(): + none(Eth2Digest) + else: + some lcBlocks.backfill.parent_root + +func getCacheIndex(lcBlocks: LCBlocks, slot: Slot): uint64 = + if slot < lcBlocks.headSlot and lcBlocks.headSlot != FAR_FUTURE_SLOT: + lcBlocks.headSlot - slot + else: + 0 + +func getBlockAtSlot*( + lcBlocks: LCBlocks, slot: Slot): Opt[ForkedMsgTrustedSignedBeaconBlock] = + if slot < lcBlocks.backfill.slot: + return err() + + let index = lcBlocks.getCacheIndex(slot) + if index >= lcBlocks.cache.lenu64: + return err() + let 
existing = lcBlocks.cache[index] + if existing == nil: + return err() + return ok existing[] + +func getLatestBlockThroughSlot*( + lcBlocks: LCBlocks, maxSlot: Slot): Opt[ForkedMsgTrustedSignedBeaconBlock] = + if maxSlot < lcBlocks.backfill.slot: + return err() + + let startIndex = lcBlocks.getCacheIndex(maxSlot) + for i in startIndex ..< lcBlocks.cache.lenu64: + let blck = lcBlocks.cache[i] + if blck != nil: + return ok blck[] + err() + +proc processBlock( + lcBlocks: var LCBlocks, + signedBlock: ForkySignedBeaconBlock, + isNewBlock = true): Result[void, BlockError] = + logScope: + headSlot = lcBlocks.headSlot + backfill = (lcBlocks.backfill.slot, shortLog(lcBlocks.backfill.parent_root)) + blck = shortLog(signedBlock.toBlockId()) + + let startTick = Moment.now() + + template blck(): untyped = signedBlock.message + template blockRoot(): untyped = signedBlock.root + + if blck.slot > lcBlocks.headSlot: + debug "LC block too new" + return err(BlockError.Duplicate) + + # Handle head block + if lcBlocks.backfill.slot == FAR_FUTURE_SLOT: + if blck.slot < lcBlocks.headSlot: + if isNewBlock: + debug "Head LC block skipped" + return err(BlockError.MissingParent) + + if blockRoot != lcBlocks.backfill.parent_root: + if isNewBlock: + debug "Head LC block from unviable fork" + return err(BlockError.UnviableFork) + + const index = 0'u64 # Head block is always mapped to index 0 (never empty) + if index >= lcBlocks.cache.lenu64: + lcBlocks.backfill.slot = blck.slot + debug "Final head LC block" + return ok() + + lcBlocks.backfill = blck.toBeaconBlockSummary() + let existing = lcBlocks.cache[index] + if existing != nil: + if blockRoot == existing[].root: + if isNewBlock: + debug "Head LC block already known" + return ok() + warn "Head LC block reorg", existing = existing[] + lcBlocks.cache[index] = + newClone ForkedMsgTrustedSignedBeaconBlock.init( + signedBlock.asMsgTrusted()) + debug "Head LC block cached", cacheDur = Moment.now() - startTick + return ok() + + # Handle duplicate block + if blck.slot >= lcBlocks.getBackfillSlot(): + let index = lcBlocks.getCacheIndex(blck.slot) + doAssert index < lcBlocks.cache.lenu64 + let existing = lcBlocks.cache[index] + if existing == nil: + debug "Duplicate LC block for empty slot" + return err(BlockError.UnviableFork) + + doAssert blck.slot == existing[].slot + if blockRoot != existing[].root: + debug "Duplicate LC block from unviable fork" + return err(BlockError.UnviableFork) + + debug "Duplicate LC block" + return err(BlockError.Duplicate) + + # Handle new block + if blck.slot > lcBlocks.backfill.slot: + debug "LC block for empty slot" + return err(BlockError.UnviableFork) + + if blockRoot != lcBlocks.backfill.parent_root: + if blck.slot == lcBlocks.backfill.slot: + debug "Final LC block from unviable fork" + return err(BlockError.UnviableFork) + if isNewBlock: + debug "LC block does not match expected backfill root" + return err(BlockError.MissingParent) + + if blck.slot == lcBlocks.backfill.slot: + debug "Duplicate final LC block" + return err(BlockError.Duplicate) + + let + previousIndex = lcBlocks.getCacheIndex(lcBlocks.backfill.slot) + index = lcBlocks.getCacheIndex(blck.slot) + for i in previousIndex + 1 ..< min(index, lcBlocks.cache.lenu64): + let existing = lcBlocks.cache[i] + if existing != nil: + warn "LC block reorg to empty", existing = existing[] + lcBlocks.cache[i] = nil + + if index >= lcBlocks.cache.lenu64: + lcBlocks.backfill.slot = blck.slot + debug "Final LC block" + return ok() + + lcBlocks.backfill = blck.toBeaconBlockSummary() + let 
existing = lcBlocks.cache[index] + if existing != nil: + if blockRoot == existing[].root: + if isNewBlock: + debug "LC block already known" + return ok() + warn "LC block reorg", existing = existing[] + lcBlocks.cache[index] = + newClone ForkedMsgTrustedSignedBeaconBlock.init( + signedBlock.asMsgTrusted()) + debug "LC block cached", cacheDur = Moment.now() - startTick + ok() + +proc setHeadBid*(lcBlocks: var LCBlocks, headBid: BlockId) = + debug "New LC head block", headBid + if lcBlocks.maxSlots == 0: + discard + + elif lcBlocks.headSlot == FAR_FUTURE_SLOT or + headBid.slot >= lcBlocks.headSlot + lcBlocks.maxSlots.uint64 or ( + lcBlocks.headSlot - lcBlocks.cache.lenu64 != FAR_FUTURE_SLOT and + headBid.slot <= lcBlocks.headSlot - lcBlocks.cache.lenu64): + lcBlocks.cache.clear() + for i in 0 ..< min(headBid.slot + 1, lcBlocks.maxSlots.Slot).int: + lcBlocks.cache.addLast(nil) + + elif headBid.slot > lcBlocks.headSlot: + let numNewSlots = headBid.slot - lcBlocks.headSlot + doAssert numNewSlots <= lcBlocks.maxSlots.uint64 + if numNewSlots > lcBlocks.maxSlots.uint64 - lcBlocks.cache.lenu64: + lcBlocks.cache.shrink( + fromLast = numNewSlots.int + lcBlocks.cache.len - lcBlocks.maxSlots) + for i in 0 ..< numNewSlots: + lcBlocks.cache.addFirst(nil) + + else: + lcBlocks.cache.shrink(fromFirst = (lcBlocks.headSlot - headBid.slot).int) + let startLen = lcBlocks.cache.len + for i in startLen ..< min(headBid.slot + 1, lcBlocks.maxSlots.Slot).int: + lcBlocks.cache.addLast(nil) + + lcBlocks.headSlot = headBid.slot + lcBlocks.backfill.slot = FAR_FUTURE_SLOT + lcBlocks.backfill.parent_root = headBid.root + + for i in 0 ..< lcBlocks.cache.len: + let existing = lcBlocks.cache[i] + if existing != nil: + let res = + withBlck(existing[]): + lcBlocks.processBlock(blck.asSigned(), isNewBlock = false) + if res.isErr: + break + +proc setFinalizedBid*(lcBlocks: var LCBlocks, finalizedBid: BlockId) = + if finalizedBid.slot > lcBlocks.headSlot: + lcBlocks.setHeadBid(finalizedBid) + if finalizedBid != lcBlocks.finalizedBid: + debug "New LC finalized block", finalizedBid + lcBlocks.finalizedBid = finalizedBid + + if finalizedBid.slot <= lcBlocks.headSlot and + finalizedBid.slot >= lcBlocks.getBackfillSlot: + let index = lcBlocks.getCacheIndex(finalizedBid.slot) + doAssert index < lcBlocks.cache.lenu64 + let existing = lcBlocks.cache[index] + if existing == nil or finalizedBid.root != existing[].root: + if existing != nil: + error "Finalized LC block reorg", existing = existing[] + else: + error "Finalized LC block reorg" + lcBlocks.cache.clear() + lcBlocks.backfill.reset() + lcBlocks.headSlot.reset() + lcBlocks.setHeadBid(finalizedBid) + +proc addBlock*( + lcBlocks: var LCBlocks, + signedBlock: ForkedSignedBeaconBlock): Result[void, BlockError] = + let oldBackfillSlot = lcBlocks.getBackfillSlot() + + withBlck(signedBlock): + ? 
lcBlocks.processBlock(blck) + + if oldBackfillSlot > lcBlocks.finalizedBid.slot and + lcBlocks.getBackfillSlot() <= lcBlocks.finalizedBid.slot: + if signedBlock.slot != lcBlocks.finalizedBid.slot or + signedBlock.root != lcBlocks.finalizedBid.root: + error "LC finalized block from unviable fork" + lcBlocks.setFinalizedBid(lcBlocks.finalizedBid) + return err(BlockError.UnviableFork) + + let slot = signedBlock.slot + for i in lcBlocks.getCacheIndex(slot) + 1 ..< lcBlocks.cache.lenu64: + let existing = lcBlocks.cache[i] + if existing != nil: + let res = + withBlck(existing[]): + lcBlocks.processBlock(blck.asSigned(), isNewBlock = false) + if res.isErr: + break + + ok() diff --git a/beacon_chain/consensus_object_pools/block_quarantine.nim b/beacon_chain/consensus_object_pools/block_quarantine.nim index ad51116027..dfa4b6b814 100644 --- a/beacon_chain/consensus_object_pools/block_quarantine.nim +++ b/beacon_chain/consensus_object_pools/block_quarantine.nim @@ -155,11 +155,11 @@ func addUnviable*(quarantine: var Quarantine, root: Eth2Digest) = for k in toRemove: quarantine.orphans.del k - quarantine.unviable.add(k[0], ()) + quarantine.unviable[k[0]] = () toRemove.setLen(0) - quarantine.unviable.add(root, ()) + quarantine.unviable[root] = () func cleanupOrphans(quarantine: var Quarantine, finalizedSlot: Slot) = var toDel: seq[(Eth2Digest, ValidatorSig)] @@ -201,7 +201,7 @@ func addOrphan*( let parent_root = getForkedBlockField(signedBlock, parent_root) if parent_root in quarantine.unviable: - quarantine.unviable.add(signedBlock.root, ()) + quarantine.unviable[signedBlock.root] = () return true # Even if the quarantine is full, we need to schedule its parent for diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index cc2a004e7d..4ba4ab146c 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -21,11 +21,11 @@ export eth2_merkleization, eth2_ssz_serialization, block_pools_types, results, beacon_chain_db -# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics +# https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics declareGauge beacon_head_root, "Root of the head block of the beacon chain" declareGauge beacon_head_slot, "Slot of the head block of the beacon chain" -# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics +# https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition @@ -552,7 +552,7 @@ func isNextSyncCommitteeFinalized*( proc updateBeaconMetrics( state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) = - # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics + # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#additional-metrics # both non-negative, so difference can't overflow or underflow int64 beacon_head_root.set(bid.root.toGaugeValue) diff --git a/beacon_chain/era_db.nim b/beacon_chain/era_db.nim index 63baff1ba1..66b6f48187 100644 --- a/beacon_chain/era_db.nim +++ b/beacon_chain/era_db.nim @@ -352,10 +352,10 @@ proc getPartialState( false iterator getBlockIds*( - db: EraDB, historical_roots: 
openArray[Eth2Digest], startSlot: Slot): BlockId = + db: EraDB, historical_roots: openArray[Eth2Digest], start_slot: Slot): BlockId = var state = (ref PartialBeaconState)() # avoid stack overflow - slot = startSlot + slot = start_slot while true: # `case` ensures we're on a fork for which the `PartialBeaconState` diff --git a/beacon_chain/eth1/eth1_monitor.nim b/beacon_chain/eth1/eth1_monitor.nim index db17b32cee..e6a4cf0dca 100644 --- a/beacon_chain/eth1/eth1_monitor.nim +++ b/beacon_chain/eth1/eth1_monitor.nim @@ -11,7 +11,7 @@ import std/[deques, options, strformat, strutils, sequtils, tables, typetraits, uri, json], # Nimble packages: - chronos, json, metrics, chronicles/timings, stint/endians2, + chronos, metrics, chronicles/timings, stint/endians2, web3, web3/ethtypes as web3Types, web3/ethhexstrings, web3/engine_api, eth/common/eth_types, eth/async_utils, stew/[byteutils, objects, shims/hashes], diff --git a/beacon_chain/fork_choice/proto_array.nim b/beacon_chain/fork_choice/proto_array.nim index 2bde7d05c7..cf61459696 100644 --- a/beacon_chain/fork_choice/proto_array.nim +++ b/beacon_chain/fork_choice/proto_array.nim @@ -103,7 +103,7 @@ func init*(T: type ProtoArray, indices: {node.root: 0}.toTable() ) -# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/fork-choice.md#configuration +# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/fork-choice.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/fork-choice.md#get_latest_attesting_balance const PROPOSER_SCORE_BOOST* = 40 func calculateProposerBoost(validatorBalances: openArray[Gwei]): int64 = diff --git a/beacon_chain/gossip_processing/block_processor.nim b/beacon_chain/gossip_processing/block_processor.nim index 9ca0395ac6..bac5e05b73 100644 --- a/beacon_chain/gossip_processing/block_processor.nim +++ b/beacon_chain/gossip_processing/block_processor.nim @@ -330,7 +330,7 @@ proc runForkchoiceUpdated( # block hash provided by this event is stubbed with # `0x0000000000000000000000000000000000000000000000000000000000000000`." 
# and - # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/bellatrix/validator.md#executionpayload + # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/bellatrix/validator.md#executionpayload # notes "`finalized_block_hash` is the hash of the latest finalized execution # payload (`Hash32()` if none yet finalized)" if headBlockRoot.isZero: diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index 7213fc1324..b4e8e56fa7 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -281,7 +281,11 @@ proc checkForPotentialDoppelganger( validatorIndex, validatorPubkey, attestation = shortLog(attestation) - quit QuitFailure + + # Avoid colliding with + # https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes + const QuitDoppelganger = 1031 + quit QuitDoppelganger proc attestationValidator*( self: ref Eth2Processor, src: MsgSource, diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 8708ab16b3..f976a37ce9 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -170,7 +170,7 @@ type MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [Defect, CatchableError].} MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [Defect].} - # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#goodbye + # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/p2p-interface.md#goodbye DisconnectionReason* = enum # might see other values on the wire! ClientShutDown = 1 diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index 10aaa34a2c..882d02f3c0 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -126,7 +126,7 @@ template init(T: type RestServerRef, res.get() -# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics +# https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics declareGauge beacon_slot, "Latest slot of the beacon chain state" declareGauge beacon_current_epoch, "Current epoch" diff --git a/beacon_chain/nimbus_binary_common.nim b/beacon_chain/nimbus_binary_common.nim index 2ecda64e70..27086fa1ad 100644 --- a/beacon_chain/nimbus_binary_common.nim +++ b/beacon_chain/nimbus_binary_common.nim @@ -11,7 +11,7 @@ import # Standard library - std/[os, tables, strutils, terminal, typetraits], + std/[tables, strutils, terminal, typetraits], # Nimble packages chronos, confutils, toml_serialization, @@ -96,6 +96,9 @@ proc detectTTY*(stdoutKind: StdoutLogKind): StdoutLogKind = else: stdoutKind +when defaultChroniclesStream.outputs.type.arity == 2: + from std/os import splitFile + proc setupLogging*( logLevel: string, stdoutKind: StdoutLogKind, logFile: Option[OutFile]) = # In the cfg file for nimbus, we create two formats: textlines and json. diff --git a/beacon_chain/rpc/rest_beacon_api.nim b/beacon_chain/rpc/rest_beacon_api.nim index 6194c2ebfb..c2e2ef2fa6 100644 --- a/beacon_chain/rpc/rest_beacon_api.nim +++ b/beacon_chain/rpc/rest_beacon_api.nim @@ -6,7 +6,7 @@ # at your option. This file may not be copied, modified, or distributed except according to those terms. 
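In the `nimbus_binary_common.nim` hunk above, the unconditional `std/os` import is dropped and `splitFile` is pulled in only when the Chronicles configuration actually needs it. A small, self-contained sketch of that `when`-guarded import pattern; the `twoOutputs` constant below is a stand-in for the real `defaultChroniclesStream.outputs.type.arity == 2` check:

```nim
# Illustrative only: import a symbol solely under a compile-time condition, so
# builds that don't need it carry no std/os dependency here.
const twoOutputs = true   # stand-in for the real compile-time check

when twoOutputs:
  from std/os import splitFile

when twoOutputs:
  doAssert splitFile("/var/log/nimbus.log").name == "nimbus"
```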
import - std/[typetraits, sequtils, strutils, sets], + std/[typetraits, sequtils, sets], stew/[results, base10], chronicles, ./rest_utils, diff --git a/beacon_chain/rpc/rest_validator_api.nim b/beacon_chain/rpc/rest_validator_api.nim index d39910d3cb..6d302546dc 100644 --- a/beacon_chain/rpc/rest_validator_api.nim +++ b/beacon_chain/rpc/rest_validator_api.nim @@ -1,9 +1,10 @@ -# Copyright (c) 2018-2021 Status Research & Development GmbH +# Copyright (c) 2018-2022 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. -import std/[typetraits, strutils, sets] + +import std/[typetraits, sets] import stew/[results, base10], chronicles import ".."/[beacon_chain_db, beacon_node], ".."/networking/eth2_network, diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index 3fa99f4ef8..644ad106f5 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -512,7 +512,7 @@ proc is_valid_indexed_attestation*( # Attestation validation # ------------------------------------------------------------------------------------------ # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/beacon-chain.md#attestations -# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id +# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id func check_attestation_slot_target*(data: AttestationData): Result[Slot, cstring] = if not (data.target.epoch == epoch(data.slot)): diff --git a/beacon_chain/spec/datatypes/altair.nim b/beacon_chain/spec/datatypes/altair.nim index 443f0b005d..7ac552860b 100644 --- a/beacon_chain/spec/datatypes/altair.nim +++ b/beacon_chain/spec/datatypes/altair.nim @@ -480,15 +480,31 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + MsgTrustedSignedBeaconBlock* = object + message*: TrustedBeaconBlock + signature*: ValidatorSig + + root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock - SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock - SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody + SomeSignedBeaconBlock* = + SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock + SomeBeaconBlock* = + BeaconBlock | + SigVerifiedBeaconBlock | + TrustedBeaconBlock + SomeBeaconBlockBody* = + BeaconBlockBody | + SigVerifiedBeaconBlockBody | + TrustedBeaconBlockBody SomeSyncAggregate* = SyncAggregate | TrustedSyncAggregate @@ -729,13 +745,26 @@ func clear*(info: var EpochInfo) = info.validators.setLen(0) info.balances = UnslashedParticipatingBalances() -template asSigned*(x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock): - SignedBeaconBlock = +template asSigned*( + x: SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + 
TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) -template asSigVerified*(x: SignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = +template asSigVerified*( + x: SignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) +template asMsgTrusted*( + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = + isomorphicCast[MsgTrustedSignedBeaconBlock](x) + template asTrusted*( - x: SignedBeaconBlock | SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/bellatrix.nim b/beacon_chain/spec/datatypes/bellatrix.nim index 7b1399e075..74c0cf1eba 100644 --- a/beacon_chain/spec/datatypes/bellatrix.nim +++ b/beacon_chain/spec/datatypes/bellatrix.nim @@ -313,15 +313,31 @@ type root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + MsgTrustedSignedBeaconBlock* = object + message*: TrustedBeaconBlock + signature*: ValidatorSig + + root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock - SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock - SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody + SomeSignedBeaconBlock* = + SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock + SomeBeaconBlock* = + BeaconBlock | + SigVerifiedBeaconBlock | + TrustedBeaconBlock + SomeBeaconBlockBody* = + BeaconBlockBody | + SigVerifiedBeaconBlockBody | + TrustedBeaconBlockBody BlockParams = object parentHash*: string @@ -378,13 +394,26 @@ func shortLog*(v: SomeSignedBeaconBlock): auto = signature: shortLog(v.signature) ) -template asSigned*(x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock): - SignedBeaconBlock = +template asSigned*( + x: SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) -template asSigVerified*(x: SignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = +template asSigVerified*( + x: SignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) +template asMsgTrusted*( + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = + isomorphicCast[MsgTrustedSignedBeaconBlock](x) + template asTrusted*( - x: SignedBeaconBlock | SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/datatypes/phase0.nim b/beacon_chain/spec/datatypes/phase0.nim index 89530deb10..4287328782 100644 --- a/beacon_chain/spec/datatypes/phase0.nim +++ b/beacon_chain/spec/datatypes/phase0.nim @@ -218,15 +218,31 @@ type root* 
{.dontSerialize.}: Eth2Digest # cached root of signed beacon block + MsgTrustedSignedBeaconBlock* = object + message*: TrustedBeaconBlock + signature*: ValidatorSig + + root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block + TrustedSignedBeaconBlock* = object message*: TrustedBeaconBlock signature*: TrustedSig root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block - SomeSignedBeaconBlock* = SignedBeaconBlock | SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock - SomeBeaconBlock* = BeaconBlock | SigVerifiedBeaconBlock | TrustedBeaconBlock - SomeBeaconBlockBody* = BeaconBlockBody | SigVerifiedBeaconBlockBody | TrustedBeaconBlockBody + SomeSignedBeaconBlock* = + SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock + SomeBeaconBlock* = + BeaconBlock | + SigVerifiedBeaconBlock | + TrustedBeaconBlock + SomeBeaconBlockBody* = + BeaconBlockBody | + SigVerifiedBeaconBlockBody | + TrustedBeaconBlockBody EpochInfo* = object ## Information about the outcome of epoch processing @@ -261,13 +277,26 @@ func shortLog*(v: SomeSignedBeaconBlock): auto = signature: shortLog(v.signature) ) -template asSigned*(x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock): - SignedBeaconBlock = +template asSigned*( + x: SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock): SignedBeaconBlock = isomorphicCast[SignedBeaconBlock](x) -template asSigVerified*(x: SignedBeaconBlock | TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = +template asSigVerified*( + x: SignedBeaconBlock | + MsgTrustedSignedBeaconBlock | + TrustedSignedBeaconBlock): SigVerifiedSignedBeaconBlock = isomorphicCast[SigVerifiedSignedBeaconBlock](x) +template asMsgTrusted*( + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + TrustedSignedBeaconBlock): MsgTrustedSignedBeaconBlock = + isomorphicCast[MsgTrustedSignedBeaconBlock](x) + template asTrusted*( - x: SignedBeaconBlock | SigVerifiedSignedBeaconBlock): TrustedSignedBeaconBlock = + x: SignedBeaconBlock | + SigVerifiedSignedBeaconBlock | + MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock = isomorphicCast[TrustedSignedBeaconBlock](x) diff --git a/beacon_chain/spec/digest.nim b/beacon_chain/spec/digest.nim index 1d2bcbffa5..3629b82b9a 100644 --- a/beacon_chain/spec/digest.nim +++ b/beacon_chain/spec/digest.nim @@ -126,5 +126,5 @@ proc readValue*(r: var JsonReader, a: var Eth2Digest) {.raises: [Defect, IOError func toGaugeValue*(hash: Eth2Digest): int64 = # Only the last 8 bytes are taken into consideration in accordance # to the ETH2 metrics spec: - # https://github.com/ethereum/eth2.0-metrics/blob/6a79914cb31f7d54858c7dd57eee75b6162ec737/metrics.md#interop-metrics + # https://github.com/ethereum/beacon-metrics/blob/6a79914cb31f7d54858c7dd57eee75b6162ec737/metrics.md#interop-metrics cast[int64](uint64.fromBytesLE(hash.data.toOpenArray(24, 31))) diff --git a/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim b/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim index 1370ef925b..0af27282b8 100644 --- a/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim +++ b/beacon_chain/spec/eth2_apis/eth2_rest_serialization.nim @@ -693,7 +693,7 @@ template unrecognizedFieldWarning = # TODO: There should be a different notification mechanism for informing the # caller of a deserialization routine for unexpected fields. # The chonicles import in this module should be removed. - debug "JSON field not recognized by the current version of Nimbus. 
Consider upgrading", + trace "JSON field not recognized by the current version of Nimbus. Consider upgrading", fieldName, typeName = typetraits.name(typeof value) ## ForkedBeaconBlock diff --git a/beacon_chain/spec/eth2_apis/rest_keymanager_types.nim b/beacon_chain/spec/eth2_apis/rest_keymanager_types.nim index 55d97a480a..23bfa88539 100644 --- a/beacon_chain/spec/eth2_apis/rest_keymanager_types.nim +++ b/beacon_chain/spec/eth2_apis/rest_keymanager_types.nim @@ -1,5 +1,11 @@ +# beacon_chain +# Copyright (c) 2021-2022 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + import - std/[tables, strutils, uri], ".."/[crypto, keystore], ../../validators/slashing_protection_common diff --git a/beacon_chain/spec/eth2_apis/rest_remote_signer_calls.nim b/beacon_chain/spec/eth2_apis/rest_remote_signer_calls.nim index 576cc68bb8..da13068601 100644 --- a/beacon_chain/spec/eth2_apis/rest_remote_signer_calls.nim +++ b/beacon_chain/spec/eth2_apis/rest_remote_signer_calls.nim @@ -1,4 +1,4 @@ -# Copyright (c) 2018-2021 Status Research & Development GmbH +# Copyright (c) 2018-2022 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -9,7 +9,6 @@ import std/strutils, chronicles, metrics, chronos, chronos/apps/http/httpclient, presto, presto/client, - nimcrypto/utils as ncrutils, serialization, json_serialization, json_serialization/std/[options, net, sets], stew/[results, base10, byteutils], diff --git a/beacon_chain/spec/eth2_apis/rest_types.nim b/beacon_chain/spec/eth2_apis/rest_types.nim index 3239b99b5f..a18af6b0ab 100644 --- a/beacon_chain/spec/eth2_apis/rest_types.nim +++ b/beacon_chain/spec/eth2_apis/rest_types.nim @@ -14,7 +14,7 @@ {.push raises: [Defect].} import - std/[json, typetraits], + std/json, stew/base10, web3/ethtypes, ".."/forks, ".."/datatypes/[phase0, altair, bellatrix], diff --git a/beacon_chain/spec/forks.nim b/beacon_chain/spec/forks.nim index d0a403c827..2b595d886d 100644 --- a/beacon_chain/spec/forks.nim +++ b/beacon_chain/spec/forks.nim @@ -131,11 +131,22 @@ type altair.SigVerifiedSignedBeaconBlock | bellatrix.SigVerifiedSignedBeaconBlock + ForkyMsgTrustedSignedBeaconBlock* = + phase0.MsgTrustedSignedBeaconBlock | + altair.MsgTrustedSignedBeaconBlock | + bellatrix.MsgTrustedSignedBeaconBlock + ForkyTrustedSignedBeaconBlock* = phase0.TrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock + ForkedMsgTrustedSignedBeaconBlock* = object + case kind*: BeaconBlockFork + of BeaconBlockFork.Phase0: phase0Data*: phase0.MsgTrustedSignedBeaconBlock + of BeaconBlockFork.Altair: altairData*: altair.MsgTrustedSignedBeaconBlock + of BeaconBlockFork.Bellatrix: bellatrixData*: bellatrix.MsgTrustedSignedBeaconBlock + ForkedTrustedSignedBeaconBlock* = object case kind*: BeaconBlockFork of BeaconBlockFork.Phase0: phase0Data*: phase0.TrustedSignedBeaconBlock @@ -145,6 +156,7 @@ type SomeForkySignedBeaconBlock* = ForkySignedBeaconBlock | ForkySigVerifiedSignedBeaconBlock | + 
ForkyMsgTrustedSignedBeaconBlock | ForkyTrustedSignedBeaconBlock EpochInfoFork* {.pure.} = enum @@ -223,6 +235,13 @@ template init*(T: type ForkedSignedBeaconBlock, forked: ForkedBeaconBlock, root: blockRoot, signature: signature)) +template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: phase0.MsgTrustedSignedBeaconBlock): T = + T(kind: BeaconBlockFork.Phase0, phase0Data: blck) +template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: altair.MsgTrustedSignedBeaconBlock): T = + T(kind: BeaconBlockFork.Altair, altairData: blck) +template init*(T: type ForkedMsgTrustedSignedBeaconBlock, blck: bellatrix.MsgTrustedSignedBeaconBlock): T = + T(kind: BeaconBlockFork.Bellatrix, bellatrixData: blck) + template init*(T: type ForkedTrustedSignedBeaconBlock, blck: phase0.TrustedSignedBeaconBlock): T = T(kind: BeaconBlockFork.Phase0, phase0Data: blck) template init*(T: type ForkedTrustedSignedBeaconBlock, blck: altair.TrustedSignedBeaconBlock): T = @@ -233,18 +252,21 @@ template init*(T: type ForkedTrustedSignedBeaconBlock, blck: bellatrix.TrustedSi template toFork*[T: phase0.SignedBeaconBlock | phase0.SigVerifiedSignedBeaconBlock | + phase0.MsgTrustedSignedBeaconBlock | phase0.TrustedSignedBeaconBlock]( t: type T): BeaconBlockFork = BeaconBlockFork.Phase0 template toFork*[T: altair.SignedBeaconBlock | altair.SigVerifiedSignedBeaconBlock | + altair.MsgTrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock]( t: type T): BeaconBlockFork = BeaconBlockFork.Altair template toFork*[T: bellatrix.SignedBeaconBlock | bellatrix.SigVerifiedSignedBeaconBlock | + bellatrix.MsgTrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock]( t: type T): BeaconBlockFork = BeaconBlockFork.Bellatrix @@ -366,15 +388,25 @@ template atEpoch*( forkDigests: ForkDigests, epoch: Epoch, cfg: RuntimeConfig): ForkDigest = forkDigests.atStateFork(cfg.stateForkAtEpoch(epoch)) -template asSigned*(x: ForkedTrustedSignedBeaconBlock): ForkedSignedBeaconBlock = +template asSigned*( + x: ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): ForkedSignedBeaconBlock = isomorphicCast[ForkedSignedBeaconBlock](x) -template asTrusted*(x: ForkedSignedBeaconBlock): ForkedTrustedSignedBeaconBlock = +template asMsgTrusted*( + x: ForkedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): ForkedMsgTrustedSignedBeaconBlock = + isomorphicCast[ForkedMsgTrustedSignedBeaconBlock](x) + +template asTrusted*( + x: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock): ForkedTrustedSignedBeaconBlock = isomorphicCast[ForkedTrustedSignedBeaconBlock](x) template withBlck*( x: ForkedBeaconBlock | Web3SignerForkedBeaconBlock | - ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock, + ForkedSignedBeaconBlock | ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock, body: untyped): untyped = case x.kind of BeaconBlockFork.Phase0: @@ -398,38 +430,51 @@ func hash_tree_root*(x: ForkedBeaconBlock): Eth2Digest = func hash_tree_root*(x: Web3SignerForkedBeaconBlock): Eth2Digest {.borrow.} -template getForkedBlockField*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock, y: untyped): untyped = +template getForkedBlockField*( + x: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock, + y: untyped): untyped = # unsafeAddr avoids a copy of the field in some cases (case x.kind of BeaconBlockFork.Phase0: unsafeAddr x.phase0Data.message.y of BeaconBlockFork.Altair: unsafeAddr x.altairData.message.y of BeaconBlockFork.Bellatrix: unsafeAddr 
x.bellatrixData.message.y)[] -template signature*(x: ForkedSignedBeaconBlock): ValidatorSig = +template signature*(x: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock): ValidatorSig = withBlck(x): blck.signature template signature*(x: ForkedTrustedSignedBeaconBlock): TrustedSig = withBlck(x): blck.signature -template root*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Eth2Digest = +template root*(x: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): Eth2Digest = withBlck(x): blck.root -template slot*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): Slot = +template slot*(x: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): Slot = withBlck(x): blck.message.slot template shortLog*(x: ForkedBeaconBlock): auto = withBlck(x): shortLog(blck) -template shortLog*(x: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): auto = +template shortLog*(x: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): auto = withBlck(x): shortLog(blck) chronicles.formatIt ForkedBeaconBlock: it.shortLog chronicles.formatIt ForkedSignedBeaconBlock: it.shortLog +chronicles.formatIt ForkedMsgTrustedSignedBeaconBlock: it.shortLog chronicles.formatIt ForkedTrustedSignedBeaconBlock: it.shortLog template withStateAndBlck*( s: ForkedHashedBeaconState, b: ForkedBeaconBlock | ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock, body: untyped): untyped = case s.kind @@ -465,10 +510,10 @@ template toBeaconBlockHeader*( blck.message.toBeaconBlockHeader template toBeaconBlockHeader*( - blckParam: ForkedTrustedSignedBeaconBlock): BeaconBlockHeader = - ## Reduce a given `ForkedTrustedSignedBeaconBlock` to its `BeaconBlockHeader`. - withBlck(blckParam): - blck.toBeaconBlockHeader() + blckParam: ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): BeaconBlockHeader = + ## Reduce a given signed beacon block to its `BeaconBlockHeader`. 
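The new `MsgTrustedSignedBeaconBlock` variants added across `phase0`, `altair`, `bellatrix`, and `forks.nim` sit between the fully unverified and fully trusted block types: the message is considered verified while the outer signature is still a plain `ValidatorSig`. Because the object layouts carry the same fields, the `asSigned`/`asMsgTrusted`/`asTrusted` templates can be zero-copy reinterpretations (`isomorphicCast`). A toy illustration with made-up types, not the real ones:

```nim
type
  Sig = array[96, byte]
  ToySigned = object       # nothing verified yet
    message: int
    signature: Sig
  ToyMsgTrusted = object   # message verified; outer signature still unchecked
    message: int
    signature: Sig

template asMsgTrusted(x: ToySigned): ToyMsgTrusted =
  # Safe only because the two layouts are field-for-field identical.
  cast[ptr ToyMsgTrusted](unsafeAddr x)[]

let blck = ToySigned(message: 42)
doAssert blck.asMsgTrusted().message == 42
```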
+ withBlck(blckParam): blck.toBeaconBlockHeader() func genesisFork*(cfg: RuntimeConfig): Fork = Fork( @@ -611,8 +656,13 @@ func init*(T: type ForkDigests, compute_fork_digest(cfg.SHARDING_FORK_VERSION, genesis_validators_root), ) +func toBlockId*(header: BeaconBlockHeader): BlockId = + BlockId(root: header.hash_tree_root(), slot: header.slot) + func toBlockId*(blck: SomeForkySignedBeaconBlock): BlockId = BlockId(root: blck.root, slot: blck.message.slot) -func toBlockId*(blck: ForkedSignedBeaconBlock | ForkedTrustedSignedBeaconBlock): BlockId = +func toBlockId*(blck: ForkedSignedBeaconBlock | + ForkedMsgTrustedSignedBeaconBlock | + ForkedTrustedSignedBeaconBlock): BlockId = withBlck(blck): BlockId(root: blck.root, slot: blck.message.slot) diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 68f3dcf194..378a066bf3 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -597,6 +597,13 @@ func is_merge_transition_complete*(state: bellatrix.BeaconState): bool = const defaultExecutionPayloadHeader = default(ExecutionPayloadHeader) state.latest_execution_payload_header != defaultExecutionPayloadHeader +# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md#helpers +func is_execution_block*( + body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody | + bellatrix.SigVerifiedBeaconBlockBody): bool = + const defaultBellatrixExecutionPayload = default(bellatrix.ExecutionPayload) + body.execution_payload != defaultBellatrixExecutionPayload + # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/bellatrix/beacon-chain.md#is_merge_transition_block func is_merge_transition_block( state: bellatrix.BeaconState, diff --git a/beacon_chain/spec/keystore.nim b/beacon_chain/spec/keystore.nim index dcdd9ef874..46a8e80e6a 100644 --- a/beacon_chain/spec/keystore.nim +++ b/beacon_chain/spec/keystore.nim @@ -29,7 +29,7 @@ import nimcrypto/utils as ncrutils export results, burnMem, writeValue, readValue -{.localPassc: "-fno-lto".} # no LTO for crypto +{.localPassC: "-fno-lto".} # no LTO for crypto type KeystoreMode* = enum diff --git a/beacon_chain/spec/network.nim b/beacon_chain/spec/network.nim index 52328e660a..206dd73f58 100644 --- a/beacon_chain/spec/network.nim +++ b/beacon_chain/spec/network.nim @@ -84,13 +84,13 @@ func getAttestationTopic*(forkDigest: ForkDigest, ## For subscribing and unsubscribing to/from a subnet. eth2Prefix(forkDigest) & "beacon_attestation_" & $(subnetId) & "/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/p2p-interface.md#topics-and-messages +# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/p2p-interface.md#topics-and-messages func getSyncCommitteeTopic*(forkDigest: ForkDigest, subcommitteeIdx: SyncSubcommitteeIndex): string = ## For subscribing and unsubscribing to/from a subnet. eth2Prefix(forkDigest) & "sync_committee_" & $subcommitteeIdx & "/ssz_snappy" -# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/p2p-interface.md#topics-and-messages +# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/altair/p2p-interface.md#topics-and-messages func getSyncCommitteeContributionAndProofTopic*(forkDigest: ForkDigest): string = ## For subscribing and unsubscribing to/from a subnet. 
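The `is_execution_block` helper added to `helpers.nim` above follows the optimistic-sync spec definition: a Bellatrix block body is an execution block iff its payload differs from the all-default payload. A simplified, self-contained version of the same comparison, using a toy payload type rather than the real SSZ one:

```nim
type ToyExecutionPayload = object
  block_hash: array[32, byte]
  gas_used: uint64

func is_execution_block(payload: ToyExecutionPayload): bool =
  payload != default(ToyExecutionPayload)

doAssert not is_execution_block(ToyExecutionPayload())          # empty payload
doAssert is_execution_block(ToyExecutionPayload(gas_used: 1))   # any field set
```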
eth2Prefix(forkDigest) & "sync_committee_contribution_and_proof/ssz_snappy" diff --git a/beacon_chain/spec/state_transition_epoch.nim b/beacon_chain/spec/state_transition_epoch.nim index 593eaf8600..64259f8544 100644 --- a/beacon_chain/spec/state_transition_epoch.nim +++ b/beacon_chain/spec/state_transition_epoch.nim @@ -1038,23 +1038,23 @@ func process_inactivity_updates*( proc process_epoch*( cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags, cache: var StateCache, info: var phase0.EpochInfo): Result[void, cstring] = - let currentEpoch = get_current_epoch(state) + let current_epoch = get_current_epoch(state) trace "process_epoch", - current_epoch = currentEpoch + current_epoch init(info, state) info.process_attestations(state, cache) process_justification_and_finalization(state, info.balances, flags) # state.slot hasn't been incremented yet. - if verifyFinalization in flags and currentEpoch >= 2: - doAssert state.current_justified_checkpoint.epoch + 2 >= currentEpoch + if verifyFinalization in flags and current_epoch >= 2: + doAssert state.current_justified_checkpoint.epoch + 2 >= current_epoch - if verifyFinalization in flags and currentEpoch >= 3: + if verifyFinalization in flags and current_epoch >= 3: # Rule 2/3/4 finalization results in the most pessimal case. The other # three finalization rules finalize more quickly as long as the any of # the finalization rules triggered. - doAssert state.finalized_checkpoint.epoch + 3 >= currentEpoch + doAssert state.finalized_checkpoint.epoch + 3 >= current_epoch process_rewards_and_penalties(state, info) ? process_registry_updates(cfg, state, cache) @@ -1115,7 +1115,7 @@ proc process_epoch*( process_inactivity_updates(cfg, state, info) # [New in Altair] - # https://github.com/ethereum/consensus-specs/blob/v1.1.1/specs/phase0/beacon-chain.md#rewards-and-penalties-1 + # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/beacon-chain.md#process_rewards_and_penalties process_rewards_and_penalties(cfg, state, info) # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/beacon-chain.md#registry-updates diff --git a/beacon_chain/sync/optimistic_sync_light_client.nim b/beacon_chain/sync/optimistic_sync_light_client.nim new file mode 100644 index 0000000000..4c45e32250 --- /dev/null +++ b/beacon_chain/sync/optimistic_sync_light_client.nim @@ -0,0 +1,286 @@ +# beacon_chain +# Copyright (c) 2019-2022 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [Defect].} + +import + chronos, + ../consensus_object_pools/block_clearance_light_client, + ../networking/eth2_network, + ../beacon_clock, + ./request_manager + +logScope: + topics = "optsync" + +type + MsgTrustedBlockProcessor* = + proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): Future[void] {. 
+ gcsafe, raises: [Defect].} + + SyncStrategy {.pure.} = enum + None, + RequestManager, + SyncManager + + LCOptimisticSync* = ref object + network: Eth2Node + getBeaconTime: GetBeaconTimeFn + optimisticProcessor: MsgTrustedBlockProcessor + safeSlotsToImportOptimistically: uint16 + lcBlocks: LCBlocks + blockVerifier: request_manager.BlockVerifier + requestManager: RequestManager + finalizedBid, optimisticBid, optimisticCandidateBid: BlockId + finalizedIsExecutionBlock: Option[bool] + syncStrategy: SyncStrategy + syncFut, processFut: Future[void] + +# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/sync/optimistic.md +proc reportOptimisticCandidateBlock(optSync: LCOptimisticSync) {.gcsafe.} = + if optSync.processFut != nil: + return + + # Check if finalized is execution block (implies that justified is, too) + if optSync.finalizedIsExecutionBlock.isNone: + let + finalizedSlot = optSync.lcBlocks.getFinalizedSlot() + finalizedBlock = optSync.lcBlocks.getBlockAtSlot(finalizedSlot) + if finalizedBlock.isOk: + optSync.finalizedIsExecutionBlock = + withBlck(finalizedBlock.get): + when stateFork >= BeaconStateFork.Bellatrix: + some blck.message.body.is_execution_block() + else: + some false + + let + currentSlot = optSync.lcBlocks.getHeadSlot() + signedBlock = + if optSync.finalizedIsExecutionBlock.get(false): + # If finalized is execution block, can import any later block + optSync.lcBlocks.getLatestBlockThroughSlot(currentSlot) + else: + # Else, block must be deep (min `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY`) + let + minAge = optSync.safeSlotsToImportOptimistically + maxSlot = max(currentSlot, minAge.Slot) - minAge.uint64 + optSync.lcBlocks.getLatestBlockThroughSlot(maxSlot) + if signedBlock.isOk: + let bid = signedBlock.get.toBlockId() + if bid.slot > optSync.optimisticCandidateBid.slot: + optSync.optimisticCandidateBid = bid + optSync.processFut = optSync.optimisticProcessor(signedBlock.get) + + proc handleFinishedProcess(future: pointer) = + optSync.processFut = nil + optSync.reportOptimisticCandidateBlock() + + optSync.processFut.addCallback(handleFinishedProcess) + +proc initLCOptimisticSync*( + network: Eth2Node, + getBeaconTime: GetBeaconTimeFn, + optimisticProcessor: MsgTrustedBlockProcessor, + safeSlotsToImportOptimistically: uint16): LCOptimisticSync = + const numExtraSlots = 2 * SLOTS_PER_EPOCH.int + 1 + let maxSlots = safeSlotsToImportOptimistically.int + numExtraSlots + + let optSync = LCOptimisticSync( + network: network, + getBeaconTime: getBeaconTime, + optimisticProcessor: optimisticProcessor, + safeSlotsToImportOptimistically: safeSlotsToImportOptimistically, + lcBlocks: initLCBlocks(maxSlots)) + + proc blockVerifier(signedBlock: ForkedSignedBeaconBlock): + Future[Result[void, BlockError]] = + let res = optSync.lcBlocks.addBlock(signedBlock) + if res.isOk: + if optSync.syncStrategy == SyncStrategy.RequestManager: + let root = optSync.lcBlocks.getBackfillRoot() + if root.isSome: + optSync.requestManager.fetchAncestorBlocks( + @[FetchRecord(root: root.get)]) + else: + if not optSync.syncFut.finished: + optSync.syncFut.cancel() + + optSync.reportOptimisticCandidateBlock() + + let resfut = newFuture[Result[void, BlockError]]("lcOptSyncBlockVerifier") + resfut.complete(res) + resfut + + optSync.blockVerifier = blockVerifier + optSync.requestManager = RequestManager.init(network, optSync.blockVerifier) + + optSync + +proc start*(optSync: LCOptimisticSync) = + optSync.requestManager.start() + +func supportsRetarget(syncStrategy: SyncStrategy): bool = + case syncStrategy + of 
SyncStrategy.None, SyncStrategy.RequestManager: + true + of SyncStrategy.SyncManager: + false + +proc syncUsingRequestManager(optSync: LCOptimisticSync) {.async.} = + let startTick = Moment.now() + + var cancellationRequested = false + while not cancellationRequested: + let root = optSync.lcBlocks.getBackfillRoot() + if root.isNone: + break + + if optSync.requestManager.inpQueue.empty: + optSync.requestManager.fetchAncestorBlocks(@[FetchRecord(root: root.get)]) + + try: + await chronos.sleepAsync(chronos.seconds(10)) + except CancelledError as exc: + cancellationRequested = true + + debug "LC optimistic sync complete", + headSlot = optSync.lcBlocks.getHeadSlot(), + finalizedSlot = optSync.lcBlocks.getFinalizedSlot(), + backfillSlot = optSync.lcBlocks.getBackfillSlot(), + frontfillSlot = optSync.lcBlocks.getFrontfillSlot(), + syncStrategy = optSync.syncStrategy, + cancellationRequested, + syncDur = Moment.now() - startTick + +proc syncUsingSyncManager(optSync: LCOptimisticSync) {.async.} = + let startTick = Moment.now() + + func getLocalHeadSlot(): Slot = + optSync.lcBlocks.getHeadSlot() + 1 + + proc getLocalWallSlot(): Slot = + optSync.getBeaconTime().slotOrZero + + var cancellationRequested = false + func getProgressSlot(): Slot = + if not cancellationRequested: + optSync.lcBlocks.getBackfillSlot() + else: + # Report out-of-band completion of sync + optSync.lcBlocks.getFrontfillSlot() + + func getFinalizedSlot(): Slot = + getProgressSlot() + + func getBackfillSlot(): Slot = + getProgressSlot() + + func getFrontfillSlot(): Slot = + optSync.lcBlocks.getFrontfillSlot() + + let lcOptSyncManager = newSyncManager[Peer, PeerID]( + optSync.network.peerPool, SyncQueueKind.Backward, getLocalHeadSlot, + getLocalWallSlot, getFinalizedSlot, getBackfillSlot, getFrontfillSlot, + progressPivot = optSync.lcBlocks.getHeadSlot(), optSync.blockVerifier, + maxHeadAge = 0, flags = {SyncManagerFlag.NoMonitor}, ident = "lcOptSync") + lcOptSyncManager.start() + while lcOptSyncManager.inProgress: + try: + await chronos.sleepAsync(chronos.seconds(10)) + except CancelledError as exc: + cancellationRequested = true + + debug "LC optimistic sync complete", + headSlot = optSync.lcBlocks.getHeadSlot(), + finalizedSlot = optSync.lcBlocks.getFinalizedSlot(), + backfillSlot = optSync.lcBlocks.getBackfillSlot(), + frontfillSlot = optSync.lcBlocks.getFrontfillSlot(), + syncStrategy = optSync.syncStrategy, + cancellationRequested, + syncDur = Moment.now() - startTick + +proc continueSync(optSync: LCOptimisticSync) {.gcsafe.} = + let + currentHeadSlot = optSync.lcBlocks.getHeadSlot() + targetHeadSlot = optSync.optimisticBid.slot + headDiff = + if targetHeadSlot > currentHeadSlot: + targetHeadSlot - currentHeadSlot + else: + currentHeadSlot - targetHeadSlot + + currentFinalizedSlot = optSync.lcBlocks.getFinalizedSlot() + targetFinalizedSlot = optSync.finalizedBid.slot + + backfillSlot = optSync.lcBlocks.getBackfillSlot() + frontfillSlot = optSync.lcBlocks.getFrontfillSlot() + syncDistance = + if backfillSlot > frontfillSlot: + backfillSlot - frontfillSlot + else: + 0 + + # If sync is complete, work is done + if currentHeadSlot == targetHeadSlot and + currentFinalizedSlot == targetFinalizedSlot and + syncDistance == 0: + return + + # Cancel ongoing sync if sync target jumped + if headDiff >= SLOTS_PER_EPOCH and optSync.syncFut != nil: + if not optSync.syncFut.finished: + optSync.syncFut.cancel() + return + + # When retargeting ongoing sync is not possible, cancel on finality change + if not optSync.syncStrategy.supportsRetarget: 
+    if currentFinalizedSlot != targetFinalizedSlot and optSync.syncFut != nil:
+      if not optSync.syncFut.finished:
+        optSync.syncFut.cancel()
+      return
+
+  # Set new sync target
+  let
+    finalizedBid = optSync.finalizedBid
+    optimisticBid = optSync.optimisticBid
+  doAssert optimisticBid.slot >= finalizedBid.slot
+  if optSync.lcBlocks.getHeadSlot() != optimisticBid.slot:
+    optSync.lcBlocks.setHeadBid(optimisticBid)
+  if optSync.lcBlocks.getFinalizedSlot() != finalizedBid.slot:
+    optSync.lcBlocks.setFinalizedBid(finalizedBid)
+    optSync.finalizedIsExecutionBlock.reset()
+    optSync.reportOptimisticCandidateBlock()
+
+  if optSync.syncFut == nil:
+    # Select sync strategy
+    optSync.syncFut =
+      if headDiff >= SLOTS_PER_EPOCH:
+        optSync.syncStrategy = SyncStrategy.SyncManager
+        optSync.syncUsingSyncManager()
+      else:
+        optSync.syncStrategy = SyncStrategy.RequestManager
+        optSync.syncUsingRequestManager()
+
+    # Continue syncing until complete
+    proc handleFinishedSync(future: pointer) =
+      optSync.syncStrategy.reset()
+      optSync.syncFut = nil
+      optSync.continueSync()
+    optSync.syncFut.addCallback(handleFinishedSync)
+
+proc setOptimisticHeader*(
+    optSync: LCOptimisticSync, optimisticHeader: BeaconBlockHeader) =
+  optSync.optimisticBid = optimisticHeader.toBlockId
+  optSync.continueSync()
+
+proc setFinalizedHeader*(
+    optSync: LCOptimisticSync, finalizedHeader: BeaconBlockHeader) =
+  optSync.finalizedBid = finalizedHeader.toBlockId
+  if optSync.finalizedBid.slot > optSync.optimisticBid.slot:
+    optSync.optimisticBid = optSync.finalizedBid
+  optSync.continueSync()
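+
+# A rough usage sketch (assuming an `LCOptimisticSync` instance `optSync` that
+# was created and started elsewhere): forwarding the latest light client
+# headers here (re)starts backfilling of the matching blocks.
+#
+#   optSync.setFinalizedHeader(finalizedHeader)    # finality target
+#   optSync.setOptimisticHeader(optimisticHeader)  # head target
+#
+# `continueSync` then syncs via `SyncManager` when the head target is at least
+# `SLOTS_PER_EPOCH` slots away, and via the `RequestManager` otherwise.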
diff --git a/beacon_chain/sync/request_manager.nim b/beacon_chain/sync/request_manager.nim
index d5c9534f51..d7fd6ff882 100644
--- a/beacon_chain/sync/request_manager.nim
+++ b/beacon_chain/sync/request_manager.nim
@@ -15,7 +15,7 @@ import
   ../networking/eth2_network, ../consensus_object_pools/block_quarantine,
   "."/sync_protocol, "."/sync_manager
 
-export sync_manager
+export block_quarantine, sync_manager
 
 logScope:
   topics = "requman"
diff --git a/beacon_chain/sync/sync_manager.nim b/beacon_chain/sync/sync_manager.nim
index efbe2b334a..2dafb52554 100644
--- a/beacon_chain/sync/sync_manager.nim
+++ b/beacon_chain/sync/sync_manager.nim
@@ -12,7 +12,7 @@ import stew/[results, base10], chronos, chronicles
 import
   ../spec/datatypes/[phase0, altair],
   ../spec/eth2_apis/rest_types,
-  ../spec/[helpers, forks],
+  ../spec/[helpers, forks, network],
   ../networking/[peer_pool, peer_scores, eth2_network],
   ../beacon_clock,
   "."/[sync_protocol, sync_queue]
diff --git a/beacon_chain/validator_client/attestation_service.nim b/beacon_chain/validator_client/attestation_service.nim
index cbe4334546..638ff77aff 100644
--- a/beacon_chain/validator_client/attestation_service.nim
+++ b/beacon_chain/validator_client/attestation_service.nim
@@ -1,4 +1,11 @@
-import std/[sets, sequtils]
+# beacon_chain
+# Copyright (c) 2021-2022 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import std/sets
 import chronicles
 import "."/[common, api, block_service]
 
diff --git a/beacon_chain/validator_client/common.nim b/beacon_chain/validator_client/common.nim
index b452902b46..8115f543a3 100644
--- a/beacon_chain/validator_client/common.nim
+++ b/beacon_chain/validator_client/common.nim
@@ -1,4 +1,11 @@
-import std/[tables, os, sets, sequtils, strutils]
+# beacon_chain
+# Copyright (c) 2021-2022 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import std/[tables, os, sets, sequtils]
 import chronos, presto, presto/client as presto_client, chronicles, confutils,
        json_serialization/std/[options, net],
        stew/[base10, results, byteutils]
diff --git a/beacon_chain/validator_client/sync_committee_service.nim b/beacon_chain/validator_client/sync_committee_service.nim
index a716da00aa..dc38e9acc4 100644
--- a/beacon_chain/validator_client/sync_committee_service.nim
+++ b/beacon_chain/validator_client/sync_committee_service.nim
@@ -1,5 +1,12 @@
+# beacon_chain
+# Copyright (c) 2022 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
 import
-  std/[sets, sequtils],
+  std/sets,
   chronicles,
   "."/[common, api, block_service],
   ../spec/datatypes/[phase0, altair, bellatrix],
diff --git a/beacon_chain/validators/validator_duties.nim b/beacon_chain/validators/validator_duties.nim
index 570dbb578e..aba2cb687c 100644
--- a/beacon_chain/validators/validator_duties.nim
+++ b/beacon_chain/validators/validator_duties.nim
@@ -13,7 +13,7 @@ import
   # Standard library
-  std/[os, osproc, sequtils, streams, tables],
+  std/[os, sequtils, tables],
 
   # Nimble packages
   stew/[assign2, byteutils, objects],
diff --git a/docs/the_nimbus_book/src/options.md b/docs/the_nimbus_book/src/options.md
index fa7c5b7477..1af8d49d96 100644
--- a/docs/the_nimbus_book/src/options.md
+++ b/docs/the_nimbus_book/src/options.md
@@ -9,7 +9,7 @@ You can pass any `nimbus_beacon_node` options to the `prater` and `mainnet` scri
 To see a list of the command line options availabe to you, with descriptions, run:
 
 ```
-build/./nimbus_beacon_node --help
+build/nimbus_beacon_node --help
 ```
 
 You should see the following output:
@@ -128,3 +128,10 @@ num-threads = 0
 trusted-node-url = "http://192.168.1.20:5052"
 ```
 
+# Exit Codes
+
+| Exit code | Description |
+|-----------|-------------|
+| 0 | Successful exit |
+| 1 | Generic failure or unspecified error |
+| 1031 | Doppelganger detection; the node should not be restarted automatically |
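+
+When supervising the node with a process manager or restart script, exit code 1031 is the one that should not trigger an automatic restart.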
desc: "The Eth2 network preset to use" name: "network" }: Option[string] @@ -99,22 +98,19 @@ proc loadFile(filename: string, T: type): T = proc doTransition(conf: NcliConf) = let - stateY = (ref ForkedHashedBeaconState)( - phase0Data: phase0.HashedBeaconState( - data: loadFile(conf.preState, phase0.BeaconState)), - kind: BeaconStateFork.Phase0 - ) - blckX = loadFile(conf.blck, phase0.SignedBeaconBlock) + cfg = getRuntimeConfig(conf.eth2Network) + stateY = newClone(readSszForkedHashedBeaconState( + cfg, readAllBytes(conf.preState).tryGet())) + blckX = readSszForkedSignedBeaconBlock( + cfg, readAllBytes(conf.blck).tryGet()) flags = if not conf.verifyStateRoot: {skipStateRootValidation} else: {} - setStateRoot(stateY[], hash_tree_root(stateY[].phase0Data.data)) - var cache = StateCache() info = ForkedEpochInfo() - let res = state_transition( - getRuntimeConfig(conf.eth2Network), stateY[], blckX, cache, info, - flags, noRollback) + let res = withBlck(blckX): + state_transition( + cfg, stateY[], blck, cache, info, flags, noRollback) if res.isErr(): error "State transition failed", error = res.error() quit 1 @@ -131,14 +127,10 @@ proc doSlots(conf: NcliConf) = var timers: array[Timers, RunningStat] let - stateY = withTimerRet(timers[tLoadState]): (ref ForkedHashedBeaconState)( - phase0Data: phase0.HashedBeaconState( - data: loadFile(conf.preState2, phase0.BeaconState)), - kind: BeaconStateFork.Phase0 - ) - - setStateRoot(stateY[], hash_tree_root(stateY[].phase0Data.data)) - + cfg = getRuntimeConfig(conf.eth2Network) + stateY = withTimerRet(timers[tLoadState]): + newClone(readSszForkedHashedBeaconState( + cfg, readAllBytes(conf.preState2).tryGet())) var cache = StateCache() info = ForkedEpochInfo() @@ -146,11 +138,11 @@ proc doSlots(conf: NcliConf) = let isEpoch = (getStateField(stateY[], slot) + 1).is_epoch withTimer(timers[if isEpoch: tApplyEpochSlot else: tApplySlot]): process_slots( - defaultRuntimeConfig, stateY[], getStateField(stateY[], slot) + 1, + cfg, stateY[], getStateField(stateY[], slot) + 1, cache, info, {}).expect("should be able to advance slot") withTimer(timers[tSaveState]): - saveSSZFile(conf.postState, stateY[]) + saveSSZFile(conf.postState2, stateY[]) printTimers(false, timers) @@ -200,7 +192,8 @@ proc doSSZ(conf: NcliConf) = of "voluntary_exit": printit(VoluntaryExit) when isMainModule: - let conf = NcliConf.load() + let + conf = NcliConf.load() case conf.cmd: of hashTreeRoot: doSSZ(conf) diff --git a/scripts/launch_local_testnet.sh b/scripts/launch_local_testnet.sh index 5fcda5589b..192d7aedcd 100755 --- a/scripts/launch_local_testnet.sh +++ b/scripts/launch_local_testnet.sh @@ -676,7 +676,9 @@ for NUM_NODE in $(seq 0 $(( NUM_NODES - 1 ))); do ${STOP_AT_EPOCH_FLAG} \ --rest-port="$(( BASE_REST_PORT + NUM_NODE ))" \ --metrics-port="$(( BASE_METRICS_PORT + NUM_NODE ))" \ - --serve-light-client-data=1 --import-light-client-data=only-new \ + --light-client-enable=on \ + --serve-light-client-data=on \ + --import-light-client-data=only-new \ ${EXTRA_ARGS} \ &> "${DATA_DIR}/log${NUM_NODE}.txt" & diff --git a/tests/all_tests.nim b/tests/all_tests.nim index af36573b88..e511d49fc4 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -15,6 +15,7 @@ import # Unit test ./test_attestation_pool, ./test_beacon_chain_db, ./test_beacon_time, + ./test_block_clearance_light_client, ./test_block_dag, ./test_block_processor, ./test_block_quarantine, diff --git a/tests/test_block_clearance_light_client.nim b/tests/test_block_clearance_light_client.nim new file mode 100644 index 
0000000000..e5cc4179b9 --- /dev/null +++ b/tests/test_block_clearance_light_client.nim @@ -0,0 +1,599 @@ +# beacon_chain +# Copyright (c) 2022 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [Defect].} +{.used.} + +import + # Status libraries + eth/keys, taskpools, + # Beacon chain internals + ../beacon_chain/consensus_object_pools/ + [block_clearance_light_client, block_clearance, + block_quarantine, blockchain_dag], + ../beacon_chain/spec/state_transition, + # Test utilities + ./testutil, ./testdbutil + +suite "Block clearance (light client)" & preset(): + let + cfg = block: + var res = defaultRuntimeConfig + res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + 1 + res + taskpool = Taskpool.new() + + proc newTestDag(): ChainDAGRef = + const num_validators = SLOTS_PER_EPOCH + let + validatorMonitor = newClone(ValidatorMonitor.init()) + dag = ChainDAGRef.init( + cfg, makeTestDB(num_validators), validatorMonitor, {}) + dag + + proc addBlocks( + dag: ChainDAGRef, + numBlocks: int, + finalizedCheckpoints: var seq[Checkpoint], + syncCommitteeRatio = 0.0, + numSkippedSlots = 0.uint64) = + let quarantine = newClone(Quarantine.init()) + var + cache: StateCache + verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool) + if numSkippedSlots > 0: + var info: ForkedEpochInfo + let slot = getStateField(dag.headState, slot) + numSkippedSlots + process_slots( + cfg, dag.headState, slot, cache, info, flags = {}).expect("no failure") + for blck in makeTestBlocks(dag.headState, cache, numBlocks, + attested = true, syncCommitteeRatio, cfg): + let added = + case blck.kind + of BeaconBlockFork.Phase0: + const nilCallback = OnPhase0BlockAdded(nil) + dag.addHeadBlock(verifier, blck.phase0Data, nilCallback) + of BeaconBlockFork.Altair: + const nilCallback = OnAltairBlockAdded(nil) + dag.addHeadBlock(verifier, blck.altairData, nilCallback) + of BeaconBlockFork.Bellatrix: + const nilCallback = OnBellatrixBlockAdded(nil) + dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback) + check: added.isOk() + dag.updateHead(added[], quarantine[]) + withState(dag.headState): + if finalizedCheckpoints.len == 0 or + state.data.finalized_checkpoint != finalizedCheckpoints[^1]: + finalizedCheckpoints.add(state.data.finalized_checkpoint) + + proc checkBlocks(lcBlocks: LCBlocks, dag: ChainDAGRef, slots: Slice[Slot]) = + for slot in slots.a .. 
slots.b: + let + latestLcBlck = lcBlocks.getLatestBlockThroughSlot(slot) + lcBlck = lcBlocks.getBlockAtSlot(slot) + bsi = dag.getBlockIdAtSlot(slot) + dagBlck = + if bsi.isOk: + dag.getForkedBlock(bsi.get.bid) + else: + Opt[ForkedTrustedSignedBeaconBlock].err() + check: + lcBlck.isOk == dagBlck.isOk + lcBlck.isOk == latestLcBlck.isOk + if lcBlck.isOk: + check: + lcBlck.get.root == dagBlck.get.root + lcBlck.get.root == latestLcBlck.get.root + + setup: + let dag = newTestDag() + var finalizedCheckpoints: seq[Checkpoint] = @[] + dag.addBlocks(200, finalizedCheckpoints) + + test "Initial sync": + const maxSlots = 160 + var lcBlocks = initLCBlocks(maxSlots) + let minSlot = dag.head.slot + 1 - maxSlots + check: + lcBlocks.getHeadSlot() == FAR_FUTURE_SLOT + lcBlocks.getFinalizedSlot() == GENESIS_SLOT + lcBlocks.getFrontfillSlot() == GENESIS_SLOT + lcBlocks.getBackfillSlot() == GENESIS_SLOT + lcBlocks.setHeadBid(dag.head.bid) + check: + lcBlocks.getHeadSlot() == dag.head.slot + lcBlocks.getFinalizedSlot() == GENESIS_SLOT + lcBlocks.getFrontfillSlot() == minSlot + lcBlocks.getBackfillSlot() == dag.head.slot + 1 + lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid) + check: + lcBlocks.getHeadSlot() == dag.head.slot + lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot + lcBlocks.getFrontfillSlot() == minSlot + lcBlocks.getBackfillSlot() == dag.head.slot + 1 + var bid = dag.head.bid + while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot(): + let + bdata = dag.getForkedBlock(bid).valueOr: + break + res = lcBlocks.addBlock(bdata.asSigned()) + check: + res.isOk + lcBlocks.getHeadSlot() == dag.head.slot + lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot + lcBlocks.getFrontfillSlot() == minSlot + lcBlocks.getBackfillSlot() == max(bdata.slot, minSlot) + bid = dag.parent(bid).valueOr: + break + check: + lcBlocks.getHeadSlot() == dag.head.slot + lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot + lcBlocks.getFrontfillSlot() == minSlot + lcBlocks.getBackfillSlot() == minSlot + lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot) + + test "Delayed finality update": + const maxSlots = 160 + var lcBlocks = initLCBlocks(maxSlots) + let minSlot = dag.head.slot + 1 - maxSlots + lcBlocks.setHeadBid(dag.head.bid) + var bid = dag.head.bid + while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot(): + let + bdata = dag.getForkedBlock(bid).valueOr: + break + res = lcBlocks.addBlock(bdata.asSigned()) + check res.isOk + bid = dag.parent(bid).valueOr: + break + + for finalizedCheckpoint in finalizedCheckpoints: + let bsi = dag.getBlockIdAtSlot(finalizedCheckpoint.epoch.start_slot) + check bsi.isOk + lcBlocks.setFinalizedBid(bsi.get.bid) + + check: + lcBlocks.getHeadSlot() == dag.head.slot + lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot + lcBlocks.getFrontfillSlot() == minSlot + lcBlocks.getBackfillSlot() == minSlot + lcBlocks.checkBlocks(dag, minSlot .. 
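+  # `LCBlocks` keeps a sliding window of at most `maxSlots` slots ending at the
+  # configured head. `setHeadBid`/`setFinalizedBid` define the sync target;
+  # blocks are then added newest-to-oldest, moving the backfill slot down from
+  # `head + 1` towards the frontfill slot (`head + 1 - maxSlots`, clamped to
+  # genesis).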
+  test "Initial sync":
+    const maxSlots = 160
+    var lcBlocks = initLCBlocks(maxSlots)
+    let minSlot = dag.head.slot + 1 - maxSlots
+    check:
+      lcBlocks.getHeadSlot() == FAR_FUTURE_SLOT
+      lcBlocks.getFinalizedSlot() == GENESIS_SLOT
+      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
+      lcBlocks.getBackfillSlot() == GENESIS_SLOT
+    lcBlocks.setHeadBid(dag.head.bid)
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == GENESIS_SLOT
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    var bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check:
+        res.isOk
+        lcBlocks.getHeadSlot() == dag.head.slot
+        lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+        lcBlocks.getFrontfillSlot() == minSlot
+        lcBlocks.getBackfillSlot() == max(bdata.slot, minSlot)
+      bid = dag.parent(bid).valueOr:
+        break
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == minSlot
+    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
+
+  test "Delayed finality update":
+    const maxSlots = 160
+    var lcBlocks = initLCBlocks(maxSlots)
+    let minSlot = dag.head.slot + 1 - maxSlots
+    lcBlocks.setHeadBid(dag.head.bid)
+    var bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+
+    for finalizedCheckpoint in finalizedCheckpoints:
+      let bsi = dag.getBlockIdAtSlot(finalizedCheckpoint.epoch.start_slot)
+      check bsi.isOk
+      lcBlocks.setFinalizedBid(bsi.get.bid)
+
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == minSlot
+    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
+
+  test "Incremental sync":
+    const maxSlots = 160
+    var lcBlocks = initLCBlocks(maxSlots)
+    let
+      oldHeadSlot = dag.head.slot
+      oldMinSlot = dag.head.slot + 1 - maxSlots
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    var bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+
+    dag.addBlocks(20, finalizedCheckpoints)
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    let newMinSlot = dag.head.slot + 1 - maxSlots
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == newMinSlot
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+
+    bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == newMinSlot
+      lcBlocks.getBackfillSlot() == newMinSlot
+    lcBlocks.checkBlocks(dag, newMinSlot .. dag.head.slot)
+
+    dag.addBlocks(200, finalizedCheckpoints)
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    let minSlot = dag.head.slot + 1 - maxSlots
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+
+    bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == minSlot
+    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
+
+  test "Reverse incremental sync":
+    const maxSlots = 160
+    var lcBlocks = initLCBlocks(maxSlots)
+    let
+      newHeadBid = dag.head.bid
+      newFinalizedBid = dag.finalizedHead.blck.bid
+
+    dag.addBlocks(20, finalizedCheckpoints)
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    let oldMinSlot = dag.head.slot + 1 - maxSlots
+
+    var bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == oldMinSlot
+      lcBlocks.getBackfillSlot() == oldMinSlot
+    lcBlocks.checkBlocks(dag, oldMinSlot .. dag.head.slot)
+
+    lcBlocks.setHeadBid(newHeadBid)
+    lcBlocks.setFinalizedBid(newFinalizedBid)
+    let newMinSlot = newHeadBid.slot + 1 - maxSlots
+    check:
+      lcBlocks.getHeadSlot() == newHeadBid.slot
+      lcBlocks.getFinalizedSlot() == newFinalizedBid.slot
+      lcBlocks.getFrontfillSlot() == newMinSlot
+      lcBlocks.getBackfillSlot() == oldMinSlot
+
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+
+    check:
+      lcBlocks.getHeadSlot() == newHeadBid.slot
+      lcBlocks.getFinalizedSlot() == newFinalizedBid.slot
+      lcBlocks.getFrontfillSlot() == newMinSlot
+      lcBlocks.getBackfillSlot() == newMinSlot
+    lcBlocks.checkBlocks(dag, newMinSlot .. newHeadBid.slot)
+
+  test "Reorg":
+    const maxSlots = 160
+    var lcBlocks = initLCBlocks(maxSlots)
+    let minSlot = dag.head.slot + 1 - maxSlots
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    var bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == minSlot
+    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
+
+    let dag2 = newTestDag()
+    var finalizedCheckpoints2: seq[Checkpoint] = @[]
+    dag2.addBlocks(200, finalizedCheckpoints2, syncCommitteeRatio = 0.1)
+    lcBlocks.setHeadBid(dag2.head.bid)
+    lcBlocks.setFinalizedBid(dag2.finalizedHead.blck.bid)
+    check:
+      lcBlocks.getHeadSlot() == dag2.head.slot
+      lcBlocks.getFinalizedSlot() == dag2.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == dag2.head.slot + 1
+    bid = dag2.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag2.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag2.parent(bid).valueOr:
+        break
+    check:
+      lcBlocks.getHeadSlot() == dag2.head.slot
+      lcBlocks.getFinalizedSlot() == dag2.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == minSlot
+    lcBlocks.checkBlocks(dag2, minSlot .. dag2.head.slot)
+
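+    # Re-finalizing a block from the original chain drops the conflicting
+    # blocks again: the head is reset to the newly finalized block and
+    # backfill restarts from there.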
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    check:
+      lcBlocks.getHeadSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() ==
+        max(dag.finalizedHead.slot, maxSlots.Slot) + 1 - maxSlots
+      lcBlocks.getBackfillSlot() == dag.finalizedHead.blck.slot + 1
+    bid = dag.finalizedHead.blck.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+    lcBlocks.setHeadBid(dag.head.bid)
+    bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == minSlot
+      lcBlocks.getBackfillSlot() == minSlot
+    lcBlocks.checkBlocks(dag, minSlot .. dag.head.slot)
+
+  test "Low slot numbers":
+    const maxSlots = 320 # DAG slot numbers are smaller than `maxSlots`
+    var lcBlocks = initLCBlocks(maxSlots)
+    let
+      oldHeadBid = dag.head.bid
+      oldFinalizedBid = dag.finalizedHead.blck.bid
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    var bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
+      lcBlocks.getBackfillSlot() == GENESIS_SLOT
+    lcBlocks.checkBlocks(dag, GENESIS_SLOT .. dag.head.slot)
+
+    dag.addBlocks(20, finalizedCheckpoints)
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    bid = dag.head.bid
+    while lcBlocks.getBackfillSlot() > lcBlocks.getFrontfillSlot():
+      let
+        bdata = dag.getForkedBlock(bid).valueOr:
+          break
+        res = lcBlocks.addBlock(bdata.asSigned())
+      check res.isOk
+      bid = dag.parent(bid).valueOr:
+        break
+    check:
+      lcBlocks.getHeadSlot() == dag.head.slot
+      lcBlocks.getFinalizedSlot() == dag.finalizedHead.blck.slot
+      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
+      lcBlocks.getBackfillSlot() == GENESIS_SLOT
+
+    lcBlocks.setHeadBid(oldHeadBid)
+    lcBlocks.setFinalizedBid(oldFinalizedBid)
+    check:
+      lcBlocks.getHeadSlot() == oldHeadBid.slot
+      lcBlocks.getFinalizedSlot() == oldFinalizedBid.slot
+      lcBlocks.getFrontfillSlot() == GENESIS_SLOT
+      lcBlocks.getBackfillSlot() == GENESIS_SLOT
+
+  test "Error conditions":
+    let dag2 = newTestDag()
+    var finalizedCheckpoints2: seq[Checkpoint] = @[]
+    dag2.addBlocks(200, finalizedCheckpoints2, syncCommitteeRatio = 0.1)
+
+    const maxSlots = 2
+    var lcBlocks = initLCBlocks(maxSlots)
+    check:
+      lcBlocks.getBlockAtSlot(GENESIS_SLOT).isErr
+      lcBlocks.getBlockAtSlot(FAR_FUTURE_SLOT).isErr
+      lcBlocks.getLatestBlockThroughSlot(GENESIS_SLOT).isErr
+      lcBlocks.getLatestBlockThroughSlot(FAR_FUTURE_SLOT).isErr
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    check:
+      lcBlocks.getBlockAtSlot(GENESIS_SLOT).isErr
+      lcBlocks.getBlockAtSlot(FAR_FUTURE_SLOT).isErr
+      lcBlocks.getBlockAtSlot(dag.head.slot).isErr
+      lcBlocks.getBlockAtSlot(dag.finalizedHead.blck.slot).isErr
+      lcBlocks.getLatestBlockThroughSlot(GENESIS_SLOT).isErr
+      lcBlocks.getLatestBlockThroughSlot(FAR_FUTURE_SLOT).isErr
+      lcBlocks.getLatestBlockThroughSlot(dag.head.slot).isErr
+      lcBlocks.getLatestBlockThroughSlot(dag.finalizedHead.blck.slot).isErr
+    let
+      parentBid = dag.parent(dag.head.bid).expect("Parent exists")
+      parentBdata = dag.getForkedBlock(parentBid).expect("Parent block exists")
+    var res = lcBlocks.addBlock(parentBdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.MissingParent
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    let bdata2 = dag2.getForkedBlock(dag2.head.bid).expect("DAG 2 block exists")
+    res = lcBlocks.addBlock(bdata2.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.UnviableFork
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    let bdata = dag.getForkedBlock(dag.head.bid).expect("DAG block exists")
+    res = lcBlocks.addBlock(bdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag.head.slot
+    res = lcBlocks.addBlock(bdata2.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.UnviableFork
+      lcBlocks.getBackfillSlot() == dag.head.slot
+    res = lcBlocks.addBlock(bdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.Duplicate
+      lcBlocks.getBackfillSlot() == dag.head.slot
+    let
+      onePastBid = dag.parent(parentBid).expect("Parent of parent exists")
+      onePastBdata = dag.getForkedBlock(onePastBid).expect("Block exists")
+    res = lcBlocks.addBlock(onePastBdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.MissingParent
+      lcBlocks.getBackfillSlot() == dag.head.slot
+    res = lcBlocks.addBlock(parentBdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == parentBdata.slot
+      lcBlocks.getBlockAtSlot(parentBdata.slot).isOk
+      lcBlocks.getLatestBlockThroughSlot(parentBdata.slot).isOk
+    res = lcBlocks.addBlock(onePastBdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
+      lcBlocks.getBlockAtSlot(onePastBdata.slot).isErr
+      lcBlocks.getLatestBlockThroughSlot(onePastBdata.slot).isErr
+    res = lcBlocks.addBlock(onePastBdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.Duplicate
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
+
+    let oldHeadBid = dag.head.bid
+    dag.addBlocks(1, finalizedCheckpoints, numSkippedSlots = 3) # ---X
+    dag2.addBlocks(2, finalizedCheckpoints2, numSkippedSlots = 2) # --XX
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+
+    let newBdata = dag.getForkedBlock(dag.head.bid).expect("New block ok")
+    res = lcBlocks.addBlock(newBdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag.head.slot
+    res = lcBlocks.addBlock(bdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
+      lcBlocks.getBlockAtSlot(dag.head.slot).isOk
+      lcBlocks.getBlockAtSlot(dag.head.slot - 1).isErr
+      lcBlocks.getBlockAtSlot(dag.head.slot - 2).isErr
+    let
+      newParentBid2 = dag2.parent(dag2.head.bid).expect("New parent 2 exists")
+      newParentBdata2 = dag2.getForkedBlock(newParentBid2).expect("Parent 2 ok")
+    res = lcBlocks.addBlock(newParentBdata2.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.UnviableFork
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1 - maxSlots
+
+    lcBlocks.setHeadBid(dag2.head.bid)
+    lcBlocks.setFinalizedBid(newParentBid2)
+    let newBdata2 = dag2.getForkedBlock(dag2.head.bid).expect("New block 2 ok")
+    res = lcBlocks.addBlock(newBdata2.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag2.head.slot
+    res = lcBlocks.addBlock(newParentBdata2.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag2.head.slot + 1 - maxSlots
+
+    lcBlocks.setHeadBid(dag.head.bid)
+    res = lcBlocks.addBlock(newBdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag.head.slot
+    res = lcBlocks.addBlock(bdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.UnviableFork
+      lcBlocks.getHeadSlot() == newParentBid2.slot
+      lcBlocks.getFinalizedSlot() == newParentBid2.slot
+      lcBlocks.getFrontfillSlot() == newParentBid2.slot + 1 - maxSlots
+      lcBlocks.getBackfillSlot() == newParentBid2.slot + 1
+    res = lcBlocks.addBlock(newParentBdata2.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == newParentBid2.slot
+    res = lcBlocks.addBlock(bdata2.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == newParentBid2.slot + 1 - maxSlots
+
+    lcBlocks.setHeadBid(dag2.head.bid)
+    lcBlocks.setFinalizedBid(oldHeadBid)
+    res = lcBlocks.addBlock(newBdata2.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag2.head.slot
+    res = lcBlocks.addBlock(newParentBdata2.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == newParentBid2.slot
+    res = lcBlocks.addBlock(bdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.MissingParent
+      lcBlocks.getBackfillSlot() == newParentBid2.slot
+
+    lcBlocks = initLCBlocks(maxSlots = 0)
+    lcBlocks.setHeadBid(dag.head.bid)
+    lcBlocks.setFinalizedBid(dag.finalizedHead.blck.bid)
+    res = lcBlocks.addBlock(newBdata2.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.UnviableFork
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    res = lcBlocks.addBlock(newBdata.asSigned())
+    check:
+      res.isOk
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    res = lcBlocks.addBlock(newBdata2.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.UnviableFork
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1
+    res = lcBlocks.addBlock(newBdata.asSigned())
+    check:
+      res.isErr
+      res.error == BlockError.Duplicate
+      lcBlocks.getBackfillSlot() == dag.head.slot + 1