From 70b8055072f7d3e906fa93e21f37038e9e0004f5 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Mon, 8 May 2023 23:36:55 +0200 Subject: [PATCH 1/6] accelerate `getShufflingRef` When an uncached `ShufflingRef` is requested, we currently replay state which can take several seconds. Acceleration is possible by: 1. Start from any state with locked-in `get_active_validator_indices`. Any blocks / slots applied to such a state can only affect that result for future epochs, so the state remains viable for querying the target epoch. `compute_activation_exit_epoch(state.slot.epoch) > target.epoch` 2. Determine the highest common ancestor among `state` and `target.blck`. At the ancestor slot, the same rule applies regarding `get_active_validator_indices`. `compute_activation_exit_epoch(ancestorSlot.epoch) > target.epoch` 3. We now have a `state` that shares history with `target.blck` up through a common ancestor slot. Any blocks / slots that the `state` contains, which are not part of the `target.blck` history, affect `get_active_validator_indices` only at epochs _after_ `target.epoch`. 4. Select the `state.randao_mixes[N]` that is closest to the common ancestor. Either direction is fine (above / below ancestor). 5. From that RANDAO mix, mix in / out all RANDAO reveals from blocks in-between. This is just an XOR operation, so fully reversible. `mix = mix xor SHA256(blck.message.body.randao_reveal)` 6. Compute the attester dependent slot from `target.epoch`. `if epoch >= 2: (target.epoch - 1).start_slot - 1 else: GENESIS_SLOT` 7. Trace back from `target.blck` to the attester dependent slot. We now have the destination for which we want to obtain RANDAO. 8. Mix in all RANDAO reveals from blocks up through the `dependentBlck`. Same method, no special handling necessary for epoch transitions. 9. Combine `get_active_validator_indices` from `state` at `target.epoch` with the recovered RANDAO value at `dependentBlck` to obtain the requested shuffling, and construct the `ShufflingRef` without replay. 
--- AllTests-mainnet.md | 7 +- .../consensus_object_pools/block_dag.nim | 24 ++ .../consensus_object_pools/blockchain_dag.nim | 321 +++++++++++++++++- beacon_chain/spec/helpers.nim | 23 +- beacon_chain/spec/validator.nim | 16 +- tests/test_blockchain_dag.nim | 66 +++- 6 files changed, 427 insertions(+), 30 deletions(-) diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 1212685959..bd7f3e36d0 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -436,6 +436,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 + RestErrorMessage writer tests OK ``` OK: 2/2 Fail: 0/2 Skip: 0/2 +## Shufflings +```diff ++ Accelerated shuffling computation OK +``` +OK: 1/1 Fail: 0/1 Skip: 0/1 ## Slashing Interchange tests [Preset: mainnet] ```diff + Slashing test: duplicate_pubkey_not_slashable.json OK @@ -667,4 +672,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 OK: 9/9 Fail: 0/9 Skip: 0/9 ---TOTAL--- -OK: 380/385 Fail: 0/385 Skip: 5/385 +OK: 381/386 Fail: 0/386 Skip: 5/386 diff --git a/beacon_chain/consensus_object_pools/block_dag.nim b/beacon_chain/consensus_object_pools/block_dag.nim index b1b5a636c0..eda657fe5e 100644 --- a/beacon_chain/consensus_object_pools/block_dag.nim +++ b/beacon_chain/consensus_object_pools/block_dag.nim @@ -149,6 +149,30 @@ func get_ancestor*(blck: BlockRef, slot: Slot, blck = blck.parent +func commonAncestor*(a, b: BlockRef, lowSlot: Slot): Opt[BlockRef] = + ## Return the common ancestor with highest slot of two non-nil `BlockRef`, + ## limited by `lowSlot` (`err` if exceeded). 
+ doAssert a != nil + doAssert b != nil + if a.slot < lowSlot or b.slot < lowSlot: + return err() + + var + aa = a + bb = b + while aa != bb: + if aa.slot >= bb.slot: + aa = aa.parent + doAssert aa != nil, "All `BlockRef` lead to `finalizedHead`" + if aa.slot < lowSlot: + return err() + else: + bb = bb.parent + doAssert bb != nil, "All `BlockRef` lead to `finalizedHead`" + if bb.slot < lowSlot: + return err() + ok aa + func atSlot*(blck: BlockRef, slot: Slot): BlockSlot = ## Return a BlockSlot at a given slot, with the block set to the closest block ## available. If slot comes from before the block, a suitable block ancestor diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index 77aa91cd2a..f611f95c43 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -9,7 +9,7 @@ import std/[algorithm, sequtils, tables, sets], - stew/[assign2, byteutils, results], + stew/[arrayops, assign2, byteutils, results], metrics, snappy, chronicles, ../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers, state_transition, validator], @@ -469,12 +469,15 @@ func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] = Opt.some(EpochKey(bid: bsi.bid, epoch: epoch)) +func shufflingDependentSlot*(epoch: Epoch): Slot = + if epoch >= 2: (epoch - 1).start_slot() - 1 else: Slot(0) + func findShufflingRef*( dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[ShufflingRef] = ## Lookup a shuffling in the cache, returning `none` if it's not present - see ## `getShufflingRef` for a version that creates a new instance if it's missing let - dependent_slot = if epoch > 2: (epoch - 1).start_slot() - 1 else: Slot(0) + dependent_slot = epoch.shufflingDependentSlot dependent_bsi = dag.atSlot(bid, dependent_slot).valueOr: return Opt.none(ShufflingRef) @@ -1299,23 +1302,315 @@ proc getFinalizedEpochRef*(dag: ChainDAGRef): 
EpochRef = dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect( "getEpochRef for finalized head should always succeed") +func ancestorSlotForShuffling*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, + blck: BlockRef, epoch: Epoch): Opt[Slot] = + ## Return slot of `blck` ancestor to which `state` can be rewinded + ## so that RANDAO at `epoch.shufflingDependentSlot` can be computed. + ## Return `err` if `state` is unviable to compute shuffling for `blck@epoch`. + + # A state must be somewhat recent so that `get_active_validator_indices` + # for the queried `epoch` cannot be affected by any such skipped processing. + const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64 + let + lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1) + lowSlot = lowEpoch.start_slot + if state.data.slot < lowSlot or blck.slot < lowSlot: + return err() + + # Check that state is related to the information stored in the DAG, + # and determine the corresponding `BlockRef`, or `finalizedHead` if finalized + let + stateBid = state.latest_block_id + stateBlck = + if dag.finalizedHead.blck == nil: + return err() + elif stateBid.slot > dag.finalizedHead.blck.slot: + ? dag.getBlockRef(stateBid.root) + elif stateBid.slot == dag.finalizedHead.blck.slot: + if stateBid.root != dag.finalizedHead.blck.root: + return err() + dag.finalizedHead.blck + else: + let bsi = ? dag.getBlockIdAtSlot(stateBid.slot) + if bsi.bid != stateBid: + return err() + dag.finalizedHead.blck + + # Check that history up to `lowSlot` is included in `state`, + # otherwise `get_active_validator_indices` may still change + if lowSlot <= dag.finalizedHead.blck.slot: + let + bsi = ? 
dag.getBlockIdAtSlot(lowSlot) + stateLowBlockRoot = + if state.data.slot == lowSlot: + stateBid.root + else: + state.data.get_block_root_at_slot(lowSlot) + if stateLowBlockRoot != bsi.bid.root: + return err() + + # Compute ancestor slot for starting RANDAO recovery + let + ancestorBlck = + if stateBlck == dag.finalizedHead.blck: + dag.finalizedHead.blck + else: + ? commonAncestor(blck, stateBlck, lowSlot) + dependentSlot = epoch.shufflingDependentSlot + doAssert dependentSlot >= lowSlot + ok min(min(stateBid.slot, ancestorBlck.slot), dependentSlot) + +func estimateComputeRandaoComplexity*( + stateSlot, ancestorSlot, dependentSlot: Slot): uint64 = + ## Estimate the number of slots for which blocks need to be loaded + ## to recover the RANDAO seed at `dependentSlot`, assuming that we start + ## at `stateSlot` and have to mix out RANDAO back to `ancestorSlot` first. + doAssert ancestorSlot <= stateSlot + doAssert ancestorSlot <= dependentSlot + + let + stateEpoch = stateSlot.epoch + ancestorEpoch = ancestorSlot.epoch + + # First, need to move from `state` back to `ancestor` + highRandaoSlot = + # `randao_mixes[ancestorEpoch]` + if stateEpoch == ancestorEpoch: + stateSlot + else: + (ancestorEpoch + 1).start_slot - 1 + distanceToancestorSlot = + if ancestorEpoch == GENESIS_EPOCH: + # Can only move backward + highRandaoSlot - ancestorSlot + else: + # `randao_mixes[ancestorEpoch - 1]` + let lowRandaoSlot = ancestorEpoch.start_slot - 1 + min(highRandaoSlot - ancestorSlot, ancestorSlot - lowRandaoSlot) + + # Then, need to apply RANDAO mix-ins up to the shuffling dependent slot + distanceToDependentSlot = dependentSlot - ancestorSlot + + distanceToancestorSlot + distanceToDependentSlot + +proc mixRandao( + dag: ChainDAGRef, mix: var Eth2Digest, + bid: BlockId): Opt[void] = + ## Mix in/out the RANDAO reveal from the given block. + let bdata = ? 
dag.getForkedBlock(bid) + withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset` + mix.data.mxor eth2digest(blck.message.body.randao_reveal.toRaw()).data + ok() + +proc computeRandaoMix*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, ancestorSlot: Slot, + blck: BlockRef, epoch: Epoch +): Opt[tuple[dependentBid: BlockId, mix: Eth2Digest]] = + ## Compute the requested RANDAO mix for `blck@epoch` based on `state`. + ## `state` must have the correct ``get_active_validator_indices` for `epoch`, + ## which is checked by `ancestorSlotForShuffling` (passed via `ancestorSlot`). + ## RANDAO reveals of blocks from `state.data.slot` back to `ancestorSlot` are + ## mixed out from `state.data.randao_mixes`, and RANDAO reveals from blocks + ## up through `epoch.shufflingDependentSlot` are mixed in. Number of blocks + ## to load can be estimated using `estimateComputeRandaoComplexity`. + let + stateSlot = state.data.slot + dependentSlot = epoch.shufflingDependentSlot + doAssert ancestorSlot <= stateSlot + doAssert ancestorSlot <= dependentSlot + + # Load initial mix + var mix {.noinit.}: Eth2Digest + let + stateEpoch = stateSlot.epoch + ancestorEpoch = ancestorSlot.epoch + highRandaoSlot = + # `randao_mixes[ancestorEpoch]` + if stateEpoch == ancestorEpoch: + stateSlot + else: + (ancestorEpoch + 1).start_slot - 1 + startSlot = + if ancestorEpoch == GENESIS_EPOCH: + # Can only move backward + mix = state.data.get_randao_mix(ancestorEpoch) + highRandaoSlot + else: + # `randao_mixes[ancestorEpoch - 1]` + let lowRandaoSlot = ancestorEpoch.start_slot - 1 + if highRandaoSlot - ancestorSlot < ancestorSlot - lowRandaoSlot: + mix = state.data.get_randao_mix(ancestorEpoch) + highRandaoSlot + else: + mix = state.data.get_randao_mix(ancestorEpoch - 1) + lowRandaoSlot + slotsToMix = + if startSlot > ancestorSlot: + (ancestorSlot + 1) .. startSlot + else: + (startSlot + 1) .. 
ancestorSlot + highRoot = + if slotsToMix.b == stateSlot: + state.latest_block_root + else: + doAssert slotsToMix.b < stateSlot + state.data.get_block_root_at_slot(slotsToMix.b) + + # Move `mix` from `startSlot` to `ancestorSlot` + var bid = + if slotsToMix.b >= dag.finalizedHead.slot: + var b = ? dag.getBlockRef(highRoot) + let lowSlot = max(slotsToMix.a, dag.finalizedHead.slot) + while b.bid.slot > lowSlot: + ? dag.mixRandao(mix, b.bid) + b = b.parent + doAssert b != nil + b.bid + else: + var highSlot = slotsToMix.b + const availableSlots = SLOTS_PER_HISTORICAL_ROOT + let lowSlot = max(state.data.slot, availableSlots.Slot) - availableSlots + while highSlot > lowSlot and + state.data.get_block_root_at_slot(highSlot - 1) == highRoot: + dec highSlot + if highSlot + SLOTS_PER_HISTORICAL_ROOT > state.data.slot: + BlockId(slot: highSlot, root: highRoot) + else: + let bsi = ? dag.getBlockIdAtSlot(highSlot) + doAssert bsi.bid.root == highRoot + bsi.bid + while bid.slot >= slotsToMix.a: + ? dag.mixRandao(mix, bid) + bid = ? dag.parent(bid) + + # Move `mix` from `ancestorSlot` to `dependentSlot` + var dependentBid {.noinit.}: BlockId + bid = + if dependentSlot >= dag.finalizedHead.slot: + var b = blck.get_ancestor(dependentSlot) + doAssert b != nil + dependentBid = b.bid + let lowSlot = max(ancestorSlot, dag.finalizedHead.slot) + while b.bid.slot > lowSlot: + ? dag.mixRandao(mix, b.bid) + b = b.parent + doAssert b != nil + b.bid + else: + let bsi = ? dag.getBlockIdAtSlot(dependentSlot) + dependentBid = bsi.bid + bsi.bid + while bid.slot > ancestorSlot: + ? dag.mixRandao(mix, bid) + bid = ? dag.parent(bid) + + ok (dependentBid: dependentBid, mix: mix) + +proc computeShufflingRef*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, ancestorSlot: Slot, + blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + let (dependentBid, mix) = + ? 
dag.computeRandaoMix(state, ancestorSlot, blck, epoch) + + return ok ShufflingRef( + epoch: epoch, + attester_dependent_root: dependentBid.root, + shuffled_active_validator_indices: + state.data.get_shuffled_active_validator_indices(epoch, mix)) + +proc computeShufflingRef*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + let dependentSlot = epoch.shufflingDependentSlot + + type StateKind {.pure.} = enum + Head + EpochRef + Clearance + + var + bestComplexity = uint64.high + bestAncestorSlot: Opt[Slot] + bestState: StateKind + + template prepareState(state: ForkedHashedBeaconState, kind: StateKind) = + if bestComplexity > 0: + withState(state): + let ancestorSlot = dag.ancestorSlotForShuffling(forkyState, blck, epoch) + if ancestorSlot.isSome: + let complexity = estimateComputeRandaoComplexity( + forkyState.data.slot, ancestorSlot.get, dependentSlot) + if complexity < bestComplexity: + bestComplexity = complexity + bestAncestorSlot.ok ancestorSlot.get + bestState = kind + + prepareState(dag.headState, StateKind.Head) + prepareState(dag.epochRefState, StateKind.EpochRef) + prepareState(dag.clearanceState, StateKind.Clearance) + + if bestAncestorSlot.isSome: + return + case bestState + of StateKind.Head: + withState(dag.headState): + dag.computeShufflingRef(forkyState, bestAncestorSlot.get, blck, epoch) + of StateKind.EpochRef: + withState(dag.epochRefState): + dag.computeShufflingRef(forkyState, bestAncestorSlot.get, blck, epoch) + of StateKind.Clearance: + withState(dag.clearanceState): + dag.computeShufflingRef(forkyState, bestAncestorSlot.get, blck, epoch) + + # Load state from DB, as DAG states are unviable for computing the shuffling + let state = newClone(dag.headState) + var + e = dependentSlot.epoch + b = blck + while e > GENESIS_EPOCH and compute_activation_exit_epoch(e) > epoch: + let boundaryBlockSlot = e.start_slot - 1 + b = b.get_ancestor(boundaryBlockSlot) # nil if < finalized head + let + bid = + if b != nil: + b.bid + else: + 
let bsi = ? dag.getBlockIdAtSlot(boundaryBlockSlot) + bsi.bid + bsi = BlockSlotId.init(bid, boundaryBlockSlot + 1) + if not dag.getState(bsi, state[]): + dec e + continue + + return withState(state[]): + let ancestorSlot = ? dag.ancestorSlotForShuffling(forkyState, blck, epoch) + dag.computeShufflingRef(forkyState, ancestorSlot, blck, epoch) + err() + proc getShufflingRef*( dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, preFinalized: bool): Opt[ShufflingRef] = ## Return the shuffling in the given history and epoch - this potentially is ## faster than returning a full EpochRef because the shuffling is determined ## an epoch in advance and therefore is less sensitive to reorgs - let shufflingRef = dag.findShufflingRef(blck.bid, epoch) - if shufflingRef.isNone: - # TODO here, we could check the existing cached states and see if any one - # has the right dependent root - unlike EpochRef, we don't need an _exact_ - # epoch match - let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr: - return Opt.none ShufflingRef - dag.putShufflingRef(epochRef.shufflingRef) - Opt.some epochRef.shufflingRef - else: - shufflingRef + var shufflingRef = dag.findShufflingRef(blck.bid, epoch) + if shufflingRef.isSome: + return shufflingRef + + # Use existing states to quickly compute the shuffling + shufflingRef = dag.computeShufflingRef(blck, epoch) + if shufflingRef.isSome: + dag.putShufflingRef(shufflingRef.get) + return shufflingRef + + # Last resort, this can take several seconds as this may replay states + # TODO here, we could check the existing cached states and see if any one + # has the right dependent root - unlike EpochRef, we don't need an _exact_ + # epoch match + let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr: + return Opt.none ShufflingRef + dag.putShufflingRef(epochRef.shufflingRef) + Opt.some epochRef.shufflingRef func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId = ## The first ancestor BlockSlot that is a state 
checkpoint diff --git a/beacon_chain/spec/helpers.nim b/beacon_chain/spec/helpers.nim index 330e5e0364..56c70ee4da 100644 --- a/beacon_chain/spec/helpers.nim +++ b/beacon_chain/spec/helpers.nim @@ -176,23 +176,24 @@ func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest = hash_tree_root(domain_wrapped_object) # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/beacon-chain.md#get_seed -func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType): - Eth2Digest = +func get_seed*( + state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType, + mix: Eth2Digest): Eth2Digest = ## Return the seed at ``epoch``. - var seed_input : array[4+8+32, byte] - - # Detect potential underflow - static: - doAssert EPOCHS_PER_HISTORICAL_VECTOR > MIN_SEED_LOOKAHEAD - seed_input[0..3] = domain_type.data seed_input[4..11] = uint_to_bytes(epoch.uint64) - seed_input[12..43] = - get_randao_mix(state, # Avoid underflow - epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1).data + seed_input[12..43] = mix.data eth2digest(seed_input) +func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType): + Eth2Digest = + # Detect potential underflow + static: doAssert EPOCHS_PER_HISTORICAL_VECTOR > MIN_SEED_LOOKAHEAD + let mix = get_randao_mix(state, # Avoid underflow + epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1) + state.get_seed(epoch, domain_type, mix) + # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#add_flag func add_flag*(flags: ParticipationFlags, flag_index: int): ParticipationFlags = let flag = ParticipationFlags(1'u8 shl flag_index) diff --git a/beacon_chain/spec/validator.nim b/beacon_chain/spec/validator.nim index ad5fd5b2a0..4916be6434 100644 --- a/beacon_chain/spec/validator.nim +++ b/beacon_chain/spec/validator.nim @@ -125,14 +125,22 @@ func shuffle_list*(input: var seq[ValidatorIndex], seed: Eth2Digest) = shuffle func 
get_shuffled_active_validator_indices*( - state: ForkyBeaconState, epoch: Epoch): seq[ValidatorIndex] = + state: ForkyBeaconState, epoch: Epoch, + mix: Eth2Digest): seq[ValidatorIndex] = # Non-spec function, to cache a data structure from which one can cheaply # compute both get_active_validator_indexes() and get_beacon_committee(). var active_validator_indices = get_active_validator_indices(state, epoch) + let seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER, mix) + shuffle_list(active_validator_indices, seed) + active_validator_indices - shuffle_list( - active_validator_indices, get_seed(state, epoch, DOMAIN_BEACON_ATTESTER)) - +func get_shuffled_active_validator_indices*( + state: ForkyBeaconState, epoch: Epoch): seq[ValidatorIndex] = + # Non-spec function, to cache a data structure from which one can cheaply + # compute both get_active_validator_indexes() and get_beacon_committee(). + var active_validator_indices = get_active_validator_indices(state, epoch) + let seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER) + shuffle_list(active_validator_indices, seed) active_validator_indices func get_shuffled_active_validator_indices*( diff --git a/tests/test_blockchain_dag.nim b/tests/test_blockchain_dag.nim index 7e37ddccab..b73b63c3d8 100644 --- a/tests/test_blockchain_dag.nim +++ b/tests/test_blockchain_dag.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2022 Status Research & Development GmbH +# Copyright (c) 2018-2023 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). 
@@ -1165,3 +1165,67 @@ suite "Pruning": check: dag.tail.slot == Epoch(EPOCHS_PER_STATE_SNAPSHOT).start_slot - 1 not db.containsBlock(blocks[1].root) + +suite "Shufflings": + const + numEpochs = 10 + numValidators = SLOTS_PER_EPOCH + let + cfg = defaultRuntimeConfig + validatorMonitor = newClone(ValidatorMonitor.init()) + dag = ChainDAGRef.init( + cfg, makeTestDB(numValidators, cfg = cfg), validatorMonitor, {}) + quarantine = newClone(Quarantine.init()) + taskpool = Taskpool.new() + var + verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool) + cache: StateCache + proc addBlocks(blocks: uint64) = + for blck in makeTestBlocks( + dag.headState, cache, blocks.int, attested = true, cfg = cfg): + let added = + case blck.kind + of ConsensusFork.Phase0: + const nilCallback = OnPhase0BlockAdded(nil) + dag.addHeadBlock(verifier, blck.phase0Data, nilCallback) + of ConsensusFork.Altair: + const nilCallback = OnAltairBlockAdded(nil) + dag.addHeadBlock(verifier, blck.altairData, nilCallback) + of ConsensusFork.Bellatrix: + const nilCallback = OnBellatrixBlockAdded(nil) + dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback) + of ConsensusFork.Capella: + const nilCallback = OnCapellaBlockAdded(nil) + dag.addHeadBlock(verifier, blck.capellaData, nilCallback) + of ConsensusFork.Deneb: + const nilCallback = OnDenebBlockAdded(nil) + dag.addHeadBlock(verifier, blck.denebData, nilCallback) + doAssert added.isOk() + dag.updateHead(added[], quarantine[], []) + + var states: array[numEpochs, ref ForkedHashedBeaconState] + for i in 0 ..< states.len: + states[i] = newClone(dag.headState) + addBlocks(SLOTS_PER_EPOCH) + + test "Accelerated shuffling computation": + var blck = dag.head + while blck != nil and blck.bid.slot >= dag.finalizedHead.slot: + for epoch in 0.Epoch .. 
numEpochs.Epoch + 2: + let expectedShuffling = dag.getShufflingRef(blck, epoch, true) + check expectedShuffling.isSome + let computedShuffling = dag.computeShufflingRef(blck, epoch) + check: + computedShuffling.isSome + computedShuffling.get[] == expectedShuffling.get[] + for state in states: + withState(state[]): + let ancestorSlot = dag.ancestorSlotForShuffling( + forkyState, blck, epoch) + if ancestorSlot.isSome: + let shufflingRef = dag.computeShufflingRef( + forkyState, ancestorSlot.get, blck, epoch) + check: + shufflingRef.isSome + shufflingRef.get[] == expectedShuffling.get[] + blck = blck.parent From f862140a965bf71370ad39770b29c322ea38f0ad Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Tue, 9 May 2023 13:19:05 +0200 Subject: [PATCH 2/6] more tests and simplify logic --- .../consensus_object_pools/blockchain_dag.nim | 175 ++++++++---------- tests/test_blockchain_dag.nim | 94 +++++++--- tests/testblockutil.nim | 12 +- 3 files changed, 150 insertions(+), 131 deletions(-) diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index f611f95c43..f24dcce966 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -393,6 +393,17 @@ func nextTimestamp[I, T](cache: var LRUCache[I, T]): uint32 = inc cache.timestamp cache.timestamp +template peekIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = + block: + var res: Opt[T] + for i in 0 ..< I: + template e: untyped = cache.entries[i] + template it: untyped {.inject, used.} = e.value + if e.lastUsed != 0 and predicate: + res.ok it + break + res + template findIt[I, T](cache: var LRUCache[I, T], predicate: untyped): Opt[T] = block: var res: Opt[T] @@ -472,6 +483,15 @@ func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] = func shufflingDependentSlot*(epoch: Epoch): Slot = if epoch >= 2: (epoch - 1).start_slot() - 1 else: Slot(0) +func 
putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) = + ## Store shuffling in the cache + if shufflingRef.epoch < dag.finalizedHead.slot.epoch(): + # Only cache epoch information for unfinalized blocks - earlier states + # are seldomly used (ie RPC), so no need to cache + return + + dag.shufflingRefs.put shufflingRef + func findShufflingRef*( dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[ShufflingRef] = ## Lookup a shuffling in the cache, returning `none` if it's not present - see @@ -479,19 +499,23 @@ func findShufflingRef*( let dependent_slot = epoch.shufflingDependentSlot dependent_bsi = dag.atSlot(bid, dependent_slot).valueOr: - return Opt.none(ShufflingRef) + return err() - dag.shufflingRefs.findIt( - it.epoch == epoch and dependent_bsi.bid.root == it.attester_dependent_root) + # Check `ShufflingRef` cache + let shufflingRef = dag.shufflingRefs.findIt( + it.epoch == epoch and it.attester_dependent_root == dependent_bsi.bid.root) + if shufflingRef.isOk: + return shufflingRef -func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) = - ## Store shuffling in the cache - if shufflingRef.epoch < dag.finalizedHead.slot.epoch(): - # Only cache epoch information for unfinalized blocks - earlier states - # are seldomly used (ie RPC), so no need to cache - return + # Check `EpochRef` cache + let epochRef = dag.epochRefs.peekIt( + it.shufflingRef.epoch == epoch and + it.shufflingRef.attester_dependent_root == dependent_bsi.bid.root) + if epochRef.isOk: + dag.putShufflingRef(epochRef.get.shufflingRef) + return ok epochRef.get.shufflingRef - dag.shufflingRefs.put shufflingRef + err() func findEpochRef*( dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochRef] = @@ -1302,7 +1326,7 @@ proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef = dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect( "getEpochRef for finalized head should always succeed") -func ancestorSlotForShuffling*( +func ancestorSlotForShuffling( dag: 
ChainDAGRef, state: ForkyHashedBeaconState, blck: BlockRef, epoch: Epoch): Opt[Slot] = ## Return slot of `blck` ancestor to which `state` can be rewinded @@ -1361,39 +1385,6 @@ func ancestorSlotForShuffling*( doAssert dependentSlot >= lowSlot ok min(min(stateBid.slot, ancestorBlck.slot), dependentSlot) -func estimateComputeRandaoComplexity*( - stateSlot, ancestorSlot, dependentSlot: Slot): uint64 = - ## Estimate the number of slots for which blocks need to be loaded - ## to recover the RANDAO seed at `dependentSlot`, assuming that we start - ## at `stateSlot` and have to mix out RANDAO back to `ancestorSlot` first. - doAssert ancestorSlot <= stateSlot - doAssert ancestorSlot <= dependentSlot - - let - stateEpoch = stateSlot.epoch - ancestorEpoch = ancestorSlot.epoch - - # First, need to move from `state` back to `ancestor` - highRandaoSlot = - # `randao_mixes[ancestorEpoch]` - if stateEpoch == ancestorEpoch: - stateSlot - else: - (ancestorEpoch + 1).start_slot - 1 - distanceToancestorSlot = - if ancestorEpoch == GENESIS_EPOCH: - # Can only move backward - highRandaoSlot - ancestorSlot - else: - # `randao_mixes[ancestorEpoch - 1]` - let lowRandaoSlot = ancestorEpoch.start_slot - 1 - min(highRandaoSlot - ancestorSlot, ancestorSlot - lowRandaoSlot) - - # Then, need to apply RANDAO mix-ins up to the shuffling dependent slot - distanceToDependentSlot = dependentSlot - ancestorSlot - - distanceToancestorSlot + distanceToDependentSlot - proc mixRandao( dag: ChainDAGRef, mix: var Eth2Digest, bid: BlockId): Opt[void] = @@ -1404,19 +1395,19 @@ proc mixRandao( ok() proc computeRandaoMix*( - dag: ChainDAGRef, state: ForkyHashedBeaconState, ancestorSlot: Slot, + dag: ChainDAGRef, state: ForkyHashedBeaconState, blck: BlockRef, epoch: Epoch ): Opt[tuple[dependentBid: BlockId, mix: Eth2Digest]] = ## Compute the requested RANDAO mix for `blck@epoch` based on `state`. 
- ## `state` must have the correct ``get_active_validator_indices` for `epoch`, - ## which is checked by `ancestorSlotForShuffling` (passed via `ancestorSlot`). + ## `state` must have the correct `get_active_validator_indices` for `epoch`. ## RANDAO reveals of blocks from `state.data.slot` back to `ancestorSlot` are ## mixed out from `state.data.randao_mixes`, and RANDAO reveals from blocks - ## up through `epoch.shufflingDependentSlot` are mixed in. Number of blocks - ## to load can be estimated using `estimateComputeRandaoComplexity`. + ## up through `epoch.shufflingDependentSlot` are mixed in. let stateSlot = state.data.slot dependentSlot = epoch.shufflingDependentSlot + # Check `state` has locked-in `get_active_validator_indices` for `epoch` + ancestorSlot = ? dag.ancestorSlotForShuffling(state, blck, epoch) doAssert ancestorSlot <= stateSlot doAssert ancestorSlot <= dependentSlot @@ -1507,11 +1498,11 @@ proc computeRandaoMix*( ok (dependentBid: dependentBid, mix: mix) -proc computeShufflingRef*( - dag: ChainDAGRef, state: ForkyHashedBeaconState, ancestorSlot: Slot, +proc computeShufflingRefFromState*( + dag: ChainDAGRef, state: ForkyHashedBeaconState, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = let (dependentBid, mix) = - ? dag.computeRandaoMix(state, ancestorSlot, blck, epoch) + ? 
dag.computeRandaoMix(state, blck, epoch) return ok ShufflingRef( epoch: epoch, @@ -1519,51 +1510,26 @@ proc computeShufflingRef*( shuffled_active_validator_indices: state.data.get_shuffled_active_validator_indices(epoch, mix)) -proc computeShufflingRef*( +proc computeShufflingRefFromMemory*( dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = - let dependentSlot = epoch.shufflingDependentSlot - - type StateKind {.pure.} = enum - Head - EpochRef - Clearance - - var - bestComplexity = uint64.high - bestAncestorSlot: Opt[Slot] - bestState: StateKind - - template prepareState(state: ForkedHashedBeaconState, kind: StateKind) = - if bestComplexity > 0: + ## Compute `ShufflingRef` from states available in memory (up to ~5 ms) + template tryWithState(state: ForkedHashedBeaconState) = + block: withState(state): - let ancestorSlot = dag.ancestorSlotForShuffling(forkyState, blck, epoch) - if ancestorSlot.isSome: - let complexity = estimateComputeRandaoComplexity( - forkyState.data.slot, ancestorSlot.get, dependentSlot) - if complexity < bestComplexity: - bestComplexity = complexity - bestAncestorSlot.ok ancestorSlot.get - bestState = kind - - prepareState(dag.headState, StateKind.Head) - prepareState(dag.epochRefState, StateKind.EpochRef) - prepareState(dag.clearanceState, StateKind.Clearance) - - if bestAncestorSlot.isSome: - return - case bestState - of StateKind.Head: - withState(dag.headState): - dag.computeShufflingRef(forkyState, bestAncestorSlot.get, blck, epoch) - of StateKind.EpochRef: - withState(dag.epochRefState): - dag.computeShufflingRef(forkyState, bestAncestorSlot.get, blck, epoch) - of StateKind.Clearance: - withState(dag.clearanceState): - dag.computeShufflingRef(forkyState, bestAncestorSlot.get, blck, epoch) - - # Load state from DB, as DAG states are unviable for computing the shuffling - let state = newClone(dag.headState) + let shufflingRef = + dag.computeShufflingRefFromState(forkyState, blck, epoch) + if shufflingRef.isOk: + return 
shufflingRef + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + +proc computeShufflingRefFromDatabase*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + ## Load state from DB, for when DAG states are unviable (up to ~500 ms) + let + dependentSlot = epoch.shufflingDependentSlot + state = newClone(dag.headState) var e = dependentSlot.epoch b = blck @@ -1583,10 +1549,25 @@ proc computeShufflingRef*( continue return withState(state[]): - let ancestorSlot = ? dag.ancestorSlotForShuffling(forkyState, blck, epoch) - dag.computeShufflingRef(forkyState, ancestorSlot, blck, epoch) + dag.computeShufflingRefFromState(forkyState, blck, epoch) err() +proc computeShufflingRef*( + dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] = + # Try to compute `ShufflingRef` from states available in memory + template tryWithState(state: ForkedHashedBeaconState) = + withState(state): + let shufflingRef = + dag.computeShufflingRefFromState(forkyState, blck, epoch) + if shufflingRef.isOk: + return shufflingRef + tryWithState dag.headState + tryWithState dag.epochRefState + tryWithState dag.clearanceState + + # Fall back to database + dag.computeShufflingRefFromDatabase(blck, epoch) + proc getShufflingRef*( dag: ChainDAGRef, blck: BlockRef, epoch: Epoch, preFinalized: bool): Opt[ShufflingRef] = diff --git a/tests/test_blockchain_dag.nim b/tests/test_blockchain_dag.nim index b73b63c3d8..2f88049118 100644 --- a/tests/test_blockchain_dag.nim +++ b/tests/test_blockchain_dag.nim @@ -8,6 +8,7 @@ {.used.} import + std/[random, sequtils], unittest2, eth/keys, taskpools, ../beacon_chain/spec/datatypes/base, @@ -1167,9 +1168,7 @@ suite "Pruning": not db.containsBlock(blocks[1].root) suite "Shufflings": - const - numEpochs = 10 - numValidators = SLOTS_PER_EPOCH + const numValidators = SLOTS_PER_EPOCH let cfg = defaultRuntimeConfig validatorMonitor = newClone(ValidatorMonitor.init()) @@ -1180,9 +1179,12 @@ suite 
"Shufflings": var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool) cache: StateCache - proc addBlocks(blocks: uint64) = + graffiti: GraffitiBytes + proc addBlocks(blocks: uint64, attested: bool) = + inc distinctBase(graffiti)[0] # Avoid duplicate blocks across branches for blck in makeTestBlocks( - dag.headState, cache, blocks.int, attested = true, cfg = cfg): + dag.headState, cache, blocks.int, + attested = attested, graffiti = graffiti, cfg = cfg): let added = case blck.kind of ConsensusFork.Phase0: @@ -1200,32 +1202,64 @@ suite "Shufflings": of ConsensusFork.Deneb: const nilCallback = OnDenebBlockAdded(nil) dag.addHeadBlock(verifier, blck.denebData, nilCallback) - doAssert added.isOk() + check added.isOk() dag.updateHead(added[], quarantine[], []) - var states: array[numEpochs, ref ForkedHashedBeaconState] - for i in 0 ..< states.len: - states[i] = newClone(dag.headState) - addBlocks(SLOTS_PER_EPOCH) + var states: seq[ref ForkedHashedBeaconState] + + # Genesis state + states.add newClone(dag.headState) + + # Linear part of history (6 epochs) + for _ in 0 ..< 8: + addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = true) + states.add newClone(dag.headState) + + # Start branching (3 epochs) + var oldHead = dag.head + for _ in 0 ..< 4: + addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = false) + states.add newClone(dag.headState) + dag.updateHead(oldHead, quarantine[], []) + + # Cover entire range of epochs + const maxEpochOfInterest = + compute_activation_exit_epoch((6 + 3).Epoch) + 2 test "Accelerated shuffling computation": - var blck = dag.head - while blck != nil and blck.bid.slot >= dag.finalizedHead.slot: - for epoch in 0.Epoch .. 
numEpochs.Epoch + 2: - let expectedShuffling = dag.getShufflingRef(blck, epoch, true) - check expectedShuffling.isSome - let computedShuffling = dag.computeShufflingRef(blck, epoch) - check: - computedShuffling.isSome - computedShuffling.get[] == expectedShuffling.get[] - for state in states: - withState(state[]): - let ancestorSlot = dag.ancestorSlotForShuffling( - forkyState, blck, epoch) - if ancestorSlot.isSome: - let shufflingRef = dag.computeShufflingRef( - forkyState, ancestorSlot.get, blck, epoch) - check: - shufflingRef.isSome - shufflingRef.get[] == expectedShuffling.get[] - blck = blck.parent + randomize() + let forkBlocks = dag.forkBlocks.toSeq() + for _ in 0 ..< 1000: + let + blck = sample(forkBlocks).data + epoch = rand(GENESIS_EPOCH .. maxEpochOfInterest) + checkpoint "blck: " & $shortLog(blck) & " / epoch: " & $shortLog(epoch) + + let epochRef = dag.getEpochRef(blck, epoch, true) + check epochRef.isOk + + proc checkShuffling(computedShufflingRef: Opt[ShufflingRef]) = + ## Check that computed shuffling matches the one from `EpochRef`. 
+ if computedShufflingRef.isOk: + check computedShufflingRef.get[] == epochRef.get.shufflingRef[] + + # If shuffling is computable from DAG, check its correctness + checkShuffling dag.computeShufflingRefFromMemory(blck, epoch) + + # If shuffling is computable from DB, check its correctness + checkShuffling dag.computeShufflingRefFromDatabase(blck, epoch) + + # Shuffling should be correct when starting from any cached state + for state in states: + withState(state[]): + let + shufflingRef = + dag.computeShufflingRefFromState(forkyState, blck, epoch) + stateEpoch = forkyState.data.get_current_epoch + blckEpoch = blck.bid.slot.epoch + minEpoch = min(stateEpoch, blckEpoch) + if compute_activation_exit_epoch(minEpoch) <= epoch: + check shufflingRef.isErr + else: + check shufflingRef.isOk + checkShuffling shufflingRef diff --git a/tests/testblockutil.nim b/tests/testblockutil.nim index 27aa85020b..984229e529 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -522,9 +522,9 @@ iterator makeTestBlocks*( blocks: int, attested: bool, syncCommitteeRatio = 0.0, + graffiti = default(GraffitiBytes), cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock = - var - state = assignClone(state) + var state = assignClone(state) for _ in 0.. 
Date: Wed, 10 May 2023 17:19:28 +0200 Subject: [PATCH 3/6] test with different number of deposits per branch --- tests/test_blockchain_dag.nim | 75 +++++++++++++++++++++++--------- tests/test_gossip_validation.nim | 8 ++-- tests/testblockutil.nim | 17 +++++++- tests/testdbutil.nim | 15 +++++-- 4 files changed, 86 insertions(+), 29 deletions(-) diff --git a/tests/test_blockchain_dag.nim b/tests/test_blockchain_dag.nim index 4ef63ad163..1e1b347f19 100644 --- a/tests/test_blockchain_dag.nim +++ b/tests/test_blockchain_dag.nim @@ -11,6 +11,7 @@ import std/[random, sequtils], unittest2, eth/keys, taskpools, + ../beacon_chain/eth1/merkle_minimal, ../beacon_chain/spec/datatypes/base, ../beacon_chain/spec/[beaconstate, forks, helpers, signatures, state_transition], ../beacon_chain/[beacon_chain_db], @@ -574,8 +575,7 @@ suite "chain DAG finalization tests" & preset(): test "init with gaps" & preset(): for blck in makeTestBlocks( - dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2), - true): + dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2), attested = true): let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback) check: added.isOk() dag.updateHead(added[], quarantine, []) @@ -1168,22 +1168,34 @@ suite "Pruning": not db.containsBlock(blocks[1].root) suite "Shufflings": - const numValidators = SLOTS_PER_EPOCH + const + numValidators = SLOTS_PER_EPOCH + targetNumValidators = 20 * SLOTS_PER_EPOCH * MAX_DEPOSITS + let cfg = defaultRuntimeConfig + var deposits = newSeqOfCap[Deposit](targetNumValidators) + for depositIndex in 0 ..< targetNumValidators: + deposits.add Deposit(data: makeDeposit(depositIndex.int, cfg = cfg)) let - cfg = defaultRuntimeConfig + eth1Data = Eth1Data( + deposit_root: deposits.attachMerkleProofs(), + deposit_count: deposits.lenu64) validatorMonitor = newClone(ValidatorMonitor.init()) dag = ChainDAGRef.init( - cfg, makeTestDB(numValidators, cfg = cfg), validatorMonitor, {}) + cfg, makeTestDB( + numValidators, eth1Data = 
Opt.some(eth1Data), + flags = {}, cfg = cfg), + validatorMonitor, {}) quarantine = newClone(Quarantine.init()) taskpool = Taskpool.new() + var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool) - cache: StateCache graffiti: GraffitiBytes - proc addBlocks(blocks: uint64, attested: bool) = + proc addBlocks(blocks: uint64, attested: bool, cache: var StateCache) = inc distinctBase(graffiti)[0] # Avoid duplicate blocks across branches for blck in makeTestBlocks( - dag.headState, cache, blocks.int, attested = attested, + dag.headState, cache, blocks.int, eth1_data = eth1Data, + attested = attested, allDeposits = deposits, graffiti = graffiti, cfg = cfg): let added = case blck.kind @@ -1210,27 +1222,48 @@ suite "Shufflings": # Genesis state states.add newClone(dag.headState) - # Linear part of history (6 epochs) - for _ in 0 ..< 8: - addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = true) + # Create a segment and cache the post state (0.75 epochs + empty slots) + proc createSegment(attested: bool, delaySlots = 0.uint64) = + var cache: StateCache + + # Add some empty slots to have different deposit history + if delaySlots > 0: + var info: ForkedEpochInfo + check cfg.process_slots( + dag.headState, + getStateField(dag.headState, slot) + delaySlots, + cache, info, flags = {}).isOk + + # Add 0.75 epochs + addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = attested, cache) states.add newClone(dag.headState) - # Start branching (3 epochs) - var oldHead = dag.head - for _ in 0 ..< 4: # Number of branches - for _ in 0 ..< 4: # 3 epochs per branch (4 * 3/4) - addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = false) - states.add newClone(dag.headState) + # Linear part of history (3.75 epochs) + for _ in 0 ..< 5: + createSegment(attested = true) + + # Start branching (6 epochs + up to 0.5 epoch) + func numDelaySlots(branchId: int): uint64 = + branchId.uint64 * SLOTS_PER_EPOCH div 8 + for a in 0 ..< 2: + let oldHead = dag.head + createSegment(attested = false, delaySlots 
= a.numDelaySlots) + for b in 0 ..< 2: + let oldHead = dag.head + createSegment(attested = false, delaySlots = b.numDelaySlots) + for _ in 0 ..< 3: + createSegment(attested = false, delaySlots = a.numDelaySlots) + createSegment(attested = false, delaySlots = b.numDelaySlots) + dag.updateHead(oldHead, quarantine[], []) dag.updateHead(oldHead, quarantine[], []) - # Cover entire range of epochs - const maxEpochOfInterest = - compute_activation_exit_epoch((6 + 3).Epoch) + 2 + # Cover entire range of epochs plus some extra + const maxEpochOfInterest = compute_activation_exit_epoch(11.Epoch) + 2 test "Accelerated shuffling computation": randomize() let forkBlocks = dag.forkBlocks.toSeq() - for _ in 0 ..< 750: # Number of random tests + for _ in 0 ..< 100: # Number of random tests (against _all_ cached states) let blck = sample(forkBlocks).data epoch = rand(GENESIS_EPOCH .. maxEpochOfInterest) diff --git a/tests/test_gossip_validation.nim b/tests/test_gossip_validation.nim index b11456d23b..857de34e9d 100644 --- a/tests/test_gossip_validation.nim +++ b/tests/test_gossip_validation.nim @@ -74,10 +74,9 @@ suite "Gossip validation " & preset(): committeeLen(63) == 0 test "validateAttestation": - var - cache: StateCache + var cache: StateCache for blck in makeTestBlocks( - dag.headState, cache, int(SLOTS_PER_EPOCH * 5), false): + dag.headState, cache, int(SLOTS_PER_EPOCH * 5), attested = false): let added = dag.addHeadBlock(verifier, blck.phase0Data) do ( blckRef: BlockRef, signedBlock: phase0.TrustedSignedBeaconBlock, epochRef: EpochRef, unrealized: FinalityCheckpoints): @@ -196,7 +195,8 @@ suite "Gossip validation - Extra": # Not based on preset config cfg, makeTestDB(num_validators), validatorMonitor, {}) var cache = StateCache() for blck in makeTestBlocks( - dag.headState, cache, int(SLOTS_PER_EPOCH), false, cfg = cfg): + dag.headState, cache, int(SLOTS_PER_EPOCH), + attested = false, cfg = cfg): let added = case blck.kind of ConsensusFork.Phase0: diff --git 
a/tests/testblockutil.nim b/tests/testblockutil.nim index 984229e529..1a3d29a556 100644 --- a/tests/testblockutil.nim +++ b/tests/testblockutil.nim @@ -520,7 +520,9 @@ iterator makeTestBlocks*( state: ForkedHashedBeaconState, cache: var StateCache, blocks: int, - attested: bool, + eth1_data = Eth1Data(), + attested = false, + allDeposits = newSeq[Deposit](), syncCommitteeRatio = 0.0, graffiti = default(GraffitiBytes), cfg = defaultRuntimeConfig): ForkedSignedBeaconBlock = @@ -534,11 +536,24 @@ iterator makeTestBlocks*( state[], parent_root, getStateField(state[], slot), cache) else: @[] + stateEth1 = getStateField(state[], eth1_data) + stateDepositIndex = getStateField(state[], eth1_deposit_index) + deposits = + if stateDepositIndex < stateEth1.deposit_count: + let + lowIndex = stateDepositIndex + numDeposits = min(MAX_DEPOSITS, stateEth1.deposit_count - lowIndex) + highIndex = lowIndex + numDeposits - 1 + allDeposits[lowIndex .. highIndex] + else: + newSeq[Deposit]() sync_aggregate = makeSyncAggregate(state[], syncCommitteeRatio, cfg) yield addTestBlock( state[], cache, + eth1_data = eth1_data, attestations = attestations, + deposits = deposits, sync_aggregate = sync_aggregate, graffiti = graffiti, cfg = cfg) diff --git a/tests/testdbutil.nim b/tests/testdbutil.nim index 5ee8513bce..43d3b3ce5f 100644 --- a/tests/testdbutil.nim +++ b/tests/testdbutil.nim @@ -17,7 +17,10 @@ import export beacon_chain_db, testblockutil, kvstore, kvstore_sqlite3 proc makeTestDB*( - validators: Natural, cfg = defaultRuntimeConfig): BeaconChainDB = + validators: Natural, + eth1Data = Opt.none(Eth1Data), + flags: UpdateFlags = {skipBlsValidation}, + cfg = defaultRuntimeConfig): BeaconChainDB = let genState = (ref ForkedHashedBeaconState)( kind: ConsensusFork.Phase0, @@ -25,8 +28,14 @@ proc makeTestDB*( cfg, ZERO_HASH, 0, - makeInitialDeposits(validators.uint64, flags = {skipBlsValidation}), - {skipBlsValidation})) + makeInitialDeposits(validators.uint64, flags), + flags)) + + # Override 
Eth1Data on request, skipping the lengthy Eth1 voting process + if eth1Data.isOk: + withState(genState[]): + forkyState.data.eth1_data = eth1Data.get + forkyState.root = hash_tree_root(forkyState.data) result = BeaconChainDB.new("", cfg = cfg, inMemory = true) ChainDAGRef.preInit(result, genState[]) From 32dc1d5c2f72672f90c585efd1b540596aa6c426 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 11 May 2023 10:49:58 +0200 Subject: [PATCH 4/6] Update beacon_chain/consensus_object_pools/blockchain_dag.nim Co-authored-by: Jacek Sieka --- beacon_chain/consensus_object_pools/blockchain_dag.nim | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index a6deaa63e9..ebfaa6e5e9 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -498,8 +498,7 @@ func findShufflingRef*( ## `getShufflingRef` for a version that creates a new instance if it's missing let dependent_slot = epoch.shufflingDependentSlot - dependent_bsi = dag.atSlot(bid, dependent_slot).valueOr: - return err() + dependent_bsi = ? 
dag.atSlot(bid, dependent_slot) # Check `ShufflingRef` cache let shufflingRef = dag.shufflingRefs.findIt( From 29ad048e909c316575515c79b79a851ed007bbcf Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 11 May 2023 11:25:23 +0200 Subject: [PATCH 5/6] `commonAncestor` tests --- tests/test_block_dag.nim | 255 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 254 insertions(+), 1 deletion(-) diff --git a/tests/test_block_dag.nim b/tests/test_block_dag.nim index 7f32b4a74f..a9d8359910 100644 --- a/tests/test_block_dag.nim +++ b/tests/test_block_dag.nim @@ -1,5 +1,5 @@ # beacon_chain -# Copyright (c) 2018-2022 Status Research & Development GmbH +# Copyright (c) 2018-2023 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). @@ -51,6 +51,259 @@ suite "BlockRef and helpers": s4.get_ancestor(Slot(3)) == s2 s4.get_ancestor(Slot(4)) == s4 + test "commonAncestor sanity": + # s0 + # / \ + # s1 s3 + # / \ + # s2 s6 + # / \ \ + # s4 s5 s7 + # \ + # s8 + # \ + # s9 + let + s0 = BlockRef(bid: BlockId(slot: Slot(0))) + s1 = BlockRef(bid: BlockId(slot: Slot(1)), parent: s0) + s2 = BlockRef(bid: BlockId(slot: Slot(2)), parent: s1) + s3 = BlockRef(bid: BlockId(slot: Slot(3)), parent: s0) + s4 = BlockRef(bid: BlockId(slot: Slot(4)), parent: s2) + s5 = BlockRef(bid: BlockId(slot: Slot(5)), parent: s2) + s6 = BlockRef(bid: BlockId(slot: Slot(6)), parent: s3) + s7 = BlockRef(bid: BlockId(slot: Slot(7)), parent: s6) + s8 = BlockRef(bid: BlockId(slot: Slot(8)), parent: s4) + s9 = BlockRef(bid: BlockId(slot: Slot(9)), parent: s8) + check: + commonAncestor(s0, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s1, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s2, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s3, Slot(0)) == Opt.some(s0) + 
commonAncestor(s0, s4, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s5, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s6, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s8, Slot(0)) == Opt.some(s0) + commonAncestor(s0, s9, Slot(0)) == Opt.some(s0) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s0, b, Slot(1)) == Opt.none(BlockRef) + + check: + commonAncestor(s1, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s1, s1, Slot(0)) == Opt.some(s1) + commonAncestor(s1, s2, Slot(0)) == Opt.some(s1) + commonAncestor(s1, s3, Slot(0)) == Opt.some(s0) + commonAncestor(s1, s4, Slot(0)) == Opt.some(s1) + commonAncestor(s1, s5, Slot(0)) == Opt.some(s1) + commonAncestor(s1, s6, Slot(0)) == Opt.some(s0) + commonAncestor(s1, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s1, s8, Slot(0)) == Opt.some(s1) + commonAncestor(s1, s9, Slot(0)) == Opt.some(s1) + for b in [s0, s3, s6, s7]: + check commonAncestor(s1, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s1, b, Slot(2)) == Opt.none(BlockRef) + + check: + commonAncestor(s2, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s2, s1, Slot(0)) == Opt.some(s1) + commonAncestor(s2, s2, Slot(0)) == Opt.some(s2) + commonAncestor(s2, s3, Slot(0)) == Opt.some(s0) + commonAncestor(s2, s4, Slot(0)) == Opt.some(s2) + commonAncestor(s2, s5, Slot(0)) == Opt.some(s2) + commonAncestor(s2, s6, Slot(0)) == Opt.some(s0) + commonAncestor(s2, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s2, s8, Slot(0)) == Opt.some(s2) + commonAncestor(s2, s9, Slot(0)) == Opt.some(s2) + for b in [s0, s3, s6, s7]: + check commonAncestor(s2, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s3, s6, s7]: + check commonAncestor(s2, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s2, b, Slot(3)) == Opt.none(BlockRef) + + check: + commonAncestor(s3, s0, Slot(0)) == Opt.some(s0) + 
commonAncestor(s3, s1, Slot(0)) == Opt.some(s0) + commonAncestor(s3, s2, Slot(0)) == Opt.some(s0) + commonAncestor(s3, s3, Slot(0)) == Opt.some(s3) + commonAncestor(s3, s4, Slot(0)) == Opt.some(s0) + commonAncestor(s3, s5, Slot(0)) == Opt.some(s0) + commonAncestor(s3, s6, Slot(0)) == Opt.some(s3) + commonAncestor(s3, s7, Slot(0)) == Opt.some(s3) + commonAncestor(s3, s8, Slot(0)) == Opt.some(s0) + commonAncestor(s3, s9, Slot(0)) == Opt.some(s0) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s3, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s3, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s3, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s3, b, Slot(4)) == Opt.none(BlockRef) + + check: + commonAncestor(s4, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s4, s1, Slot(0)) == Opt.some(s1) + commonAncestor(s4, s2, Slot(0)) == Opt.some(s2) + commonAncestor(s4, s3, Slot(0)) == Opt.some(s0) + commonAncestor(s4, s4, Slot(0)) == Opt.some(s4) + commonAncestor(s4, s5, Slot(0)) == Opt.some(s2) + commonAncestor(s4, s6, Slot(0)) == Opt.some(s0) + commonAncestor(s4, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s4, s8, Slot(0)) == Opt.some(s4) + commonAncestor(s4, s9, Slot(0)) == Opt.some(s4) + for b in [s0, s3, s6, s7]: + check commonAncestor(s4, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s3, s6, s7]: + check commonAncestor(s4, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s5, s6, s7]: + check commonAncestor(s4, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s5, s6, s7]: + check commonAncestor(s4, b, Slot(4)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s4, b, Slot(5)) == Opt.none(BlockRef) + + check: + commonAncestor(s5, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s5, s1, Slot(0)) == Opt.some(s1) + 
commonAncestor(s5, s2, Slot(0)) == Opt.some(s2) + commonAncestor(s5, s3, Slot(0)) == Opt.some(s0) + commonAncestor(s5, s4, Slot(0)) == Opt.some(s2) + commonAncestor(s5, s5, Slot(0)) == Opt.some(s5) + commonAncestor(s5, s6, Slot(0)) == Opt.some(s0) + commonAncestor(s5, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s5, s8, Slot(0)) == Opt.some(s2) + commonAncestor(s5, s9, Slot(0)) == Opt.some(s2) + for b in [s0, s3, s6, s7]: + check commonAncestor(s5, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s3, s6, s7]: + check commonAncestor(s5, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]: + check commonAncestor(s5, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]: + check commonAncestor(s5, b, Slot(4)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]: + check commonAncestor(s5, b, Slot(5)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s5, b, Slot(6)) == Opt.none(BlockRef) + + check: + commonAncestor(s6, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s6, s1, Slot(0)) == Opt.some(s0) + commonAncestor(s6, s2, Slot(0)) == Opt.some(s0) + commonAncestor(s6, s3, Slot(0)) == Opt.some(s3) + commonAncestor(s6, s4, Slot(0)) == Opt.some(s0) + commonAncestor(s6, s5, Slot(0)) == Opt.some(s0) + commonAncestor(s6, s6, Slot(0)) == Opt.some(s6) + commonAncestor(s6, s7, Slot(0)) == Opt.some(s6) + commonAncestor(s6, s8, Slot(0)) == Opt.some(s0) + commonAncestor(s6, s9, Slot(0)) == Opt.some(s0) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s6, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s6, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s6, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s8, s9]: + check commonAncestor(s6, b, Slot(4)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s8, s9]: + 
check commonAncestor(s6, b, Slot(5)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s8, s9]: + check commonAncestor(s6, b, Slot(6)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s6, b, Slot(7)) == Opt.none(BlockRef) + + check: + commonAncestor(s7, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s7, s1, Slot(0)) == Opt.some(s0) + commonAncestor(s7, s2, Slot(0)) == Opt.some(s0) + commonAncestor(s7, s3, Slot(0)) == Opt.some(s3) + commonAncestor(s7, s4, Slot(0)) == Opt.some(s0) + commonAncestor(s7, s5, Slot(0)) == Opt.some(s0) + commonAncestor(s7, s6, Slot(0)) == Opt.some(s6) + commonAncestor(s7, s7, Slot(0)) == Opt.some(s7) + commonAncestor(s7, s8, Slot(0)) == Opt.some(s0) + commonAncestor(s7, s9, Slot(0)) == Opt.some(s0) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s7, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s7, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s4, s5, s8, s9]: + check commonAncestor(s7, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s8, s9]: + check commonAncestor(s7, b, Slot(4)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s8, s9]: + check commonAncestor(s7, b, Slot(5)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s8, s9]: + check commonAncestor(s7, b, Slot(6)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s8, s9]: + check commonAncestor(s7, b, Slot(7)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s7, b, Slot(8)) == Opt.none(BlockRef) + + check: + commonAncestor(s8, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s8, s1, Slot(0)) == Opt.some(s1) + commonAncestor(s8, s2, Slot(0)) == Opt.some(s2) + commonAncestor(s8, s3, Slot(0)) == Opt.some(s0) + commonAncestor(s8, s4, Slot(0)) == Opt.some(s4) + commonAncestor(s8, s5, Slot(0)) == Opt.some(s2) + commonAncestor(s8, s6, Slot(0)) == Opt.some(s0) 
+ commonAncestor(s8, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s8, s8, Slot(0)) == Opt.some(s8) + commonAncestor(s8, s9, Slot(0)) == Opt.some(s8) + for b in [s0, s3, s6, s7]: + check commonAncestor(s8, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s3, s6, s7]: + check commonAncestor(s8, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s5, s6, s7]: + check commonAncestor(s8, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s5, s6, s7]: + check commonAncestor(s8, b, Slot(4)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s8, b, Slot(5)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s8, b, Slot(6)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s8, b, Slot(7)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s8, b, Slot(8)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s8, b, Slot(9)) == Opt.none(BlockRef) + + check: + commonAncestor(s9, s0, Slot(0)) == Opt.some(s0) + commonAncestor(s9, s1, Slot(0)) == Opt.some(s1) + commonAncestor(s9, s2, Slot(0)) == Opt.some(s2) + commonAncestor(s9, s3, Slot(0)) == Opt.some(s0) + commonAncestor(s9, s4, Slot(0)) == Opt.some(s4) + commonAncestor(s9, s5, Slot(0)) == Opt.some(s2) + commonAncestor(s9, s6, Slot(0)) == Opt.some(s0) + commonAncestor(s9, s7, Slot(0)) == Opt.some(s0) + commonAncestor(s9, s8, Slot(0)) == Opt.some(s8) + commonAncestor(s9, s9, Slot(0)) == Opt.some(s9) + for b in [s0, s3, s6, s7]: + check commonAncestor(s9, b, Slot(1)) == Opt.none(BlockRef) + for b in [s0, s1, s3, s6, s7]: + check commonAncestor(s9, b, Slot(2)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s5, s6, s7]: + check commonAncestor(s9, b, Slot(3)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s5, s6, s7]: + check commonAncestor(s9, b, Slot(4)) == Opt.none(BlockRef) + for b in [s0, s1, s2, 
s3, s4, s5, s6, s7]: + check commonAncestor(s9, b, Slot(5)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s9, b, Slot(6)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s9, b, Slot(7)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7]: + check commonAncestor(s9, b, Slot(8)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8]: + check commonAncestor(s9, b, Slot(9)) == Opt.none(BlockRef) + for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]: + check commonAncestor(s9, b, Slot(10)) == Opt.none(BlockRef) + suite "BlockSlot and helpers": test "atSlot sanity": let From 485a07103b75544f4dfc4cd9f403fe2bbded33e3 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Thu, 11 May 2023 12:16:15 +0200 Subject: [PATCH 6/6] lint --- AllTests-mainnet.md | 5 +++-- tests/test_block_dag.nim | 1 + tests/test_blockchain_dag.nim | 2 +- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index d9b706260c..99589a17e7 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -99,10 +99,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1 OK: 2/2 Fail: 0/2 Skip: 0/2 ## BlockRef and helpers ```diff ++ commonAncestor sanity OK + get_ancestor sanity OK + isAncestorOf sanity OK ``` -OK: 2/2 Fail: 0/2 Skip: 0/2 +OK: 3/3 Fail: 0/3 Skip: 0/3 ## BlockSlot and helpers ```diff + atSlot sanity OK @@ -674,4 +675,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2 OK: 9/9 Fail: 0/9 Skip: 0/9 ---TOTAL--- -OK: 383/388 Fail: 0/388 Skip: 5/388 +OK: 384/389 Fail: 0/389 Skip: 5/389 diff --git a/tests/test_block_dag.nim b/tests/test_block_dag.nim index a9d8359910..33d55d7c85 100644 --- a/tests/test_block_dag.nim +++ b/tests/test_block_dag.nim @@ -74,6 +74,7 @@ suite "BlockRef and helpers": s7 = BlockRef(bid: BlockId(slot: Slot(7)), parent: s6) s8 = BlockRef(bid: BlockId(slot: Slot(8)), parent: s4) s9 = BlockRef(bid: BlockId(slot: Slot(9)), parent: s8) + check: commonAncestor(s0, 
s0, Slot(0)) == Opt.some(s0) commonAncestor(s0, s1, Slot(0)) == Opt.some(s0) diff --git a/tests/test_blockchain_dag.nim b/tests/test_blockchain_dag.nim index 1e1b347f19..bdd2eba16f 100644 --- a/tests/test_blockchain_dag.nim +++ b/tests/test_blockchain_dag.nim @@ -1263,7 +1263,7 @@ suite "Shufflings": test "Accelerated shuffling computation": randomize() let forkBlocks = dag.forkBlocks.toSeq() - for _ in 0 ..< 100: # Number of random tests (against _all_ cached states) + for _ in 0 ..< 150: # Number of random tests (against _all_ cached states) let blck = sample(forkBlocks).data epoch = rand(GENESIS_EPOCH .. maxEpochOfInterest)