diff --git a/Makefile b/Makefile index 007f49f5b3..d637d26cf8 100644 --- a/Makefile +++ b/Makefile @@ -94,13 +94,15 @@ pyspec: install_test: python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[lint]; python3 -m pip install -e .[test] +# Testing against `minimal` config by default test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.mainnet --cov=eth2spec.altair.mainnet --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec +# Testing against `minimal` config by default find_test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.mainnet --cov=eth2spec.altair.mainnet --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec citest: pyspec mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \ diff --git a/README.md b/README.md index b74102c30d..a9a840bdef 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,7 @@ while the details are in review and may change. * [ethereum.org](https://ethereum.org) high-level description of the merge [here](https://ethereum.org/en/eth2/docking/) * Specifications: * [Beacon Chain changes](specs/merge/beacon-chain.md) + * [Merge fork](specs/merge/fork.md) * [Fork Choice changes](specs/merge/fork-choice.md) * [Validator additions](specs/merge/validator.md) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 47b02aa8d9..dd5b394af4 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -32,7 +32,7 @@ SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. 
Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY: 4294967296 +MIN_ANCHOR_POW_BLOCK_DIFFICULTY: 4294967296 # Time parameters diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 1a04c4ecd2..37a428b50e 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -31,7 +31,7 @@ SHARDING_FORK_VERSION: 0x03000001 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY: 4294967296 +MIN_ANCHOR_POW_BLOCK_DIFFICULTY: 4294967296 # Time parameters diff --git a/presets/mainnet/altair.yaml b/presets/mainnet/altair.yaml index 9f0ad9b4ce..9a17b78032 100644 --- a/presets/mainnet/altair.yaml +++ b/presets/mainnet/altair.yaml @@ -14,8 +14,8 @@ PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2 # --------------------------------------------------------------- # 2**9 (= 512) SYNC_COMMITTEE_SIZE: 512 -# 2**9 (= 512) -EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512 +# 2**8 (= 256) +EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256 # Sync protocol diff --git a/presets/mainnet/sharding.yaml b/presets/mainnet/sharding.yaml index 9a81c8cdc6..2b78855fc2 100644 --- a/presets/mainnet/sharding.yaml +++ b/presets/mainnet/sharding.yaml @@ -15,6 +15,8 @@ MAX_SHARD_PROPOSER_SLASHINGS: 16 # Shard block configs # --------------------------------------------------------------- MAX_SHARD_HEADERS_PER_SHARD: 4 +# 2**8 (= 256) +SHARD_STATE_MEMORY_SLOTS: 256 # 2**11 (= 2,048) MAX_SAMPLES_PER_BLOCK: 2048 # 2**10 (= 1,1024) diff --git a/presets/minimal/sharding.yaml b/presets/minimal/sharding.yaml index 7dedbc9260..10f79c96ea 100644 --- a/presets/minimal/sharding.yaml +++ b/presets/minimal/sharding.yaml @@ -15,6 +15,8 @@ MAX_SHARD_PROPOSER_SLASHINGS: 4 # Shard block configs # --------------------------------------------------------------- MAX_SHARD_HEADERS_PER_SHARD: 4 +# 2**8 (= 256) +SHARD_STATE_MEMORY_SLOTS: 256 # 2**11 (= 2,048) MAX_SAMPLES_PER_BLOCK: 2048 # 2**10 (= 1,1024) diff --git a/setup.py b/setup.py 
index 8095e3b7f4..9ecf8261fd 100644 --- a/setup.py +++ b/setup.py @@ -55,6 +55,12 @@ def floorlog2(x: int) -> uint64: ''' +OPTIMIZED_BLS_AGGREGATE_PUBKEYS = ''' +def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: + return bls.AggregatePKs(pubkeys) +''' + + class ProtocolDefinition(NamedTuple): # just function definitions currently. May expand with configuration vars in future. functions: Dict[str, str] @@ -299,10 +305,7 @@ def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO @classmethod @abstractmethod - def invariant_checks(cls) -> str: - """ - The invariant checks - """ + def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: raise NotImplementedError() @classmethod @@ -426,8 +429,8 @@ def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: return {} @classmethod - def invariant_checks(cls) -> str: - return '' + def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: + return functions @classmethod def build_spec(cls, preset_name: str, @@ -476,12 +479,10 @@ def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]: return {**super().hardcoded_ssz_dep_constants(), **constants} @classmethod - def invariant_checks(cls) -> str: - return ''' -assert ( - TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT -) == WEIGHT_DENOMINATOR''' - + def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: + if "eth2_aggregate_pubkeys" in functions: + functions["eth2_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip() + return super().implement_optimizations(functions) # # MergeSpecBuilder @@ -509,7 +510,7 @@ def sundry_functions(cls) -> str: def get_pow_block(hash: Bytes32) -> PowBlock: return PowBlock(block_hash=hash, is_valid=True, is_processed=True, - total_difficulty=config.TRANSITION_TOTAL_DIFFICULTY) + total_difficulty=uint256(0), difficulty=uint256(0)) def get_execution_state(execution_state_root: 
Bytes32) -> ExecutionState: @@ -588,7 +589,8 @@ def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str for k in list(spec_object.functions): if "ceillog2" in k or "floorlog2" in k: del spec_object.functions[k] - functions_spec = '\n\n\n'.join(spec_object.functions.values()) + functions = builder.implement_optimizations(spec_object.functions) + functions_spec = '\n\n\n'.join(functions.values()) # Access global dict of config vars for runtime configurables for name in spec_object.config_vars.keys(): @@ -596,7 +598,7 @@ def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str def format_config_var(name: str, vardef: VariableDefinition) -> str: if vardef.type_name is None: - out = f'{name}={vardef.value}' + out = f'{name}={vardef.value},' else: out = f'{name}={vardef.type_name}({vardef.value}),' if vardef.comment is not None: @@ -647,7 +649,6 @@ def format_constant(name: str, vardef: VariableDefinition) -> str: # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are # as same as the spec definition. 
+ ('\n\n\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '') - + ('\n' + builder.invariant_checks() if builder.invariant_checks() != '' else '') + '\n' ) return spec @@ -831,7 +832,7 @@ def initialize_options(self): self.out_dir = 'pyspec_output' self.build_targets = """ minimal:presets/minimal:configs/minimal.yaml - mainnet:presets/mainnet:configs/mainnet.yaml + mainnet:presets/mainnet:configs/mainnet.yaml """ def finalize_options(self): @@ -853,6 +854,7 @@ def finalize_options(self): specs/phase0/validator.md specs/phase0/weak-subjectivity.md specs/altair/beacon-chain.md + specs/altair/bls.md specs/altair/fork.md specs/altair/validator.md specs/altair/p2p-interface.md @@ -865,6 +867,7 @@ def finalize_options(self): specs/phase0/validator.md specs/phase0/weak-subjectivity.md specs/merge/beacon-chain.md + specs/merge/fork.md specs/merge/fork-choice.md specs/merge/validator.md """ @@ -912,7 +915,8 @@ def run(self): if not self.dry_run: with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out: - out.write("") + # `mainnet` is the default spec. + out.write("from . 
import mainnet as spec # noqa:F401\n") class BuildPyCommand(build_py): @@ -1006,7 +1010,7 @@ def run(self): python_requires=">=3.8, <4", extras_require={ "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], - "lint": ["flake8==3.7.7", "mypy==0.750"], + "lint": ["flake8==3.7.7", "mypy==0.812"], "generator": ["python-snappy==0.5.4"], }, install_requires=[ @@ -1016,7 +1020,7 @@ def run(self): "py_ecc==5.2.0", "milagro_bls_binding==1.6.3", "dataclasses==0.6", - "remerkleable==0.1.19", + "remerkleable==0.1.20", RUAMEL_YAML_VERSION, "lru-dict==1.1.6", MARKO_VERSION, diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 7412a8490c..3bd82aec88 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -26,8 +26,7 @@ - [`SyncAggregate`](#syncaggregate) - [`SyncCommittee`](#synccommittee) - [Helper functions](#helper-functions) - - [`Predicates`](#predicates) - - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify) + - [Crypto](#crypto) - [Misc](#misc-1) - [`add_flag`](#add_flag) - [`has_flag`](#has_flag) @@ -45,7 +44,7 @@ - [Block processing](#block-processing) - [Modified `process_attestation`](#modified-process_attestation) - [Modified `process_deposit`](#modified-process_deposit) - - [Sync committee processing](#sync-committee-processing) + - [Sync aggregate processing](#sync-aggregate-processing) - [Epoch processing](#epoch-processing) - [Justification and finalization](#justification-and-finalization) - [Inactivity scores](#inactivity-scores) @@ -86,10 +85,10 @@ Altair is the first beacon chain hard fork. 
Its main features are: | Name | Value | | - | - | -| `TIMELY_SOURCE_WEIGHT` | `uint64(12)` | -| `TIMELY_TARGET_WEIGHT` | `uint64(24)` | -| `TIMELY_HEAD_WEIGHT` | `uint64(12)` | -| `SYNC_REWARD_WEIGHT` | `uint64(8)` | +| `TIMELY_SOURCE_WEIGHT` | `uint64(14)` | +| `TIMELY_TARGET_WEIGHT` | `uint64(26)` | +| `TIMELY_HEAD_WEIGHT` | `uint64(14)` | +| `SYNC_REWARD_WEIGHT` | `uint64(2)` | | `PROPOSER_WEIGHT` | `uint64(8)` | | `WEIGHT_DENOMINATOR` | `uint64(64)` | @@ -107,7 +106,6 @@ Altair is the first beacon chain hard fork. Its main features are: | Name | Value | | - | - | -| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` | | `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]` | ## Preset @@ -129,7 +127,7 @@ This patch updates a few configuration values to move penalty parameters closer | Name | Value | Unit | Duration | | - | - | - | - | | `SYNC_COMMITTEE_SIZE` | `uint64(2**9)` (= 512) | Validators | | -| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**9)` (= 512) | epochs | ~54 hours | +| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**8)` (= 256) | epochs | ~27 hours | ## Configuration @@ -137,8 +135,8 @@ This patch updates a few configuration values to move penalty parameters closer | Name | Value | Description | | - | - | - | -| `INACTIVITY_SCORE_BIAS` | `uint64(4)` | score points per inactive epoch | -| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(16)` | score points per recovering epoch | +| `INACTIVITY_SCORE_BIAS` | `uint64(2**2)` (= 4) | score points per inactive epoch | +| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(2**4)` (= 16) | score points per leak-free epoch | ## Containers @@ -157,8 +155,7 @@ class BeaconBlockBody(Container): attestations: List[Attestation, MAX_ATTESTATIONS] deposits: List[Deposit, MAX_DEPOSITS] voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] - # [New in Altair] - sync_aggregate: SyncAggregate + sync_aggregate: SyncAggregate # [New in Altair] ``` #### 
`BeaconState` @@ -221,19 +218,11 @@ class SyncCommittee(Container): ## Helper functions -### `Predicates` +### Crypto -#### `eth2_fast_aggregate_verify` - -```python -def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: - """ - Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. - """ - if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: - return True - return bls.FastAggregateVerify(pubkeys, message, signature) -``` +Refer to the definitions in the [phase 0 document regarding BLS signatures](../phase0/beacon-chain.md#bls-signatures) +and the extensions defined in the [Altair BLS document](./bls.md). This specification assumes knowledge of +the functionality described in those documents. ### Misc @@ -266,10 +255,7 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool: ```python def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: """ - Return the sequence of sync committee indices (which may include duplicate indices) - for the next sync committee, given a ``state`` at a sync committee period boundary. - - Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128) + Return the sync committee indices, with possible duplicates, for the next sync committee. """ epoch = Epoch(get_current_epoch(state) + 1) @@ -292,25 +278,16 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd #### `get_next_sync_committee` +*Note*: The function `get_next_sync_committee` should only be called at sync committee period boundaries. + ```python def get_next_sync_committee(state: BeaconState) -> SyncCommittee: """ - Return the *next* sync committee for a given ``state``. - - ``SyncCommittee`` contains an aggregate pubkey that enables - resource-constrained clients to save some computation when verifying - the sync committee's signature. 
- - ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices`` - returns duplicate indices. Implementations must take care when handling - optimizations relating to aggregation and verification in the presence of duplicates. - - Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates`` - as ``get_next_sync_committee_indices`` is not stable within a given period. + Return the next sync committee, with possible pubkey duplicates. """ indices = get_next_sync_committee_indices(state) pubkeys = [state.validators[index].pubkey for index in indices] - aggregate_pubkey = bls.AggregatePKs(pubkeys) + aggregate_pubkey = eth2_aggregate_pubkeys(pubkeys) return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) ``` @@ -325,14 +302,12 @@ def get_base_reward_per_increment(state: BeaconState) -> Gwei: *Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH` and the use of increment based accounting. +*Note*: On average an optimally performing validator earns one base reward per epoch. + ```python def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Return the base reward for the validator defined by ``index`` with respect to the current ``state``. - - Note: An optimally performing validator can earn one base reward per epoch over a long time horizon. - This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal - and sync committees). 
""" increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT return Gwei(increments * get_base_reward_per_increment(state)) @@ -471,7 +446,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) # [Modified in Altair] - process_sync_committee(state, block.body.sync_aggregate) # [New in Altair] + process_sync_aggregate(state, block.body.sync_aggregate) # [New in Altair] ``` #### Modified `process_attestation` @@ -557,17 +532,19 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: increase_balance(state, index, amount) ``` -#### Sync committee processing +#### Sync aggregate processing + +*Note*: The function `process_sync_aggregate` is new. ```python -def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None: +def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None: # Verify sync committee aggregate signature signing over the previous slot block root committee_pubkeys = state.current_sync_committee.pubkeys - participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, aggregate.sync_committee_bits) if bit] + participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit] previous_slot = max(state.slot, Slot(1)) - Slot(1) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) - assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, aggregate.sync_committee_signature) + assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) # Compute participant and proposer rewards total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT @@ -579,10 +556,12 @@ def 
process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None # Apply participant and proposer rewards all_pubkeys = [v.pubkey for v in state.validators] committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] - participant_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit] - for participant_index in participant_indices: - increase_balance(state, participant_index, participant_reward) - increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits): + if participation_bit: + increase_balance(state, participant_index, participant_reward) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + else: + decrease_balance(state, participant_index, participant_reward) ``` ### Epoch processing @@ -627,17 +606,17 @@ def process_justification_and_finalization(state: BeaconState) -> None: ```python def process_inactivity_updates(state: BeaconState) -> None: - # Score updates based on previous epoch participation, skip genesis epoch + # Skip the genesis epoch as score updates are based on the previous epoch participation if get_current_epoch(state) == GENESIS_EPOCH: return for index in get_eligible_validator_indices(state): - # Increase inactivity score of inactive validators + # Increase the inactivity score of inactive validators if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)): state.inactivity_scores[index] -= min(1, state.inactivity_scores[index]) else: state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS - # Decrease the score of all validators for forgiveness when not during a leak + # Decrease the inactivity score of all eligible validators during a leak-free epoch if not is_in_inactivity_leak(state): state.inactivity_scores[index] -= 
min(INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index]) ``` diff --git a/specs/altair/bls.md b/specs/altair/bls.md new file mode 100644 index 0000000000..5292360561 --- /dev/null +++ b/specs/altair/bls.md @@ -0,0 +1,65 @@ +# Ethereum 2.0 Altair BLS extensions + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Constants](#constants) +- [Extensions](#extensions) + - [`eth2_aggregate_pubkeys`](#eth2_aggregate_pubkeys) + - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify) + + + + +## Introduction + +A number of extensions are defined to handle BLS signatures in the Altair upgrade. + +Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed, including type definitions. + +## Constants + +| Name | Value | +| - | - | +| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` | + +## Extensions + +### `eth2_aggregate_pubkeys` + +An additional function `AggregatePKs` is defined to extend the +[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) +spec referenced in the phase 0 document. + +```python +def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: + """ + Return the aggregate public key for the public keys in ``pubkeys``. + + NOTE: the ``+`` operation should be interpreted as elliptic curve point addition, which takes as input + elliptic curve points that must be decoded from the input ``BLSPubkey``s. + This implementation is for demonstrative purposes only and ignores encoding/decoding concerns. + Refer to the BLS signature draft standard for more information. 
+ """ + assert len(pubkeys) > 0 + result = copy(pubkeys[0]) + for pubkey in pubkeys[1:]: + result += pubkey + return result +``` + +### `eth2_fast_aggregate_verify` + +```python +def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: + """ + Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. + """ + if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: + return True + return bls.FastAggregateVerify(pubkeys, message, signature) +``` diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 6f250b57eb..fc0a8a35fd 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -74,13 +74,30 @@ New topics are added in Altair to support the sync committees and the beacon blo The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document. +The derivation of the `message-id` has changed starting with Altair to incorporate the message `topic` along with the message `data`. These are fields of the `Message` Protobuf, and interpreted as empty byte strings if missing. +The `message-id` MUST be the following 20 byte value computed from the message: +* If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of + the concatenation of the following data: `MESSAGE_DOMAIN_VALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), + the topic byte string, and the snappy decompressed message data: + i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data))[:20]`. 
+* Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of + the concatenation of the following data: `MESSAGE_DOMAIN_INVALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), + the topic byte string, and the raw message data: + i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20]`. + +Implementations may need to carefully handle the function that computes the `message-id`. In particular, messages on topics with the Phase 0 +fork digest should use the `message-id` procedure specified in the Phase 0 document. +Messages on topics with the Altair fork digest should use the `message-id` procedure defined here. +If an implementation only supports a single `message-id` function, it can define a switch inline; +for example, `if topic in phase0_topics: return phase0_msg_id_fn(message) else return altair_msg_id_fn(message)`. + The new topics along with the type of the `data` field of a gossipsub message are given in this table: | Name | Message Type | | - | - | | `beacon_block` | `SignedBeaconBlock` (modified) | | `sync_committee_contribution_and_proof` | `SignedContributionAndProof` | -| `sync_committee_{subnet_id}` | `SyncCommitteeSignature` | +| `sync_committee_{subnet_id}` | `SyncCommitteeMessage` | Definitions of these new types can be found in the [Altair validator guide](./validator.md#containers). @@ -120,7 +137,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 return sync_committee.pubkeys[i:i + sync_subcommittee_size] ``` -- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`. +- _[IGNORE]_ The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `contribution.slot == current_slot`. - _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources). 
- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. - _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`. @@ -139,12 +156,13 @@ Sync committee subnets are used to propagate unaggregated sync committee signatu The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee signatures to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic. -The following validations MUST pass before forwarding the `sync_committee_signature` on the network: +The following validations MUST pass before forwarding the `sync_committee_message` on the network: -- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`. -- _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources). -- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`. -- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`. +- _[IGNORE]_ The signature's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `sync_committee_message.slot == current_slot`. +- _[IGNORE]_ The block being signed over (`sync_committee_message.beacon_block_root`) has been seen (via both gossip and non-gossip sources). +- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`. 
+ Note this validation is _per topic_ so that for a given `slot`, multiple messages could be forwarded with the same `validator_index` as long as the `subnet_id`s are distinct. +- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`. Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee. - _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`. @@ -156,7 +174,7 @@ The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee. Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees. -Unaggregated signatures (along with metadata) are sent as `SyncCommitteeSignature`s on the `sync_committee_{subnet_id}` topics. +Unaggregated signatures (along with metadata) are sent as `SyncCommitteeMessage`s on the `sync_committee_{subnet_id}` topics. Aggregated sync committee signatures are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic. 
diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 3b3362b22c..f193e3591c 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -14,7 +14,7 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./ - [Constants](#constants) - [Misc](#misc) - [Containers](#containers) - - [`SyncCommitteeSignature`](#synccommitteesignature) + - [`SyncCommitteeMessage`](#synccommitteemessage) - [`SyncCommitteeContribution`](#synccommitteecontribution) - [`ContributionAndProof`](#contributionandproof) - [`SignedContributionAndProof`](#signedcontributionandproof) @@ -30,9 +30,9 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./ - [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock) - [Attesting and attestation aggregation](#attesting-and-attestation-aggregation) - [Sync committees](#sync-committees) - - [Sync committee signatures](#sync-committee-signatures) - - [Prepare sync committee signature](#prepare-sync-committee-signature) - - [Broadcast sync committee signature](#broadcast-sync-committee-signature) + - [Sync committee messages](#sync-committee-messages) + - [Prepare sync committee message](#prepare-sync-committee-message) + - [Broadcast sync committee message](#broadcast-sync-committee-message) - [Sync committee contributions](#sync-committee-contributions) - [Aggregation selection](#aggregation-selection) - [Construct sync committee contribution](#construct-sync-committee-contribution) @@ -78,10 +78,10 @@ This document is currently illustrative for early Altair testnets and some parts ## Containers -### `SyncCommitteeSignature` +### `SyncCommitteeMessage` ```python -class SyncCommitteeSignature(Container): +class SyncCommitteeMessage(Container): # Slot to which this contribution pertains slot: Slot # Block root for this signature @@ -243,7 +243,7 @@ def process_sync_committee_contributions(block: BeaconBlock, block.body.sync_aggregate = sync_aggregate ``` 
-*Note*: The resulting block must pass the validations for the `SyncAggregate` defined in `process_sync_committee` defined in the [state transition document](./beacon-chain.md#sync-committee-processing). +*Note*: The resulting block must pass the validations for the `SyncAggregate` defined in `process_sync_aggregate` defined in the [state transition document](./beacon-chain.md#sync-aggregate-processing). In particular, this means `SyncCommitteeContribution`s received from gossip must have a `beacon_block_root` that matches the proposer's local view of the chain. #### Packaging into a `SignedBeaconBlock` @@ -258,34 +258,39 @@ There is no change compared to the phase 0 document. ### Sync committees Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot. -Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeSignature`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers. +Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeMessage`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers. This process occurs each slot. -#### Sync committee signatures +#### Sync committee messages -##### Prepare sync committee signature +##### Prepare sync committee message -If a validator is in the current sync committee (i.e. 
`is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`. +If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`. This logic is triggered upon the same conditions as when producing an attestation. -Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first. +Meaning, a sync committee member should produce and broadcast a `SyncCommitteeMessage` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first. -`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. 
+`get_sync_committee_message(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. ```python -def get_sync_committee_signature(state: BeaconState, - block_root: Root, - validator_index: ValidatorIndex, - privkey: int) -> SyncCommitteeSignature: +def get_sync_committee_message(state: BeaconState, + block_root: Root, + validator_index: ValidatorIndex, + privkey: int) -> SyncCommitteeMessage: epoch = get_current_epoch(state) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch) signing_root = compute_signing_root(block_root, domain) signature = bls.Sign(privkey, signing_root) - return SyncCommitteeSignature(slot=state.slot, validator_index=validator_index, signature=signature) + return SyncCommitteeMessage( + slot=state.slot, + beacon_block_root=block_root, + validator_index=validator_index, + signature=signature, + ) ``` -##### Broadcast sync committee signature +##### Broadcast sync committee message The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic. @@ -312,11 +317,11 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali *Note*: Subnet assignment does not change during the duration of a validator's assignment to a given sync committee. -*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_signature` on each of the distinct subnets. 
+*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_message` on each of the distinct subnets. #### Sync committee contributions -Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeSignature`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block. +Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeMessage`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block. ##### Aggregation selection @@ -347,9 +352,9 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool: ##### Construct sync committee contribution -If a validator is selected to aggregate the `SyncCommitteeSignature`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`. +If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`. 
-Given all of the (valid) collected `sync_committee_signatures: Set[SyncCommitteeSignature]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: +Given all of the (valid) collected `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: ###### Slot @@ -357,7 +362,7 @@ Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the ###### Beacon block root -Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_signatures`. +Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_messages`. ###### Subcommittee index @@ -366,15 +371,15 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co ###### Aggregation bits Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee. -An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggegration_bits`. 
+An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_message.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`. For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. -*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`. +*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeMessage`. ###### Signature -Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_signatures` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. +Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee. 
diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index ada6c1a258..697bd0c96b 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -14,8 +14,6 @@ - [Custom types](#custom-types) - [Constants](#constants) - [Execution](#execution) -- [Configuration](#configuration) - - [Transition](#transition) - [Containers](#containers) - [Extended containers](#extended-containers) - [`BeaconBlockBody`](#beaconblockbody) @@ -35,6 +33,7 @@ - [Block processing](#block-processing) - [Execution payload processing](#execution-payload-processing) - [`process_execution_payload`](#process_execution_payload) +- [Initialize state for pure Merge testnets and test vectors](#initialize-state-for-pure-merge-testnets-and-test-vectors) @@ -62,18 +61,6 @@ We define the following Python custom types for type hinting and readability: | `MAX_EXECUTION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) | | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) | -## Configuration - -Warning: this configuration is not definitive. - -### Transition - -| Name | Value | -| - | - | -| `MERGE_FORK_VERSION` | `Version('0x02000000')` | -| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | -| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** | - ## Containers ### Extended containers @@ -247,3 +234,63 @@ def process_execution_payload(state: BeaconState, transactions_root=hash_tree_root(execution_payload.transactions), ) ``` + +## Initialize state for pure Merge testnets and test vectors + +This helper function is only for initializing the state for pure Merge testnets and tests. + +*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `MERGE_FORK_VERSION` as the current fork version, (2) utilizing the Merge `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial `latest_execution_payload_header`. 
+ +```python +def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32, + eth1_timestamp: uint64, + deposits: Sequence[Deposit]) -> BeaconState: + fork = Fork( + previous_version=GENESIS_FORK_VERSION, + current_version=MERGE_FORK_VERSION, # [Modified in Merge] + epoch=GENESIS_EPOCH, + ) + state = BeaconState( + genesis_time=eth1_timestamp + GENESIS_DELAY, + fork=fork, + eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), + latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + ) + + # Process deposits + leaves = list(map(lambda deposit: deposit.data, deposits)) + for index, deposit in enumerate(deposits): + deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) + state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) + process_deposit(state, deposit) + + # Process activations + for index, validator in enumerate(state.validators): + balance = state.balances[index] + validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + if validator.effective_balance == MAX_EFFECTIVE_BALANCE: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + + # Set genesis validators root for domain separation and chain versioning + state.genesis_validators_root = hash_tree_root(state.validators) + + # [New in Merge] Construct execution payload header + # Note: initialized with zero block height + state.latest_execution_payload_header = ExecutionPayloadHeader( + block_hash=eth1_block_hash, + parent_hash=Hash32(), + coinbase=Bytes20(), + state_root=Bytes32(), + number=uint64(0), + gas_limit=uint64(0), + gas_used=uint64(0), + timestamp=eth1_timestamp, + receipt_root=Bytes32(), + logs_bloom=ByteVector[BYTES_PER_LOGS_BLOOM](), + transactions_root=Root(), + ) + + return state +``` diff 
--git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 9e6c341bc0..56345dd90e 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -12,13 +12,13 @@ - [`ExecutionEngine`](#executionengine) - [`set_head`](#set_head) - [`finalize_block`](#finalize_block) -- [Containers](#containers) - - [`PowBlock`](#powblock) -- [Helper functions](#helper-functions) - - [`get_pow_block`](#get_pow_block) - - [`is_valid_transition_block`](#is_valid_transition_block) +- [Helpers](#helpers) + - [`TransitionStore`](#transitionstore) + - [`PowBlock`](#powblock) + - [`get_pow_block`](#get_pow_block) + - [`is_valid_terminal_pow_block`](#is_valid_terminal_pow_block) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - - [`on_block`](#on_block) + - [`on_block`](#on_block) @@ -66,44 +66,52 @@ def finalize_block(self: ExecutionEngine, block_hash: Hash32) -> bool: ... ``` -## Containers +## Helpers -#### `PowBlock` +### `TransitionStore` ```python -class PowBlock(Container): +@dataclass +class TransitionStore(object): + transition_total_difficulty: uint256 +``` + +### `PowBlock` + +```python +@dataclass +class PowBlock(object): block_hash: Hash32 is_processed: boolean is_valid: boolean total_difficulty: uint256 + difficulty: uint256 ``` -## Helper functions - -#### `get_pow_block` +### `get_pow_block` Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given the hash of the PoW block returns its data. *Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that haven't been processed yet. Either extending this existing method or implementing a new one is required. -#### `is_valid_transition_block` +### `is_valid_terminal_pow_block` Used by fork-choice handler, `on_block`. 
```python -def is_valid_transition_block(block: PowBlock) -> bool: - is_total_difficulty_reached = block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY +def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock) -> bool: + is_total_difficulty_reached = block.total_difficulty >= transition_store.transition_total_difficulty return block.is_valid and is_total_difficulty_reached ``` ## Updated fork-choice handlers -#### `on_block` +### `on_block` *Note*: The only modification is the addition of the verification of transition block conditions. ```python -def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: +def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: TransitionStore=None) -> None: block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states @@ -119,11 +127,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root # [New in Merge] - if is_transition_block(pre_state, block): + if (transition_store is not None) and is_transition_block(pre_state, block): # Delay consideration of block until PoW block is processed by the PoW node pow_block = get_pow_block(block.body.execution_payload.parent_hash) assert pow_block.is_processed - assert is_valid_transition_block(pow_block) + assert is_valid_terminal_pow_block(transition_store, pow_block) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/merge/fork.md b/specs/merge/fork.md new file mode 100644 index 0000000000..1f2ea7fff5 --- /dev/null +++ b/specs/merge/fork.md @@ -0,0 +1,121 @@ +# Ethereum 2.0 The Merge + +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents + + + +- [Introduction](#introduction) +- [Configuration](#configuration) +- [Fork to Merge](#fork-to-merge) + - [Fork trigger](#fork-trigger) + - [Upgrading the state](#upgrading-the-state) + - [Initializing transition store](#initializing-transition-store) + + + +## Introduction + +This document describes the process of the Merge upgrade. + +## Configuration + +Warning: this configuration is not definitive. + +| Name | Value | +| - | - | +| `MERGE_FORK_VERSION` | `Version('0x02000000')` | +| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | +| `MIN_ANCHOR_POW_BLOCK_DIFFICULTY` | **TBD** | +| `TARGET_SECONDS_TO_MERGE` | `uint64(7 * 86400)` (= 604,800) | + +## Fork to Merge + +### Fork trigger + +TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at epoch `MERGE_FORK_EPOCH`. + +Since the Merge transition process relies on `Eth1Data` in the beacon state we do want to make sure that this data is fresh. This is achieved by forcing `MERGE_FORK_EPOCH` to point to an eth1 voting period boundary, i.e. `MERGE_FORK_EPOCH` should satisfy the following condition: `MERGE_FORK_EPOCH % EPOCHS_PER_ETH1_VOTING_PERIOD == 0`. + +Note that for the pure Merge networks, we don't apply `upgrade_to_merge` since it starts with Merge version logic. + +### Upgrading the state + +If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, an irregular state change is made to upgrade to Merge. + +The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `MERGE_FORK_EPOCH * SLOTS_PER_EPOCH`. 
+Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document. +In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`. + +```python +def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState: + epoch = phase0.get_current_epoch(pre) + post = BeaconState( + # Versioning + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + current_version=MERGE_FORK_VERSION, + epoch=epoch, + ), + # History + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + # Eth1 + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + # Registry + validators=pre.validators, + balances=pre.balances, + # Randomness + randao_mixes=pre.randao_mixes, + # Slashings + slashings=pre.slashings, + # Attestations + previous_epoch_attestations=pre.previous_epoch_attestations, + current_epoch_attestations=pre.current_epoch_attestations, + # Finality + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + # Execution-layer + latest_execution_payload_header=ExecutionPayloadHeader(), + ) + + return post +``` + +### Initializing transition store + +If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, a transition store is initialized to be further utilized by the 
transition process of the Merge. + +Transition store initialization occurs after the state has been modified by corresponding `upgrade_to_merge` function. + +```python +def compute_transition_total_difficulty(anchor_pow_block: PowBlock) -> uint256: + seconds_per_voting_period = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT + pow_blocks_per_voting_period = seconds_per_voting_period // SECONDS_PER_ETH1_BLOCK + pow_blocks_to_merge = TARGET_SECONDS_TO_MERGE // SECONDS_PER_ETH1_BLOCK + pow_blocks_after_anchor_block = ETH1_FOLLOW_DISTANCE + pow_blocks_per_voting_period + pow_blocks_to_merge + anchor_difficulty = max(MIN_ANCHOR_POW_BLOCK_DIFFICULTY, anchor_pow_block.difficulty) + + return anchor_pow_block.total_difficulty + anchor_difficulty * pow_blocks_after_anchor_block + + +def get_transition_store(anchor_pow_block: PowBlock) -> TransitionStore: + transition_total_difficulty = compute_transition_total_difficulty(anchor_pow_block) + return TransitionStore(transition_total_difficulty=transition_total_difficulty) + + +def initialize_transition_store(state: BeaconState) -> TransitionStore: + pow_block = get_pow_block(state.eth1_data.block_hash) + return get_transition_store(pow_block) +``` diff --git a/specs/merge/validator.md b/specs/merge/validator.md index c4c3960596..c5a7a4c789 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -20,7 +20,6 @@ - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Execution Payload](#execution-payload) - [`get_pow_chain_head`](#get_pow_chain_head) - - [`produce_execution_payload`](#produce_execution_payload) @@ -68,18 +67,15 @@ All validator responsibilities remain unchanged other than those noted below. Na Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific. 
-###### `produce_execution_payload` - -Let `produce_execution_payload(parent_hash: Hash32, timestamp: uint64) -> ExecutionPayload` be the function that produces new instance of execution payload. -The `ExecutionEngine` protocol is used for the implementation specific part of execution payload proposals. - -* Set `block.body.execution_payload = get_execution_payload(state)` where: +* Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine)` where: ```python -def get_execution_payload(state: BeaconState, execution_engine: ExecutionEngine) -> ExecutionPayload: +def get_execution_payload(state: BeaconState, + transition_store: TransitionStore, + execution_engine: ExecutionEngine) -> ExecutionPayload: if not is_transition_completed(state): pow_block = get_pow_chain_head() - if not is_valid_transition_block(pow_block): + if not is_valid_terminal_pow_block(transition_store, pow_block): # Pre-merge, empty payload return ExecutionPayload() else: diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 5522e044de..98feba22b4 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -13,6 +13,7 @@ - [Constants](#constants) - [Misc](#misc) - [Domain types](#domain-types) + - [Shard Work Status](#shard-work-status) - [Preset](#preset) - [Misc](#misc-1) - [Shard block samples](#shard-block-samples) @@ -32,6 +33,7 @@ - [`ShardBlobReference`](#shardblobreference) - [`SignedShardBlobReference`](#signedshardblobreference) - [`ShardProposerSlashing`](#shardproposerslashing) + - [`ShardWork`](#shardwork) - [Helper functions](#helper-functions) - [Misc](#misc-2) - [`next_power_of_two`](#next_power_of_two) @@ -49,14 +51,14 @@ - [`compute_committee_index_from_shard`](#compute_committee_index_from_shard) - [Block processing](#block-processing) - [Operations](#operations) - - [New Attestation processing](#new-attestation-processing) - - [Updated 
`process_attestation`](#updated-process_attestation) - - [`update_pending_votes`](#update_pending_votes) - - [`process_shard_header`](#process_shard_header) - - [Shard Proposer slashings](#shard-proposer-slashings) + - [Extended Attestation processing](#extended-attestation-processing) + - [`process_shard_header`](#process_shard_header) + - [`process_shard_proposer_slashing`](#process_shard_proposer_slashing) - [Epoch transition](#epoch-transition) - - [Pending headers](#pending-headers) - - [Shard epoch increment](#shard-epoch-increment) + - [`process_pending_shard_confirmations`](#process_pending_shard_confirmations) + - [`charge_confirmed_shard_fees`](#charge_confirmed_shard_fees) + - [`reset_pending_shard_work`](#reset_pending_shard_work) + - [`process_shard_epoch_increment`](#process_shard_epoch_increment) @@ -99,6 +101,14 @@ The following values are (non-configurable) constants used throughout the specif | `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` | | `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | +### Shard Work Status + +| Name | Value | Notes | +| - | - | - | +| `SHARD_WORK_UNCONFIRMED` | `0` | Unconfirmed, nullified after confirmation time elapses | +| `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment | +| `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers | + ## Preset ### Misc @@ -109,6 +119,7 @@ The following values are (non-configurable) constants used throughout the specif | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | +| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state | ### Shard block samples @@ -169,13 +180,12 @@ class 
BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body] ```python class BeaconState(merge.BeaconState): # [extends The Merge state] - # [Updated fields] + # [Updated fields] (Warning: this changes with Altair, Sharding will rebase to use participation-flags) previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # [New fields] - previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] - current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] - grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] + # A ring buffer of the latest slots, with information per active shard. + shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -216,6 +226,7 @@ class ShardBlobHeader(Container): # Slot and shard that this header is intended for slot: Slot shard: Shard + # SSZ-summary of ShardBlobBody body_summary: ShardBlobBodySummary # Proposer of the shard-blob proposer_index: ValidatorIndex @@ -233,17 +244,16 @@ class SignedShardBlobHeader(Container): ```python class PendingShardHeader(Container): - # Slot and shard that this header is intended for - slot: Slot - shard: Shard # KZG10 commitment to the data commitment: DataCommitment # hash_tree_root of the ShardHeader (stored so that attestations can be checked against it) root: Root # Who voted for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] - # Has this header been confirmed? 
- confirmed: boolean + # Sum of effective balances of votes + weight: Gwei + # When the header was last updated, as reference for weight accuracy + update_slot: Slot ``` ### `ShardBlobReference` @@ -253,7 +263,7 @@ class ShardBlobReference(Container): # Slot and shard that this reference is intended for slot: Slot shard: Shard - # Hash-tree-root of commitment data + # Hash-tree-root of ShardBlobBody body_root: Root # Proposer of the shard-blob proposer_index: ValidatorIndex @@ -275,6 +285,18 @@ class ShardProposerSlashing(Container): signed_reference_2: SignedShardBlobReference ``` +### `ShardWork` + +```python +class ShardWork(Container): + # Upon confirmation the data is reduced to just the header. + status: Union[ # See Shard Work Status enum + None, # SHARD_WORK_UNCONFIRMED + DataCommitment, # SHARD_WORK_CONFIRMED + List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # SHARD_WORK_PENDING + ] +``` + ## Helper functions ### Misc @@ -448,6 +470,7 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard: ```python def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Shard: active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) + assert index < active_shards return Shard((index + get_start_shard(state, slot)) % active_shards) ``` @@ -455,8 +478,11 @@ def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: Co ```python def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex: - active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) - return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) + epoch = compute_epoch_at_slot(slot) + active_shards = get_active_shard_count(state, epoch) + index = CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) + assert index < get_committee_count_per_slot(state, epoch) + return index ``` @@ -497,67 +523,74 
@@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.voluntary_exits, process_voluntary_exit) ``` -### New Attestation processing - -#### Updated `process_attestation` +##### Extended Attestation processing ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: phase0.process_attestation(state, attestation) - update_pending_votes(state, attestation) + update_pending_shard_work(state, attestation) ``` -#### `update_pending_votes` - ```python -def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: - # Find and update the PendingShardHeader object, invalid block if pending header not in state - if compute_epoch_at_slot(attestation.data.slot) == get_current_epoch(state): - pending_headers = state.current_epoch_pending_shard_headers - else: - pending_headers = state.previous_epoch_pending_shard_headers - +def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> None: attestation_shard = compute_shard_from_committee_index( state, attestation.data.slot, attestation.data.index, ) - pending_header = None - for header in pending_headers: - if ( - header.root == attestation.data.shard_header_root - and header.slot == attestation.data.slot - and header.shard == attestation_shard - ): - pending_header = header - assert pending_header is not None - - for i in range(len(pending_header.votes)): - pending_header.votes[i] = pending_header.votes[i] or attestation.aggregation_bits[i] - - # Check if the PendingShardHeader is eligible for expedited confirmation - # Requirement 1: nothing else confirmed - all_candidates = [ - c for c in pending_headers if - (c.slot, c.shard) == (pending_header.slot, pending_header.shard) - ] - if True in [c.confirmed for c in all_candidates]: + buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS + committee_work = state.shard_buffer[buffer_index][attestation_shard] + + # Skip attestation vote accounting if the header is not 
pending + if committee_work.status.selector != SHARD_WORK_PENDING: + # TODO In Altair: set participation bit flag, if attestation matches winning header. return - # Requirement 2: >= 2/3 of balance attesting - participants = get_attesting_indices(state, attestation.data, pending_header.votes) - participants_balance = get_total_balance(state, participants) + current_headers: Sequence[PendingShardHeader] = committee_work.status.value + + # Find the corresponding header, abort if it cannot be found + header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) + + pending_header: PendingShardHeader = current_headers[header_index] full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) - full_committee_balance = get_total_balance(state, set(full_committee)) - if participants_balance * 3 >= full_committee_balance * 2: - pending_header.confirmed = True + + # The weight may be outdated if it is not the initial weight, and from a previous epoch + if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state): + pending_header.weight = sum(state.validators[index].effective_balance for index, bit + in zip(full_committee, pending_header.votes) if bit) + + pending_header.update_slot = state.slot + + full_committee_balance = Gwei(0) + # Update votes bitfield in the state, update weights + for i, bit in enumerate(attestation.aggregation_bits): + weight = state.validators[full_committee[i]].effective_balance + full_committee_balance += weight + if bit: + if not pending_header.votes[i]: + pending_header.weight += weight + pending_header.votes[i] = True + + # Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting + if pending_header.weight * 3 >= full_committee_balance * 2: + # TODO In Altair: set participation bit flag for voters of this early winning header + if pending_header.commitment == 
DataCommitment(): + # The committee voted to not confirm anything + state.shard_buffer[buffer_index][attestation_shard].change( + selector=SHARD_WORK_UNCONFIRMED, + value=None, + ) + else: + state.shard_buffer[buffer_index][attestation_shard].change( + selector=SHARD_WORK_CONFIRMED, + value=pending_header.commitment, + ) ``` -#### `process_shard_header` +##### `process_shard_header` ```python -def process_shard_header(state: BeaconState, - signed_header: SignedShardBlobHeader) -> None: +def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: header = signed_header.message # Verify the header is not 0, and not from the future. assert Slot(0) < header.slot <= state.slot @@ -569,6 +602,16 @@ def process_shard_header(state: BeaconState, # Verify that the block root matches, # to ensure the header will only be included in this specific Beacon Chain sub-tree. assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) + + # Check that this data is still pending + committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard] + assert committee_work.status.selector == SHARD_WORK_PENDING + + # Check that this header is not yet in the pending list + current_headers: Sequence[PendingShardHeader] = committee_work.status.value + header_root = hash_tree_root(header) + assert header_root not in [pending_header.root for pending_header in current_headers] + # Verify proposer assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard) # Verify signature @@ -584,27 +627,20 @@ def process_shard_header(state: BeaconState, == bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length]) ) - # Get the correct pending header list - if header_epoch == get_current_epoch(state): - pending_headers = state.current_epoch_pending_shard_headers - else: - pending_headers = state.previous_epoch_pending_shard_headers - - header_root = 
hash_tree_root(header) - # Check that this header is not yet in the pending list - assert header_root not in [pending_header.root for pending_header in pending_headers] - - # Include it in the pending list + # Initialize the pending header index = compute_committee_index_from_shard(state, header.slot, header.shard) committee_length = len(get_beacon_committee(state, header.slot, index)) - pending_headers.append(PendingShardHeader( - slot=header.slot, - shard=header.shard, + initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length) + pending_header = PendingShardHeader( commitment=body_summary.commitment, root=header_root, - votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - confirmed=False, - )) + votes=initial_votes, + weight=0, + update_slot=state.slot, + ) + + # Include it in the pending list + state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard].append(pending_header) ``` The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), @@ -612,7 +648,7 @@ the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). 
-##### Shard Proposer slashings +##### `process_shard_proposer_slashing` ```python def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None: @@ -645,19 +681,18 @@ This epoch transition overrides the Merge epoch transition: ```python def process_epoch(state: BeaconState) -> None: + # Sharding + process_pending_shard_confirmations(state) + charge_confirmed_shard_fees(state) + reset_pending_shard_work(state) + + # Phase0 process_justification_and_finalization(state) process_rewards_and_penalties(state) process_registry_updates(state) - process_slashings(state) - # Sharding - process_pending_headers(state) - charge_confirmed_header_fees(state) - reset_pending_headers(state) - # Final updates - # Phase 0 process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) @@ -668,10 +703,10 @@ def process_epoch(state: BeaconState) -> None: process_shard_epoch_increment(state) ``` -#### Pending headers +#### `process_pending_shard_confirmations` ```python -def process_pending_headers(state: BeaconState) -> None: +def process_pending_shard_confirmations(state: BeaconState) -> None: # Pending header processing applies to the previous epoch. # Skip if `GENESIS_EPOCH` because no prior epoch to process. 
if get_current_epoch(state) == GENESIS_EPOCH: @@ -679,108 +714,93 @@ def process_pending_headers(state: BeaconState) -> None: previous_epoch = get_previous_epoch(state) previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) + + # Mark stale headers as unconfirmed for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): - for shard_index in range(get_active_shard_count(state, previous_epoch)): - shard = Shard(shard_index) - # Pending headers for this (slot, shard) combo - candidates = [ - c for c in state.previous_epoch_pending_shard_headers - if (c.slot, c.shard) == (slot, shard) - ] - # If any candidates already confirmed, skip - if True in [c.confirmed for c in candidates]: - continue - - # The entire committee (and its balance) - index = compute_committee_index_from_shard(state, slot, shard) - full_committee = get_beacon_committee(state, slot, index) - # The set of voters who voted for each header (and their total balances) - voting_sets = [ - set(v for i, v in enumerate(full_committee) if c.votes[i]) - for c in candidates - ] - voting_balances = [ - get_total_balance(state, voters) - for voters in voting_sets - ] - # Get the index with the most total balance voting for them. 
- # NOTE: if two choices get exactly the same voting balance, - # the candidate earlier in the list wins - if max(voting_balances) > 0: - winning_index = voting_balances.index(max(voting_balances)) - else: - # If no votes, zero wins - winning_index = [c.root for c in candidates].index(Root()) - candidates[winning_index].confirmed = True - for slot_index in range(SLOTS_PER_EPOCH): - for shard in range(MAX_SHARDS): - state.grandparent_epoch_confirmed_commitments[shard][slot_index] = DataCommitment() - confirmed_headers = [candidate for candidate in state.previous_epoch_pending_shard_headers if candidate.confirmed] - for header in confirmed_headers: - state.grandparent_epoch_confirmed_commitments[header.shard][header.slot % SLOTS_PER_EPOCH] = header.commitment -``` - -```python -def charge_confirmed_header_fees(state: BeaconState) -> None: + buffer_index = slot % SHARD_STATE_MEMORY_SLOTS + for shard_index in range(len(state.shard_buffer[buffer_index])): + committee_work = state.shard_buffer[buffer_index][shard_index] + if committee_work.status.selector == SHARD_WORK_PENDING: + winning_header = max(committee_work.status.value, key=lambda header: header.weight) + # TODO In Altair: set participation bit flag of voters for winning header + if winning_header.commitment == DataCommitment(): + committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None) + else: + committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment) +``` + +#### `charge_confirmed_shard_fees` + +```python +def charge_confirmed_shard_fees(state: BeaconState) -> None: new_gasprice = state.shard_gasprice previous_epoch = get_previous_epoch(state) + previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) adjustment_quotient = ( get_active_shard_count(state, previous_epoch) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT ) - previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) + # Iterate through confirmed shard-headers for 
slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): - for shard_index in range(get_active_shard_count(state, previous_epoch)): - shard = Shard(shard_index) - confirmed_candidates = [ - c for c in state.previous_epoch_pending_shard_headers - if (c.slot, c.shard, c.confirmed) == (slot, shard, True) - ] - if not any(confirmed_candidates): - continue - candidate = confirmed_candidates[0] - - # Charge EIP 1559 fee - proposer = get_shard_proposer_index(state, slot, shard) - fee = ( - (state.shard_gasprice * candidate.commitment.length) - // TARGET_SAMPLES_PER_BLOCK - ) - decrease_balance(state, proposer, fee) - - # Track updated gas price - new_gasprice = compute_updated_gasprice( - new_gasprice, - candidate.commitment.length, - adjustment_quotient, - ) + buffer_index = slot % SHARD_STATE_MEMORY_SLOTS + for shard_index in range(len(state.shard_buffer[buffer_index])): + committee_work = state.shard_buffer[buffer_index][shard_index] + if committee_work.status.selector == SHARD_WORK_CONFIRMED: + commitment: DataCommitment = committee_work.status.value + # Charge EIP 1559 fee + proposer = get_shard_proposer_index(state, slot, Shard(shard_index)) + fee = ( + (state.shard_gasprice * commitment.length) + // TARGET_SAMPLES_PER_BLOCK + ) + decrease_balance(state, proposer, fee) + + # Track updated gas price + new_gasprice = compute_updated_gasprice( + new_gasprice, + commitment.length, + adjustment_quotient, + ) state.shard_gasprice = new_gasprice ``` +#### `reset_pending_shard_work` + ```python -def reset_pending_headers(state: BeaconState) -> None: - state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers - state.current_epoch_pending_shard_headers = [] - # Add dummy "empty" PendingShardHeader (default vote for if no shard header available) +def reset_pending_shard_work(state: BeaconState) -> None: + # Add dummy "empty" PendingShardHeader (default vote if no shard header is available) next_epoch = 
get_current_epoch(state) + 1 next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch) committees_per_slot = get_committee_count_per_slot(state, next_epoch) + active_shards = get_active_shard_count(state, next_epoch) + for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH): - for index in range(committees_per_slot): - committee_index = CommitteeIndex(index) - shard = compute_shard_from_committee_index(state, slot, committee_index) + buffer_index = slot % SHARD_STATE_MEMORY_SLOTS + + # Reset the shard work tracking + state.shard_buffer[buffer_index] = [ShardWork() for _ in range(active_shards)] + + start_shard = get_start_shard(state, slot) + for committee_index in range(committees_per_slot): + shard = (start_shard + committee_index) % active_shards + # a committee is available, initialize a pending shard-header list committee_length = len(get_beacon_committee(state, slot, committee_index)) - state.current_epoch_pending_shard_headers.append(PendingShardHeader( - slot=slot, - shard=shard, - commitment=DataCommitment(), - root=Root(), - votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - confirmed=False, - )) + state.shard_buffer[buffer_index][shard].change( + selector=SHARD_WORK_PENDING, + value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]( + PendingShardHeader( + commitment=DataCommitment(), + root=Root(), + votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), + weight=0, + update_slot=slot, + ) + ) + ) + # a shard without committee available defaults to SHARD_WORK_UNCONFIRMED. 
``` -#### Shard epoch increment +#### `process_shard_epoch_increment` ```python def process_shard_epoch_increment(state: BeaconState) -> None: diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 47ed52970a..51dbfd5a6a 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -17,9 +17,11 @@ - [SignedShardBlob](#signedshardblob) - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - - [Shard blobs: `shard_blob_{subnet_id}`](#shard-blobs-shard_blob_subnet_id) - - [Shard header: `shard_header`](#shard-header-shard_header) - - [Shard proposer slashing: `shard_proposer_slashing`](#shard-proposer-slashing-shard_proposer_slashing) + - [Shard blob subnets](#shard-blob-subnets) + - [`shard_blob_{subnet_id}`](#shard_blob_subnet_id) + - [Global topics](#global-topics) + - [`shard_header`](#shard_header) + - [`shard_proposer_slashing`](#shard_proposer_slashing) @@ -28,7 +30,7 @@ ## Introduction The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and -[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite. +[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite. The adjustments and additions for Shards are outlined in this document. ## Constants @@ -64,6 +66,7 @@ class ShardBlob(Container): # Slot and shard that this blob is intended for slot: Slot shard: Shard + # Shard data with related commitments and beacon anchor body: ShardBlobBody # Proposer of the shard-blob proposer_index: ValidatorIndex @@ -88,12 +91,16 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface. 
| Name | Message Type | |----------------------------------|---------------------------| | `shard_blob_{subnet_id}` | `SignedShardBlob` | -| `shard_header` | `SignedShardHeader` | +| `shard_header` | `SignedShardBlobHeader` | | `shard_proposer_slashing` | `ShardProposerSlashing` | The [DAS network specification](./das-p2p.md) defines additional topics. -#### Shard blobs: `shard_blob_{subnet_id}` +#### Shard blob subnets + +Shard blob subnets are used to propagate shard blobs to subsections of the network. + +##### `shard_blob_{subnet_id}` Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets. @@ -117,8 +124,10 @@ The following validations MUST pass before forwarding the `signed_blob` (with in (a client MAY queue future blobs for processing at the appropriate slot). - _[IGNORE]_ The `blob` is new enough to be still be processed -- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)` +- _[REJECT]_ The shard should have a committee at slot -- + i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error - _[REJECT]_ The shard blob is for the correct subnet -- - i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` + i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` - _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. - _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. - _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. 
@@ -129,19 +138,25 @@ The following validations MUST pass before forwarding the `signed_blob` (with in
  the block MAY be queued for later processing while proposers for the blob's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.

+#### Global topics
+
+There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_header`) to
+all nodes on the network. Another one is used to propagate validator messages (`shard_proposer_slashing`).

-#### Shard header: `shard_header`
+##### `shard_header`

Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet.

-The following validations MUST pass before forwarding the `signed_shard_header` (with inner `message` as `header`) on the network.
+The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network.
- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
  i.e. validate that `header.slot <= current_slot`
  (a client MAY queue future headers for processing at the appropriate slot).
- _[IGNORE]_ The `header` is new enough to be still be processed --
  i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
-- _[REJECT]_ The proposer signature, `signed_shard_header.signature`, is valid with respect to the `proposer_index` pubkey.
+- _[REJECT]_ The shard should have a committee at slot --
+  i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error
+- _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey.
- _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, @@ -149,7 +164,7 @@ The following validations MUST pass before forwarding the `signed_shard_header` in such a case _do not_ `REJECT`, instead `IGNORE` this message. -#### Shard proposer slashing: `shard_proposer_slashing` +##### `shard_proposer_slashing` Shard proposer slashings, in the form of `ShardProposerSlashing`, are published to the global `shard_proposer_slashing` topic. diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index d97b8ea1c9..4ef64f2f28 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -17,10 +17,10 @@ - [Serialization](#serialization) - [`uintN`](#uintn) - [`boolean`](#boolean) - - [`null`](#null) - [`Bitvector[N]`](#bitvectorn) - [`Bitlist[N]`](#bitlistn) - - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) + - [Vectors, containers, lists](#vectors-containers-lists) + - [Union](#union) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Summaries and expansions](#summaries-and-expansions) @@ -61,7 +61,7 @@ * **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits * notation `Bitlist[N]` * **union**: union type containing one of the given subtypes - * notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]` + * notation `Union[type_0, type_1, ...]`, e.g. `union[None, uint64, uint32]` *Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies. 
@@ -77,7 +77,6 @@ For convenience we alias: * `byte` to `uint8` (this is a basic type) * `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type) * `ByteList[N]` to `List[byte, N]` -* `null`: `{}` ### Default values Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types. @@ -101,7 +100,7 @@ An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it - Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal. - Containers with no fields are illegal. -- The `null` type is only legal as the first type in a union subtype (i.e. with type index zero). +- The `None` type option in a `Union` type is only legal as the first option (i.e. with index zero). ## Serialization @@ -123,12 +122,6 @@ assert value in (True, False) return b"\x01" if value is True else b"\x00" ``` -### `null` - -```python -return b"" -``` - ### `Bitvector[N]` ```python @@ -150,7 +143,7 @@ array[len(value) // 8] |= 1 << (len(value) % 8) return bytes(array) ``` -### Vectors, containers, lists, unions +### Vectors, containers, lists ```python # Recursively serialize @@ -170,19 +163,31 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum return b"".join(fixed_parts + variable_parts) ``` -If `value` is a union type: +### Union + +A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`. -Define value as an object that has properties `value.value` with the contained value, and `value.type_index` which indexes the type. +A `Union`: +- May have multiple selectors with the same type. +- Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions. +- Must have at least 1 type option. +- May have `None` as first type option, i.e. 
`selector == 0` +- Must have at least 2 type options if the first is `None` +- Is always considered a variable-length type, even if all type options have an equal fixed-length. ```python -serialized_bytes = serialize(value.value) -serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little") -return serialized_type_index + serialized_bytes +if value.value is None: + assert value.selector == 0 + return b"\x00" +else: + serialized_bytes = serialize(value.value) + serialized_selector_index = value.selector.to_bytes(1, "little") + return serialized_selector_index + serialized_bytes ``` ## Deserialization -Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. +Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Deserialization can be implemented using a recursive algorithm. The deserialization of basic objects is easy, and from there we can find a simple recursive algorithm for all fixed-size objects. For variable-size objects we have to do one of the following depending on what kind of object it is: @@ -191,12 +196,14 @@ Deserialization can be implemented using a recursive algorithm. The deserializat * The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) * Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects. * In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. 
Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits.
+* In the case of unions, the first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type.

Note that deserialization requires hardening against invalid inputs. A non-exhaustive list:

- Offsets: out of order, out of range, mismatching minimum element size.
- Scope: Extra unused bytes, not aligned with element size.
- More elements than a list limit allows. Part of enforcing consensus.
+- An out-of-bounds selected index in a `Union`

Efficient algorithms for computing this object can be found in [the implementations](#implementations).

@@ -227,7 +234,7 @@ We first define helper functions:

- If `1` chunk: the root is the chunk itself.
- If `> 1` chunks: merkleize as binary tree.
* `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`.
-* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`.
+* `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` (`"uint256"` little-endian serialization) return `hash(root + selector)`.

We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:

* `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist.
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container.
* `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects.
-* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type. +* `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is of union type, and `value.value` is not `None` +* `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` is `None` ## Summaries and expansions diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 270b199529..0b3e4753b8 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.1.0-alpha.6 +1.1.0-alpha.7 diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py similarity index 80% rename from tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py rename to tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py index ff388ff379..fa7f89fdcb 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py @@ -1,4 +1,3 @@ -from collections import Counter import random from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -13,6 +12,9 @@ ) from eth2spec.test.helpers.sync_committee import ( compute_aggregate_sync_committee_signature, + compute_sync_committee_participant_reward_and_penalty, + compute_sync_committee_proposer_reward, + compute_committee_indices, ) from eth2spec.test.context import ( expect_assertion_error, @@ -30,7 +32,7 @@ def run_sync_committee_processing(spec, state, block, expect_exception=False): produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes. 
""" # process up to the sync committee work - call = run_block_processing_to(spec, state, block, 'process_sync_committee') + call = run_block_processing_to(spec, state, block, 'process_sync_aggregate') yield 'pre', state yield 'sync_aggregate', block.body.sync_aggregate if expect_exception: @@ -61,15 +63,6 @@ def get_committee_indices(spec, state, duplicates=False): state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index]) -def compute_committee_indices(spec, state, committee): - """ - Given a ``committee``, calculate and return the related indices - """ - all_pubkeys = [v.pubkey for v in state.validators] - committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys] - return committee_indices - - @with_altair_and_later @spec_state_test @always_bls @@ -115,65 +108,20 @@ def test_invalid_signature_extra_participant(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) -def compute_sync_committee_inclusion_reward(spec, - state, - participant_index, - committee_indices, - committee_bits): - total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT - total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments) - max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR) - included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] - max_slot_rewards = spec.Gwei( - max_epoch_rewards * len(included_indices) - // len(committee_indices) // spec.SLOTS_PER_EPOCH - ) - - # Compute the participant and proposer sync rewards - committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices]) - committee_effective_balance = max(spec.EFFECTIVE_BALANCE_INCREMENT, committee_effective_balance) - effective_balance = state.validators[participant_index].effective_balance - return spec.Gwei(max_slot_rewards * 
effective_balance // committee_effective_balance) - - -def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits): - included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] - multiplicities = Counter(included_indices) - - inclusion_reward = compute_sync_committee_inclusion_reward( - spec, state, participant_index, committee_indices, committee_bits, - ) - return spec.Gwei(inclusion_reward * multiplicities[participant_index]) - - -def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits): - proposer_reward = 0 - for index, bit in zip(committee_indices, committee_bits): - if not bit: - continue - inclusion_reward = compute_sync_committee_inclusion_reward( - spec, state, index, committee_indices, committee_bits, - ) - proposer_reward_denominator = ( - (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT) - * spec.WEIGHT_DENOMINATOR - // spec.PROPOSER_WEIGHT - ) - proposer_reward += spec.Gwei((inclusion_reward * spec.WEIGHT_DENOMINATOR) // proposer_reward_denominator) - return proposer_reward - - def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index): for index in range(len(post_state.validators)): reward = 0 + penalty = 0 if index in committee_indices: - reward += compute_sync_committee_participant_reward( + _reward, _penalty = compute_sync_committee_participant_reward_and_penalty( spec, pre_state, index, committee_indices, committee_bits, ) + reward += _reward + penalty += _penalty if proposer_index == index: reward += compute_sync_committee_proposer_reward( @@ -183,7 +131,7 @@ def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indic committee_bits, ) - assert post_state.balances[index] == pre_state.balances[index] + reward + assert post_state.balances[index] == pre_state.balances[index] + reward - penalty def run_successful_sync_committee_test(spec, state, 
committee_indices, committee_bits): diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py new file mode 100644 index 0000000000..82acba3224 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py @@ -0,0 +1,115 @@ +from random import Random + +from eth2spec.test.helpers.constants import MINIMAL +from eth2spec.test.context import ( + with_altair_and_later, + with_custom_state, + spec_test, spec_state_test, + with_presets, + single_phase, +) +from eth2spec.test.helpers.state import next_epoch_via_block +from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with + + +def run_process_participation_flag_updates(spec, state): + old = state.current_epoch_participation.copy() + yield from run_epoch_processing_with(spec, state, 'process_participation_flag_updates') + assert state.current_epoch_participation == [0] * len(state.validators) + assert state.previous_epoch_participation == old + + +@with_altair_and_later +@spec_state_test +def test_all_zeroed(spec, state): + next_epoch_via_block(spec, state) + state.current_epoch_participation = [0] * len(state.validators) + state.previous_epoch_participation = [0] * len(state.validators) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_filled(spec, state): + next_epoch_via_block(spec, state) + + full_flags = spec.ParticipationFlags(0) + for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)): + full_flags = spec.add_flag(full_flags, flag_index) + + state.previous_epoch_participation = [full_flags] * len(state.validators) + state.current_epoch_participation = [full_flags] * len(state.validators) + + yield from run_process_participation_flag_updates(spec, state) + + +def random_flags(spec, state, seed: int, 
previous=True, current=True): + rng = Random(seed) + count = len(state.validators) + max_flag_value_excl = 2**len(spec.PARTICIPATION_FLAG_WEIGHTS) + if previous: + state.previous_epoch_participation = [rng.randrange(0, max_flag_value_excl) for _ in range(count)] + if current: + state.current_epoch_participation = [rng.randrange(0, max_flag_value_excl) for _ in range(count)] + + +@with_altair_and_later +@spec_state_test +def test_random(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 10) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_random_genesis(spec, state): + random_flags(spec, state, 11) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_current_epoch_zeroed(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 12, current=False) + state.current_epoch_participation = [0] * len(state.validators) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_previous_epoch_zeroed(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 13, previous=False) + state.previous_epoch_participation = [0] * len(state.validators) + yield from run_process_participation_flag_updates(spec, state) + + +def custom_validator_count(factor: float): + def initializer(spec): + num_validators = spec.SLOTS_PER_EPOCH * spec.MAX_COMMITTEES_PER_SLOT * spec.TARGET_COMMITTEE_SIZE + return [spec.MAX_EFFECTIVE_BALANCE] * int(float(int(num_validators)) * factor) + return initializer + + +@with_altair_and_later +@with_presets([MINIMAL], reason="mainnet config requires too many pre-generated public/private keys") +@spec_test +@with_custom_state(balances_fn=custom_validator_count(1.3), threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@single_phase +def test_slightly_larger_random(spec, state): + 
next_epoch_via_block(spec, state) + random_flags(spec, state, 14) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@with_presets([MINIMAL], reason="mainnet config requires too many pre-generated public/private keys") +@spec_test +@with_custom_state(balances_fn=custom_validator_count(2.6), threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@single_phase +def test_large_random(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 15) + yield from run_process_participation_flag_updates(spec, state) diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py index 20e13b9bab..62740df4ed 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py @@ -1,7 +1,7 @@ import random from eth2spec.test.context import fork_transition_test from eth2spec.test.helpers.constants import PHASE0, ALTAIR -from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, next_epoch_via_block +from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, next_epoch_via_signed_block from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block from eth2spec.test.helpers.attestations import next_slots_with_attestations @@ -261,12 +261,12 @@ def _run_transition_test_with_attestations(state, assert current_epoch == spec.GENESIS_EPOCH # skip genesis epoch to avoid dealing with some edge cases... 
- block = next_epoch_via_block(spec, state) + block = next_epoch_via_signed_block(spec, state) # regular state transition until fork: fill_cur_epoch = False fill_prev_epoch = True - blocks = [pre_tag(sign_block(spec, state, block))] + blocks = [pre_tag(block)] current_epoch = spec.get_current_epoch(state) for _ in range(current_epoch, fork_epoch - 1): _, blocks_in_epoch, state = next_slots_with_attestations( @@ -414,8 +414,8 @@ def test_transition_with_no_attestations_until_after_fork(state, fork_epoch, spe # continue regular state transition but add attestations # for enough epochs to finalize the ``fork_epoch`` - block = next_epoch_via_block(post_spec, state) - blocks.append(post_tag(sign_block(post_spec, state, block))) + block = next_epoch_via_signed_block(post_spec, state) + blocks.append(post_tag(block)) for _ in range(4): _, blocks_in_epoch, state = next_slots_with_attestations( post_spec, diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/altair/unittests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py new file mode 100644 index 0000000000..4443f97e0b --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py @@ -0,0 +1,23 @@ +from eth2spec.test.context import ( + spec_state_test, + with_phases, +) +from eth2spec.test.helpers.constants import ALTAIR + + +@with_phases([ALTAIR]) +@spec_state_test +def test_weight_denominator(spec, state): + assert ( + spec.TIMELY_HEAD_WEIGHT + + spec.TIMELY_SOURCE_WEIGHT + + spec.TIMELY_TARGET_WEIGHT + + spec.SYNC_REWARD_WEIGHT + + spec.PROPOSER_WEIGHT + ) == spec.WEIGHT_DENOMINATOR + + +@with_phases([ALTAIR]) +@spec_state_test +def test_inactivity_score(spec, state): + assert spec.config.INACTIVITY_SCORE_BIAS <= spec.config.INACTIVITY_SCORE_RECOVERY_RATE 
diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py index dfe90b5b5d..048e5f43db 100644 --- a/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py @@ -4,7 +4,7 @@ from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.state import transition_to -from eth2spec.utils import bls +from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature from eth2spec.utils.bls import only_with_bls from eth2spec.test.context import ( with_altair_and_later, @@ -85,12 +85,9 @@ def _get_sync_committee_signature( pubkey = state.current_sync_committee.pubkeys[sync_committee_index] privkey = pubkey_to_privkey[pubkey] - domain = spec.get_domain( - state, - spec.DOMAIN_SYNC_COMMITTEE, + return compute_sync_committee_signature( + spec, state, target_slot, privkey, block_root=target_block_root ) - signing_data = spec.compute_signing_root(target_block_root, domain) - return bls.Sign(privkey, spec.hash_tree_root(signing_data)) @only_with_bls() diff --git a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py index 707ac0b2ec..4ed3f50885 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py @@ -7,8 +7,7 @@ from eth2spec.test.helpers.state import transition_to from eth2spec.test.helpers.attestations import ( run_attestation_processing, - get_valid_late_attestation, - get_valid_on_time_attestation, + get_valid_attestation, ) @@ -16,7 +15,7 @@ @spec_state_test @always_bls def test_on_time_success(spec, 
state): - attestation = get_valid_on_time_attestation(spec, state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -27,7 +26,7 @@ def test_on_time_success(spec, state): @spec_state_test @always_bls def test_late_success(spec, state): - attestation = get_valid_late_attestation(spec, state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py index 87f0238fb9..cc12b66f5e 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py @@ -4,7 +4,7 @@ get_sample_shard_transition, ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, @@ -80,8 +80,8 @@ def test_challenge_appended(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -104,8 +104,8 @@ def test_challenge_empty_element_replaced(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = 
get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -130,8 +130,8 @@ def test_duplicate_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -156,8 +156,8 @@ def test_second_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -185,8 +185,8 @@ def test_multiple_epochs_custody(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -210,8 +210,8 @@ def test_many_epochs_custody(spec, state): shard = 0 offset_slots = 
spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -235,8 +235,8 @@ def test_off_chain_attestation(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) @@ -256,8 +256,8 @@ def test_custody_response(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -287,8 +287,8 @@ def test_custody_response_chunk_index_2(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + 
shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -319,8 +319,8 @@ def test_custody_response_multiple_epochs(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -351,8 +351,8 @@ def test_custody_response_many_epochs(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py index 7ee5cd394c..4891c7b236 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py @@ -3,7 +3,7 @@ get_custody_slashable_shard_transition, ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, @@ -96,8 +96,8 @@ def run_standard_custody_slashing_test(spec, slashable=correct, ) - attestation = 
get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py index 7332dcc80d..144ea02135 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py @@ -3,7 +3,7 @@ get_sample_shard_transition, ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( @@ -36,8 +36,8 @@ def test_validator_slashed_after_chunk_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py index 92c311a29e..d8dd3d19e8 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py +++ 
b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py @@ -8,7 +8,7 @@ get_sample_shard_transition ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.state import next_epoch_via_block, transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( @@ -77,8 +77,8 @@ def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -126,8 +126,8 @@ def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py index f242c361b0..77ce3c5add 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py @@ -5,7 +5,7 @@ spec_state_test, with_presets, ) -from eth2spec.test.helpers.attestations import get_valid_on_time_attestation 
+from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.constants import ( CUSTODY_GAME, @@ -60,7 +60,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state): shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True) shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_transitions = get_shard_transitions(spec, state, shard_block_dict) - attestation = get_valid_on_time_attestation( + attestation = get_valid_attestation( spec, state, index=committee_index, shard_transition=shard_transitions[shard], signed=True, ) @@ -127,7 +127,7 @@ def test_custody_slashing(spec, state): shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_transitions = get_shard_transitions(spec, state, shard_block_dict) - attestation = get_valid_on_time_attestation( + attestation = get_valid_attestation( spec, state, index=committee_index, shard_transition=shard_transitions[shard], signed=True, ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index b55aff9e51..c92860ffa7 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -50,7 +50,7 @@ def run_attestation_processing(spec, state, attestation, valid=True): yield 'post', state -def build_attestation_data(spec, state, slot, index, shard=None, on_time=True): +def build_attestation_data(spec, state, slot, index, shard=None): assert state.slot >= slot if slot == state.slot: @@ -85,45 +85,12 @@ def build_attestation_data(spec, state, slot, index, shard=None, on_time=True): return data -def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False): - ''' - Construct on-time attestation for next slot - ''' - if slot is None: 
- slot = state.slot - if index is None: - index = 0 - - return get_valid_attestation( - spec, - state, - slot=slot, - index=index, - signed=signed, - on_time=True, - ) - - -def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False): - ''' - Construct on-time attestation for next slot - ''' - if slot is None: - slot = state.slot - if index is None: - index = 0 - - return get_valid_attestation(spec, state, slot=slot, index=index, - signed=signed, on_time=False) - - def get_valid_attestation(spec, state, slot=None, index=None, filter_participant_set=None, - signed=False, - on_time=True): + signed=False): # If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed. # Thus strictly speaking invalid when no participant is added later. if slot is None: @@ -132,7 +99,7 @@ def get_valid_attestation(spec, index = 0 attestation_data = build_attestation_data( - spec, state, slot=slot, index=index, on_time=on_time + spec, state, slot=slot, index=index ) beacon_committee = spec.get_beacon_committee( @@ -219,7 +186,7 @@ def add_attestations_to_state(spec, state, attestations, slot): spec.process_attestation(state, attestation) -def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None, on_time=True): +def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None): committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest)) for index in range(committees_per_slot): def participants_filter(comm): @@ -234,7 +201,6 @@ def participants_filter(comm): slot_to_attest, index=index, signed=True, - on_time=on_time, filter_participant_set=participants_filter ) @@ -269,7 +235,6 @@ def next_slots_with_attestations(spec, post_state, spec, slot_to_attest, - on_time=False, participation_fn=participation_fn ) for attestation in attestations: diff --git a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py 
b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py index 676d8cb6d8..d2ec4a1115 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py @@ -25,8 +25,8 @@ def get_process_calls(spec): 'process_voluntary_exit': lambda state, block: for_ops(state, block.body.voluntary_exits, spec.process_voluntary_exit), # Altair - 'process_sync_committee': - lambda state, block: spec.process_sync_committee(state, block.body.sync_aggregate), + 'process_sync_aggregate': + lambda state, block: spec.process_sync_aggregate(state, block.body.sync_aggregate), # Merge 'process_application_payload': lambda state, block: spec.process_application_payload(state, block.body), diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py index 4e98845c47..8f116dc3d7 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -32,3 +32,9 @@ MINIMAL = PresetBaseName('minimal') ALL_PRESETS = (MINIMAL, MAINNET) + + +# +# Number +# +MAX_UINT_64 = 2**64 - 1 diff --git a/tests/core/pyspec/eth2spec/test/helpers/merge/__init__.py b/tests/core/pyspec/eth2spec/test/helpers/merge/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py new file mode 100644 index 0000000000..9b7f893663 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py @@ -0,0 +1,45 @@ +MERGE_FORK_TEST_META_TAGS = { + 'fork': 'merge', +} + + +def run_fork_test(post_spec, pre_state): + # Clean up state to be more realistic + pre_state.current_epoch_attestations = [] + + yield 'pre', pre_state + + post_state = post_spec.upgrade_to_merge(pre_state) + + # Stable fields + stable_fields = [ + 'genesis_time', 'genesis_validators_root', 'slot', + # History + 'latest_block_header', 
'block_roots', 'state_roots', 'historical_roots', + # Eth1 + 'eth1_data', 'eth1_data_votes', 'eth1_deposit_index', + # Registry + 'validators', 'balances', + # Randomness + 'randao_mixes', + # Slashings + 'slashings', + # Attestations + 'previous_epoch_attestations', 'current_epoch_attestations', + # Finality + 'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint', + ] + for field in stable_fields: + assert getattr(pre_state, field) == getattr(post_state, field) + + # Modified fields + modified_fields = ['fork'] + for field in modified_fields: + assert getattr(pre_state, field) != getattr(post_state, field) + + assert pre_state.fork.current_version == post_state.fork.previous_version + assert post_state.fork.current_version == post_spec.config.MERGE_FORK_VERSION + assert post_state.fork.epoch == post_spec.get_current_epoch(post_state) + assert post_state.latest_execution_payload_header == post_spec.ExecutionPayloadHeader() + + yield 'post', post_state diff --git a/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py b/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py index d3520e580f..a783d2517d 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py +++ b/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py @@ -2,6 +2,10 @@ from eth2spec.test.helpers.block_header import sign_block_header from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.state import get_balance +from eth2spec.test.helpers.sync_committee import ( + compute_committee_indices, + compute_sync_committee_participant_reward_and_penalty, +) def get_min_slashing_penalty_quotient(spec): @@ -11,7 +15,7 @@ def get_min_slashing_penalty_quotient(spec): return spec.MIN_SLASHING_PENALTY_QUOTIENT -def check_proposer_slashing_effect(spec, pre_state, state, slashed_index): +def check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block=None): slashed_validator = 
state.validators[slashed_index] assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH @@ -20,24 +24,51 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index): proposer_index = spec.get_beacon_proposer_index(state) slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec) whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT + + # Altair introduces sync committee (SC) reward and penalty + sc_reward_for_slashed = sc_penalty_for_slashed = sc_reward_for_proposer = sc_penalty_for_proposer = 0 + if is_post_altair(spec) and block is not None: + committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) + committee_bits = block.body.sync_aggregate.sync_committee_bits + sc_reward_for_slashed, sc_penalty_for_slashed = compute_sync_committee_participant_reward_and_penalty( + spec, + pre_state, + slashed_index, + committee_indices, + committee_bits, + ) + sc_reward_for_proposer, sc_penalty_for_proposer = compute_sync_committee_participant_reward_and_penalty( + spec, + pre_state, + proposer_index, + committee_indices, + committee_bits, + ) + if proposer_index != slashed_index: # slashed validator lost initial slash penalty assert ( get_balance(state, slashed_index) - == get_balance(pre_state, slashed_index) - slash_penalty + == get_balance(pre_state, slashed_index) - slash_penalty + sc_reward_for_slashed - sc_penalty_for_slashed ) # block proposer gained whistleblower reward # >= because proposer could have reported multiple assert ( get_balance(state, proposer_index) - >= get_balance(pre_state, proposer_index) + whistleblower_reward + >= ( + get_balance(pre_state, proposer_index) + whistleblower_reward + + sc_reward_for_proposer - sc_penalty_for_proposer + ) ) else: # proposer reported themself so get penalty and reward # >= because proposer could have reported multiple assert ( 
get_balance(state, slashed_index) - >= get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward + >= ( + get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward + + sc_reward_for_slashed - sc_penalty_for_slashed + ) ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/state.py b/tests/core/pyspec/eth2spec/test/helpers/state.py index ef09c6e07a..05f0e9013a 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/state.py +++ b/tests/core/pyspec/eth2spec/test/helpers/state.py @@ -58,11 +58,19 @@ def next_epoch(spec, state): spec.process_slots(state, slot) -def next_epoch_via_block(spec, state): +def next_epoch_via_block(spec, state, insert_state_root=False): """ Transition to the start slot of the next epoch via a full block transition """ - return apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) + block = apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) + if insert_state_root: + block.state_root = state.hash_tree_root() + return block + + +def next_epoch_via_signed_block(spec, state): + block = next_epoch_via_block(spec, state, insert_state_root=True) + return sign_block(spec, state, block) def get_state_root(spec, state, slot) -> bytes: diff --git a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py index da85fad606..fa753db527 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py @@ -1,3 +1,5 @@ +from collections import Counter + from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -33,3 +35,42 @@ def compute_aggregate_sync_committee_signature(spec, state, slot, participants, ) ) return bls.Aggregate(signatures) + + +def compute_sync_committee_inclusion_reward(spec, state): + total_active_increments = 
spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT + total_base_rewards = spec.get_base_reward_per_increment(state) * total_active_increments + max_participant_rewards = (total_base_rewards * spec.SYNC_REWARD_WEIGHT + // spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH) + return max_participant_rewards // spec.SYNC_COMMITTEE_SIZE + + +def compute_sync_committee_participant_reward_and_penalty( + spec, state, participant_index, committee_indices, committee_bits): + inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) + + included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] + not_included_indices = [index for index, bit in zip(committee_indices, committee_bits) if not bit] + included_multiplicities = Counter(included_indices) + not_included_multiplicities = Counter(not_included_indices) + return ( + spec.Gwei(inclusion_reward * included_multiplicities[participant_index]), + spec.Gwei(inclusion_reward * not_included_multiplicities[participant_index]) + ) + + +def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits): + proposer_reward_denominator = spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT + inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) + participant_number = committee_bits.count(True) + participant_reward = inclusion_reward * spec.PROPOSER_WEIGHT // proposer_reward_denominator + return spec.Gwei(participant_reward * participant_number) + + +def compute_committee_indices(spec, state, committee): + """ + Given a ``committee``, calculate and return the related indices + """ + all_pubkeys = [v.pubkey for v in state.validators] + committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys] + return committee_indices diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/__init__.py b/tests/core/pyspec/eth2spec/test/merge/fork/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py new file mode 100644 index 0000000000..066a656a82 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py @@ -0,0 +1,82 @@ +from eth2spec.test.context import ( + with_phases, + with_custom_state, + with_presets, + spec_test, with_state, + low_balances, misc_balances, large_validator_set, +) +from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.constants import ( + PHASE0, MERGE, + MINIMAL, +) +from eth2spec.test.helpers.state import ( + next_epoch, + next_epoch_via_block, +) +from eth2spec.test.helpers.merge.fork import ( + MERGE_FORK_TEST_META_TAGS, + run_fork_test, +) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_base_state(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_next_epoch(spec, phases, state): + next_epoch(spec, state) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_next_epoch_with_block(spec, phases, state): + next_epoch_via_block(spec, state) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_many_next_epoch(spec, phases, state): + for _ in range(3): + next_epoch(spec, state) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@spec_test 
+@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_random_low_balances(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@spec_test +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_random_misc_balances(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@spec_test +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_random_large_validator_set(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py new file mode 100644 index 0000000000..d790acd3a4 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py @@ -0,0 +1,120 @@ +from random import Random + +from eth2spec.test.context import ( + with_phases, + with_custom_state, + with_presets, + spec_test, with_state, + low_balances, misc_balances, large_validator_set, +) +from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.constants import ( + PHASE0, MERGE, + MINIMAL, +) +from eth2spec.test.helpers.merge.fork import ( + MERGE_FORK_TEST_META_TAGS, + run_fork_test, +) +from eth2spec.test.helpers.random import ( + randomize_state, + randomize_attestation_participation, +) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_0(spec, phases, state): + randomize_state(spec, state, 
rng=Random(1010)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_1(spec, phases, state): + randomize_state(spec, state, rng=Random(2020)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_2(spec, phases, state): + randomize_state(spec, state, rng=Random(3030)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_3(spec, phases, state): + randomize_state(spec, state, rng=Random(4040)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_duplicate_attestations(spec, phases, state): + randomize_state(spec, state, rng=Random(1111)) + # Note: `run_fork_test` empties `current_epoch_attestations` + state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_mismatched_attestations(spec, phases, state): + # Create a random state + randomize_state(spec, state, rng=Random(2222)) + + # Now make two copies + state_0 = state.copy() + state_1 = state.copy() + + # Randomize attestation participation of both + randomize_attestation_participation(spec, state_0, rng=Random(3333)) + randomize_attestation_participation(spec, state_1, rng=Random(4444)) + + # Note: `run_fork_test` empties `current_epoch_attestations` + # Use pending attestations 
from both random states in a single state for testing + state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations + yield from run_fork_test(phases[MERGE], state_0) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_low_balances(spec, phases, state): + randomize_state(spec, state, rng=Random(5050)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_misc_balances(spec, phases, state): + randomize_state(spec, state, rng=Random(6060)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@spec_test +@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_large_validator_set(spec, phases, state): + randomize_state(spec, state, rng=Random(7070)) + yield from run_fork_test(phases[MERGE], state) diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py index 38a050ebcd..c303200667 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py @@ -45,7 +45,7 @@ def test_success_multi_proposer_index_iterations(spec, state): @with_all_phases 
@spec_state_test def test_success_previous_epoch(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_epoch_via_block(spec, state) yield from run_attestation_processing(spec, state, attestation) @@ -96,7 +96,7 @@ def test_before_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_after_epoch_slots(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) # increment past latest inclusion slot transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1) @@ -197,7 +197,7 @@ def test_mismatched_target_and_slot(spec, state): next_epoch_via_block(spec, state) next_epoch_via_block(spec, state) - attestation = get_valid_attestation(spec, state, on_time=False) + attestation = get_valid_attestation(spec, state) attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH sign_attestation(spec, state, attestation) @@ -210,7 +210,7 @@ def test_mismatched_target_and_slot(spec, state): def test_old_target_epoch(spec, state): assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2 - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) # target epoch will be too old to handle @@ -275,7 +275,7 @@ def test_invalid_current_source_root(spec, state): state.previous_justified_checkpoint = spec.Checkpoint(epoch=3, root=b'\x01' * 32) state.current_justified_checkpoint = spec.Checkpoint(epoch=4, root=b'\x32' * 32) - attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1, on_time=False) + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY) # Test logic sanity 
checks: @@ -348,7 +348,7 @@ def test_correct_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_correct_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) yield from run_attestation_processing(spec, state, attestation) @@ -357,7 +357,7 @@ def test_correct_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_correct_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_slots(spec, state, spec.SLOTS_PER_EPOCH) yield from run_attestation_processing(spec, state, attestation) @@ -366,7 +366,7 @@ def test_correct_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_correct_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) @@ -393,7 +393,7 @@ def test_incorrect_head_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) attestation.data.beacon_block_root = b'\x42' * 32 @@ -405,7 +405,7 @@ def test_incorrect_head_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.SLOTS_PER_EPOCH) 
attestation.data.beacon_block_root = b'\x42' * 32 @@ -417,7 +417,7 @@ def test_incorrect_head_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) @@ -448,7 +448,7 @@ def test_incorrect_head_and_target_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) attestation.data.beacon_block_root = b'\x42' * 32 @@ -461,7 +461,7 @@ def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_and_target_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.SLOTS_PER_EPOCH) attestation.data.beacon_block_root = b'\x42' * 32 @@ -474,7 +474,7 @@ def test_incorrect_head_and_target_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_and_target_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) @@ -504,7 +504,7 @@ def test_incorrect_target_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_target_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + 
attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) attestation.data.target.root = b'\x42' * 32 @@ -516,7 +516,7 @@ def test_incorrect_target_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_target_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.SLOTS_PER_EPOCH) attestation.data.target.root = b'\x42' * 32 @@ -528,7 +528,7 @@ def test_incorrect_target_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_target_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py index 0e22e75b85..33e9854b2e 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py @@ -24,6 +24,10 @@ run_slash_and_exit, run_test_full_random_operations, ) +from eth2spec.test.helpers.sync_committee import ( + compute_committee_indices, + compute_sync_committee_participant_reward_and_penalty, +) from eth2spec.test.helpers.constants import PHASE0, MINIMAL from eth2spec.test.context import ( spec_test, spec_state_test, dump_skipping_message, @@ -416,7 +420,7 @@ def test_proposer_slashing(spec, state): yield 'blocks', [signed_block] yield 'post', state - check_proposer_slashing_effect(spec, pre_state, state, slashed_index) + check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block) @with_all_phases @@ -491,7 +495,7 @@ def 
test_multiple_different_proposer_slashings_same_block(spec, state): for proposer_slashing in proposer_slashings: slashed_index = proposer_slashing.signed_header_1.message.proposer_index - check_proposer_slashing_effect(spec, pre_state, state, slashed_index) + check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block) def check_attester_slashing_effect(spec, pre_state, state, slashed_indices): @@ -743,7 +747,8 @@ def test_deposit_top_up(spec, state): initial_balances_len = len(state.balances) validator_pre_balance = get_balance(state, validator_index) - yield 'pre', state + pre_state = state.copy() + yield 'pre', pre_state block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) @@ -755,7 +760,23 @@ def test_deposit_top_up(spec, state): assert len(state.validators) == initial_registry_len assert len(state.balances) == initial_balances_len - assert get_balance(state, validator_index) == validator_pre_balance + amount + + # Altair introduces sync committee (SC) reward and penalty + sync_committee_reward = sync_committee_penalty = 0 + if is_post_altair(spec): + committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) + committee_bits = block.body.sync_aggregate.sync_committee_bits + sync_committee_reward, sync_committee_penalty = compute_sync_committee_participant_reward_and_penalty( + spec, + pre_state, + validator_index, + committee_indices, + committee_bits, + ) + + assert get_balance(state, validator_index) == ( + validator_pre_balance + amount + sync_committee_reward - sync_committee_penalty + ) @with_all_phases @@ -771,7 +792,7 @@ def test_attestation(spec, state): # if spec.fork == SHARDING: # TODO add shard data to block to vote on - attestation = get_valid_attestation(spec, state, index=index, signed=True, on_time=True) + attestation = get_valid_attestation(spec, state, index=index, signed=True) if not is_post_altair(spec): pre_current_attestations_len =
len(state.current_epoch_attestations) diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py new file mode 100644 index 0000000000..b39b011b4f --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py @@ -0,0 +1,74 @@ +from eth2spec.test.context import ( + spec_state_test, + with_all_phases, + is_post_altair, +) +from eth2spec.test.helpers.constants import MAX_UINT_64 + + +def check_bound(value, lower_bound, upper_bound): + assert value >= lower_bound + assert value <= upper_bound + + +@with_all_phases +@spec_state_test +def test_validators(spec, state): + check_bound(spec.VALIDATOR_REGISTRY_LIMIT, 1, MAX_UINT_64) + check_bound(spec.MAX_COMMITTEES_PER_SLOT, 1, MAX_UINT_64) + check_bound(spec.TARGET_COMMITTEE_SIZE, 1, MAX_UINT_64) + + # Note: can be less if you assume stricter bounds on validator set based on total ETH supply + maximum_validators_per_committee = ( + spec.VALIDATOR_REGISTRY_LIMIT + // spec.SLOTS_PER_EPOCH + // spec.MAX_COMMITTEES_PER_SLOT + ) + check_bound(spec.MAX_VALIDATORS_PER_COMMITTEE, 1, maximum_validators_per_committee) + check_bound(spec.config.MIN_PER_EPOCH_CHURN_LIMIT, 1, spec.VALIDATOR_REGISTRY_LIMIT) + check_bound(spec.config.CHURN_LIMIT_QUOTIENT, 1, spec.VALIDATOR_REGISTRY_LIMIT) + + check_bound(spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, spec.TARGET_COMMITTEE_SIZE, MAX_UINT_64) + + +@with_all_phases +@spec_state_test +def test_balances(spec, state): + assert spec.MAX_EFFECTIVE_BALANCE % spec.EFFECTIVE_BALANCE_INCREMENT == 0 + check_bound(spec.MIN_DEPOSIT_AMOUNT, 1, MAX_UINT_64) + check_bound(spec.MAX_EFFECTIVE_BALANCE, spec.MIN_DEPOSIT_AMOUNT, MAX_UINT_64) + check_bound(spec.MAX_EFFECTIVE_BALANCE,
spec.EFFECTIVE_BALANCE_INCREMENT, MAX_UINT_64) + + +@with_all_phases +@spec_state_test +def test_hysteresis_quotient(spec, state): + check_bound(spec.HYSTERESIS_QUOTIENT, 1, MAX_UINT_64) + check_bound(spec.HYSTERESIS_DOWNWARD_MULTIPLIER, 1, spec.HYSTERESIS_QUOTIENT) + check_bound(spec.HYSTERESIS_UPWARD_MULTIPLIER, spec.HYSTERESIS_QUOTIENT, MAX_UINT_64) + + +@with_all_phases +@spec_state_test +def test_incentives(spec, state): + # Ensure no ETH is minted in slash_validator + if is_post_altair(spec): + assert spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR <= spec.WHISTLEBLOWER_REWARD_QUOTIENT + else: + assert spec.MIN_SLASHING_PENALTY_QUOTIENT <= spec.WHISTLEBLOWER_REWARD_QUOTIENT + + +@with_all_phases +@spec_state_test +def test_time(spec, state): + assert spec.SLOTS_PER_EPOCH <= spec.SLOTS_PER_HISTORICAL_ROOT + assert spec.MIN_SEED_LOOKAHEAD < spec.MAX_SEED_LOOKAHEAD + assert spec.SLOTS_PER_HISTORICAL_ROOT % spec.SLOTS_PER_EPOCH == 0 + check_bound(spec.SLOTS_PER_HISTORICAL_ROOT, spec.SLOTS_PER_EPOCH, MAX_UINT_64) + check_bound(spec.MIN_ATTESTATION_INCLUSION_DELAY, 1, spec.SLOTS_PER_EPOCH) + + +@with_all_phases +@spec_state_test +def test_networking(spec, state): + assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/__init__.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py index 9b18f8bdae..5a1b61d0be 100644 --- a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -2,6 +2,7 @@ # Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec. 
from remerkleable.complex import Container, Vector, List +from remerkleable.union import Union from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256 from remerkleable.bitfields import Bitvector, Bitlist from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList diff --git a/tests/formats/epoch_processing/README.md b/tests/formats/epoch_processing/README.md index d9abcaf98b..1032026a63 100644 --- a/tests/formats/epoch_processing/README.md +++ b/tests/formats/epoch_processing/README.md @@ -33,7 +33,7 @@ The provided pre-state is already transitioned to just before the specific sub-t Sub-transitions: - `justification_and_finalization` -- `inactivity_penalty_updates` +- `inactivity_updates` (Altair) - `rewards_and_penalties` - `registry_updates` - `slashings` @@ -42,7 +42,8 @@ Sub-transitions: - `slashings_reset` - `randao_mixes_reset` - `historical_roots_update` -- `participation_record_updates` -- `sync_committee_updates` +- `participation_record_updates` (Phase 0 only) +- `participation_flag_updates` (Altair) +- `sync_committee_updates` (Altair) The resulting state should match the expected `post` state. 
diff --git a/tests/formats/forks/README.md b/tests/formats/forks/README.md index 36ce942d78..1d3b18d0d8 100644 --- a/tests/formats/forks/README.md +++ b/tests/formats/forks/README.md @@ -23,6 +23,7 @@ Key of valid `fork` strings that might be found in `meta.yaml` | String ID | Pre-fork | Post-fork | Function | | - | - | - | - | | `altair` | Phase 0 | Altair | `upgrade_to_altair` | +| `merge` | Phase 0 | Merge | `upgrade_to_merge` | ### `pre.ssz_snappy` diff --git a/tests/formats/operations/README.md b/tests/formats/operations/README.md index f562a6f2aa..c69d798d77 100644 --- a/tests/formats/operations/README.md +++ b/tests/formats/operations/README.md @@ -41,7 +41,7 @@ Operations: | `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` | | `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` | | `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` | -| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_committee(state, sync_aggregate)` (new in Altair) | +| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_aggregate(state, sync_aggregate)` (new in Altair) | | `execution_payload` | `ExecutionPayload` | `execution_payload` | `process_execution_payload(state, execution_payload)` (new in Merge) | Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here. 
diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index a3d0f82be7..7203bac936 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -17,6 +17,8 @@ ]} altair_mods = { **{key: 'eth2spec.test.altair.epoch_processing.test_process_' + key for key in [ + 'inactivity_updates', + 'participation_flag_updates', 'sync_committee_updates', ]}, **phase_0_mods, diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index 554d0b30ad..57fc6dd967 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -13,7 +13,7 @@ ]} altair_mods = { **{key: 'eth2spec.test.altair.block_processing.test_process_' + key for key in [ - 'sync_committee', + 'sync_aggregate', ]}, **phase_0_mods, } # also run the previous phase 0 tests