From 4c02dbadd00fb726672d4f346e723e0b4368ffc0 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 22 May 2021 18:43:29 +0200 Subject: [PATCH 01/82] enable inactivity updates test generator output, implement new participation flag updates testing --- ...test_process_participation_flag_updates.py | 110 ++++++++++++++++++ tests/generators/epoch_processing/main.py | 2 + 2 files changed, 112 insertions(+) create mode 100644 tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py new file mode 100644 index 0000000000..b98713bfe0 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py @@ -0,0 +1,110 @@ +from random import Random + +from eth2spec.test.helpers.constants import MINIMAL +from eth2spec.test.context import ( + with_altair_and_later, + with_custom_state, + spec_test, spec_state_test, + with_presets, + single_phase, +) +from eth2spec.test.helpers.state import next_epoch_via_block +from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with + + +def run_process_participation_flag_updates(spec, state): + yield from run_epoch_processing_with(spec, state, 'process_participation_flag_updates') + + +@with_altair_and_later +@spec_state_test +def test_zeroed(spec, state): + next_epoch_via_block(spec, state) + state.current_epoch_participation = [0] * len(state.validators) + state.previous_epoch_participation = [0] * len(state.validators) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_filled(spec, state): + next_epoch_via_block(spec, state) + + full_flags = spec.ParticipationFlags(0) + for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)): + full_flags = spec.add_flag(full_flags, flag_index) + + state.previous_epoch_participation = [full_flags] * len(state.validators) + state.current_epoch_participation = [full_flags] * len(state.validators) + + yield from run_process_participation_flag_updates(spec, state) + + +def random_flags(spec, state, seed: int, previous=True, current=True): + rng = Random(seed) + count = len(state.validators) + max_flag_value_excl = 2**len(spec.PARTICIPATION_FLAG_WEIGHTS) + if previous: + state.previous_epoch_participation = [rng.randrange(0, max_flag_value_excl) for _ in range(count)] + if current: + state.current_epoch_participation = [rng.randrange(0, max_flag_value_excl) for _ in range(count)] + + +@with_altair_and_later +@spec_state_test +def test_random(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 10) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_random_genesis(spec, state): + random_flags(spec, state, 11) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_zeroing(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 12, current=False) + state.current_epoch_participation = [0] * len(state.validators) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@spec_state_test +def test_prev_zeroed(spec, state): + next_epoch_via_block(spec, state) + random_flags(spec, state, 13, previous=False) + 
state.previous_epoch_participation = [0] * len(state.validators) + yield from run_process_participation_flag_updates(spec, state) + + +def custom_validator_count(factor: float): + def initializer(spec): + num_validators = spec.SLOTS_PER_EPOCH * spec.MAX_COMMITTEES_PER_SLOT * spec.TARGET_COMMITTEE_SIZE + return [spec.MAX_EFFECTIVE_BALANCE] * int(float(int(num_validators)) * factor) + return initializer + + +@with_altair_and_later +@with_presets([MINIMAL], reason="mainnet config requires too many pre-generated public/private keys") +@spec_test +@with_custom_state(balances_fn=custom_validator_count(1.3), threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@single_phase +def test_slightly_larger_random(spec, state): + random_flags(spec, state, 14) + yield from run_process_participation_flag_updates(spec, state) + + +@with_altair_and_later +@with_presets([MINIMAL], reason="mainnet config requires too many pre-generated public/private keys") +@spec_test +@with_custom_state(balances_fn=custom_validator_count(2.6), threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@single_phase +def test_large_random(spec, state): + random_flags(spec, state, 15) + yield from run_process_participation_flag_updates(spec, state) diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index a3d0f82be7..7203bac936 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -17,6 +17,8 @@ ]} altair_mods = { **{key: 'eth2spec.test.altair.epoch_processing.test_process_' + key for key in [ + 'inactivity_updates', + 'participation_flag_updates', 'sync_committee_updates', ]}, **phase_0_mods, From ed912f599e9a4c87bac0c3fb4f58776f6bb827ee Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 22 May 2021 18:52:29 +0200 Subject: [PATCH 02/82] add assertions on flag updates --- .../test_process_participation_flag_updates.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py index b98713bfe0..287525608b 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py @@ -13,7 +13,10 @@ def run_process_participation_flag_updates(spec, state): + old = state.current_epoch_participation yield from run_epoch_processing_with(spec, state, 'process_participation_flag_updates') + assert state.current_epoch_participation == [0] * len(state.validators) + assert state.previous_epoch_participation == old @with_altair_and_later From 139117d64784b5ff0d5242436917a548999fabd4 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 22 May 2021 19:00:55 +0200 Subject: [PATCH 03/82] update epoch processing test vector format docs --- tests/formats/epoch_processing/README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/formats/epoch_processing/README.md b/tests/formats/epoch_processing/README.md index d9abcaf98b..33ec79290e 100644 --- a/tests/formats/epoch_processing/README.md +++ b/tests/formats/epoch_processing/README.md @@ -33,7 +33,7 @@ The provided pre-state is already transitioned to just before the specific sub-t Sub-transitions: - `justification_and_finalization` -- `inactivity_penalty_updates` +- `inactivity_penalty_updates` (Altair) - `rewards_and_penalties` - 
`registry_updates` - `slashings` @@ -42,7 +42,8 @@ Sub-transitions: - `slashings_reset` - `randao_mixes_reset` - `historical_roots_update` -- `participation_record_updates` -- `sync_committee_updates` +- `participation_record_updates` (Phase 0 only) +- `participation_flag_updates` (Altair) +- `sync_committee_updates` (Altair) The resulting state should match the expected `post` state. From 96352726bbe3267a0287d6ad187d44fd70a6bf7f Mon Sep 17 00:00:00 2001 From: terence tsao Date: Sat, 22 May 2021 10:31:10 -0700 Subject: [PATCH 04/82] Sharding p2p: minor typo fixes --- specs/sharding/p2p-interface.md | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 47ed52970a..1fef7c60fb 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -17,9 +17,11 @@ - [SignedShardBlob](#signedshardblob) - [Gossip domain](#gossip-domain) - [Topics and messages](#topics-and-messages) - - [Shard blobs: `shard_blob_{subnet_id}`](#shard-blobs-shard_blob_subnet_id) - - [Shard header: `shard_header`](#shard-header-shard_header) - - [Shard proposer slashing: `shard_proposer_slashing`](#shard-proposer-slashing-shard_proposer_slashing) + - [Shard blob subnets](#shard-blob-subnets) + - [`shard_blob_{subnet_id}`](#shard_blob_subnet_id) + - [Global topics](#global-topics) + - [`shard_blob_header`](#shard_blob_header) + - [`shard_proposer_slashing`](#shard_proposer_slashing) @@ -88,12 +90,16 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface. | Name | Message Type | |----------------------------------|---------------------------| | `shard_blob_{subnet_id}` | `SignedShardBlob` | -| `shard_header` | `SignedShardHeader` | +| `shard_blob_header` | `SignedShardBlobHeader` | | `shard_proposer_slashing` | `ShardProposerSlashing` | The [DAS network specification](./das-p2p.md) defines additional topics. -#### Shard blobs: `shard_blob_{subnet_id}` +#### Shard blob subnets + +Shard blob subnets are used to propagate shard blobs to subsections of the network. + +##### `shard_blob_{subnet_id}` Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets. @@ -129,19 +135,23 @@ The following validations MUST pass before forwarding the `signed_blob` (with in the block MAY be queued for later processing while proposers for the blob's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. +#### Global topics + +There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_blob_header`) to +all nodes on the network. Another one is used to propagate validator message (`shard_proposer_slashing`). -#### Shard header: `shard_header` +##### `shard_blob_header` -Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet. +Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_blob_header` subnet. -The following validations MUST pass before forwarding the `signed_shard_header` (with inner `message` as `header`) on the network. +The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network. - _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. 
validate that `header.slot <= current_slot` (a client MAY queue future headers for processing at the appropriate slot). - _[IGNORE]_ The `header` is new enough to be still be processed -- i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` - _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination. -- _[REJECT]_ The proposer signature, `signed_shard_header.signature`, is valid with respect to the `proposer_index` pubkey. +- _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey. - _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, @@ -149,7 +159,7 @@ The following validations MUST pass before forwarding the `signed_shard_header` in such a case _do not_ `REJECT`, instead `IGNORE` this message. -#### Shard proposer slashing: `shard_proposer_slashing` +##### `shard_proposer_slashing` Shard proposer slashings, in the form of `ShardProposerSlashing`, are published to the global `shard_proposer_slashing` topic. From 4a5947d49e5f952b5b7b8952125bcb944978310b Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 24 May 2021 06:49:20 -0700 Subject: [PATCH 05/82] Proto's suggestion --- specs/sharding/p2p-interface.md | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 1fef7c60fb..65393bc0e2 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -20,7 +20,7 @@ - [Shard blob subnets](#shard-blob-subnets) - [`shard_blob_{subnet_id}`](#shard_blob_subnet_id) - [Global topics](#global-topics) - - [`shard_blob_header`](#shard_blob_header) + - [`shard_header`](#shard_header) - [`shard_proposer_slashing`](#shard_proposer_slashing) @@ -30,7 +30,7 @@ ## Introduction The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and -[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite. +[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite. The adjustments and additions for Shards are outlined in this document. ## Constants @@ -90,7 +90,7 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface. | Name | Message Type | |----------------------------------|---------------------------| | `shard_blob_{subnet_id}` | `SignedShardBlob` | -| `shard_blob_header` | `SignedShardBlobHeader` | +| `shard_header` | `SignedShardBlobHeader` | | `shard_proposer_slashing` | `ShardProposerSlashing` | The [DAS network specification](./das-p2p.md) defines additional topics. @@ -124,7 +124,7 @@ The following validations MUST pass before forwarding the `signed_blob` (with in - _[IGNORE]_ The `blob` is new enough to be still be processed -- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)` - _[REJECT]_ The shard blob is for the correct subnet -- - i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` + i.e. 
`compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` - _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. - _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large. - _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid. @@ -137,12 +137,12 @@ The following validations MUST pass before forwarding the `signed_blob` (with in #### Global topics -There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_blob_header`) to +There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_header`) to all nodes on the network. Another one is used to propagate validator message (`shard_proposer_slashing`). -##### `shard_blob_header` +##### `shard_header` -Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_blob_header` subnet. +Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet. The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network. - _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- @@ -168,3 +168,4 @@ The following validations MUST pass before forwarding the `shard_proposer_slashi for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`. The `slot` and `shard` are ignored, there are no per-shard slashings. - _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation. +- From 814c7696d96bab5e62aaea15cee8cdb39e0de6a3 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 24 May 2021 06:50:03 -0700 Subject: [PATCH 06/82] shard_blob_header -> shard_header --- specs/sharding/p2p-interface.md | 1 - 1 file changed, 1 deletion(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 65393bc0e2..5acd9735de 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -168,4 +168,3 @@ The following validations MUST pass before forwarding the `shard_proposer_slashi for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`. The `slot` and `shard` are ignored, there are no per-shard slashings. - _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation. -- From 71d0d453431b21c61e44170d1865d01fc1c21482 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 24 May 2021 06:57:09 -0700 Subject: [PATCH 07/82] Align table --- specs/sharding/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 5acd9735de..b7e229e87a 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -90,7 +90,7 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface. | Name | Message Type | |----------------------------------|---------------------------| | `shard_blob_{subnet_id}` | `SignedShardBlob` | -| `shard_header` | `SignedShardBlobHeader` | +| `shard_header` | `SignedShardBlobHeader` | | `shard_proposer_slashing` | `ShardProposerSlashing` | The [DAS network specification](./das-p2p.md) defines additional topics. 
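
Editor's note on the subnet routing referenced in the patches above: the `_[REJECT]_` rule checks `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`, but the normative definition of that helper lives in the sharding beacon-chain spec rather than in this patch series. The sketch below is illustrative only: it assumes a hypothetical `SHARD_BLOB_SUBNET_COUNT` constant and a committee-offset mapping analogous to `compute_subnet_for_attestation`, and should not be read as the spec's actual definition.

```python
# Editorial sketch (not part of the patch series above): one plausible way a
# client could map a (slot, shard/committee) pair to a `shard_blob_{subnet_id}`
# subnet. SHARD_BLOB_SUBNET_COUNT and the function body are assumptions made
# for illustration only.
SLOTS_PER_EPOCH = 32
SHARD_BLOB_SUBNET_COUNT = 64  # hypothetical value for this sketch


def compute_subnet_for_shard_blob_sketch(committees_per_slot: int, committee_index: int, slot: int) -> int:
    # Offset by how many committees have already been assigned earlier in the epoch,
    # then wrap around the fixed number of blob subnets.
    slots_since_epoch_start = slot % SLOTS_PER_EPOCH
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    return (committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT


# Example: committee 3 in the 5th slot of an epoch, with 4 committees per slot.
assert compute_subnet_for_shard_blob_sketch(4, 3, 5) == 23
```

Under this kind of scheme a node only needs the `(slot, shard)` pair plus the current committee count to check the `_[REJECT]_` subnet condition, which is consistent with how the validation rule is phrased in the patches above.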
From c9f37805656cbae68ba05f6ca054d85ce8410ee5 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 24 May 2021 11:22:30 -0700 Subject: [PATCH 08/82] Clean up outdated attestation helper --- .../test_process_attestation.py | 7 ++- .../test_process_chunk_challenge.py | 46 +++++++++---------- .../test_process_custody_slashing.py | 6 +-- .../test_process_challenge_deadlines.py | 6 +-- .../test_process_custody_final_updates.py | 10 ++-- .../test/custody_game/sanity/test_blocks.py | 6 +-- .../eth2spec/test/helpers/attestations.py | 43 ++--------------- .../test_process_attestation.py | 34 +++++++------- .../test/phase0/sanity/test_blocks.py | 2 +- 9 files changed, 62 insertions(+), 98 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py index 707ac0b2ec..4ed3f50885 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_attestation.py @@ -7,8 +7,7 @@ from eth2spec.test.helpers.state import transition_to from eth2spec.test.helpers.attestations import ( run_attestation_processing, - get_valid_late_attestation, - get_valid_on_time_attestation, + get_valid_attestation, ) @@ -16,7 +15,7 @@ @spec_state_test @always_bls def test_on_time_success(spec, state): - attestation = get_valid_on_time_attestation(spec, state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -27,7 +26,7 @@ def test_on_time_success(spec, state): @spec_state_test @always_bls def test_late_success(spec, state): - attestation = get_valid_late_attestation(spec, state, signed=True) + attestation = get_valid_attestation(spec, state, signed=True) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py index 87f0238fb9..cc12b66f5e 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_chunk_challenge.py @@ -4,7 +4,7 @@ get_sample_shard_transition, ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, @@ -80,8 +80,8 @@ def test_challenge_appended(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -104,8 +104,8 @@ def test_challenge_empty_element_replaced(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + 
attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -130,8 +130,8 @@ def test_duplicate_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -156,8 +156,8 @@ def test_second_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -185,8 +185,8 @@ def test_multiple_epochs_custody(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -210,8 +210,8 @@ def test_many_epochs_custody(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -235,8 +235,8 @@ def test_off_chain_attestation(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)) @@ -256,8 +256,8 @@ def test_custody_response(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -287,8 +287,8 @@ def test_custody_response_chunk_index_2(spec, state): shard = 0 offset_slots = 
spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -319,8 +319,8 @@ def test_custody_response_multiple_epochs(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -351,8 +351,8 @@ def test_custody_response_many_epochs(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py index 7ee5cd394c..4891c7b236 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/block_processing/test_process_custody_slashing.py @@ -3,7 +3,7 @@ get_custody_slashable_shard_transition, ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.constants import ( CUSTODY_GAME, @@ -96,8 +96,8 @@ def run_standard_custody_slashing_test(spec, slashable=correct, ) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py index 7332dcc80d..144ea02135 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_challenge_deadlines.py @@ -3,7 +3,7 @@ get_sample_shard_transition, ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( @@ -36,8 +36,8 @@ def test_validator_slashed_after_chunk_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, 
[2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py index 92c311a29e..d8dd3d19e8 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/epoch_processing/test_process_custody_final_updates.py @@ -8,7 +8,7 @@ get_sample_shard_transition ) from eth2spec.test.helpers.attestations import ( - get_valid_on_time_attestation, + get_valid_attestation, ) from eth2spec.test.helpers.state import next_epoch_via_block, transition_to, transition_to_valid_shard_slot from eth2spec.test.context import ( @@ -77,8 +77,8 @@ def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state): shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) @@ -126,8 +126,8 @@ def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state) shard = 0 offset_slots = spec.get_offset_slots(state, shard) shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots)) - attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True, - shard_transition=shard_transition) + attestation = get_valid_attestation(spec, state, index=shard, signed=True, + shard_transition=shard_transition) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) diff --git a/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py index f242c361b0..77ce3c5add 100644 --- a/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/custody_game/sanity/test_blocks.py @@ -5,7 +5,7 @@ spec_state_test, with_presets, ) -from eth2spec.test.helpers.attestations import get_valid_on_time_attestation +from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.constants import ( CUSTODY_GAME, @@ -60,7 +60,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state): shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True) shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_transitions = get_shard_transitions(spec, state, shard_block_dict) - attestation = get_valid_on_time_attestation( + attestation = get_valid_attestation( spec, state, index=committee_index, shard_transition=shard_transitions[shard], signed=True, ) @@ -127,7 +127,7 @@ def test_custody_slashing(spec, state): shard_block_dict: Dict[spec.Shard, 
Sequence[spec.SignedShardBlock]] = {shard: [shard_block]} shard_transitions = get_shard_transitions(spec, state, shard_block_dict) - attestation = get_valid_on_time_attestation( + attestation = get_valid_attestation( spec, state, index=committee_index, shard_transition=shard_transitions[shard], signed=True, ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index b55aff9e51..c92860ffa7 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -50,7 +50,7 @@ def run_attestation_processing(spec, state, attestation, valid=True): yield 'post', state -def build_attestation_data(spec, state, slot, index, shard=None, on_time=True): +def build_attestation_data(spec, state, slot, index, shard=None): assert state.slot >= slot if slot == state.slot: @@ -85,45 +85,12 @@ def build_attestation_data(spec, state, slot, index, shard=None, on_time=True): return data -def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False): - ''' - Construct on-time attestation for next slot - ''' - if slot is None: - slot = state.slot - if index is None: - index = 0 - - return get_valid_attestation( - spec, - state, - slot=slot, - index=index, - signed=signed, - on_time=True, - ) - - -def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False): - ''' - Construct on-time attestation for next slot - ''' - if slot is None: - slot = state.slot - if index is None: - index = 0 - - return get_valid_attestation(spec, state, slot=slot, index=index, - signed=signed, on_time=False) - - def get_valid_attestation(spec, state, slot=None, index=None, filter_participant_set=None, - signed=False, - on_time=True): + signed=False): # If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed. # Thus strictly speaking invalid when no participant is added later. 
if slot is None: @@ -132,7 +99,7 @@ def get_valid_attestation(spec, index = 0 attestation_data = build_attestation_data( - spec, state, slot=slot, index=index, on_time=on_time + spec, state, slot=slot, index=index ) beacon_committee = spec.get_beacon_committee( @@ -219,7 +186,7 @@ def add_attestations_to_state(spec, state, attestations, slot): spec.process_attestation(state, attestation) -def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None, on_time=True): +def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None): committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest)) for index in range(committees_per_slot): def participants_filter(comm): @@ -234,7 +201,6 @@ def participants_filter(comm): slot_to_attest, index=index, signed=True, - on_time=on_time, filter_participant_set=participants_filter ) @@ -269,7 +235,6 @@ def next_slots_with_attestations(spec, post_state, spec, slot_to_attest, - on_time=False, participation_fn=participation_fn ) for attestation in attestations: diff --git a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py index 38a050ebcd..c303200667 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py +++ b/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py @@ -45,7 +45,7 @@ def test_success_multi_proposer_index_iterations(spec, state): @with_all_phases @spec_state_test def test_success_previous_epoch(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_epoch_via_block(spec, state) yield from run_attestation_processing(spec, state, attestation) @@ -96,7 +96,7 @@ def test_before_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_after_epoch_slots(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) # increment past latest inclusion slot transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1) @@ -197,7 +197,7 @@ def test_mismatched_target_and_slot(spec, state): next_epoch_via_block(spec, state) next_epoch_via_block(spec, state) - attestation = get_valid_attestation(spec, state, on_time=False) + attestation = get_valid_attestation(spec, state) attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH sign_attestation(spec, state, attestation) @@ -210,7 +210,7 @@ def test_mismatched_target_and_slot(spec, state): def test_old_target_epoch(spec, state): assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2 - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) # target epoch will be too old to handle @@ -275,7 +275,7 @@ def test_invalid_current_source_root(spec, state): state.previous_justified_checkpoint = spec.Checkpoint(epoch=3, root=b'\x01' * 32) state.current_justified_checkpoint = spec.Checkpoint(epoch=4, root=b'\x32' * 32) - attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1, on_time=False) + attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1) next_slots(spec, state, 
spec.MIN_ATTESTATION_INCLUSION_DELAY) # Test logic sanity checks: @@ -348,7 +348,7 @@ def test_correct_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_correct_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) yield from run_attestation_processing(spec, state, attestation) @@ -357,7 +357,7 @@ def test_correct_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_correct_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) next_slots(spec, state, spec.SLOTS_PER_EPOCH) yield from run_attestation_processing(spec, state, attestation) @@ -366,7 +366,7 @@ def test_correct_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_correct_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=True, on_time=False) + attestation = get_valid_attestation(spec, state, signed=True) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) @@ -393,7 +393,7 @@ def test_incorrect_head_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) attestation.data.beacon_block_root = b'\x42' * 32 @@ -405,7 +405,7 @@ def test_incorrect_head_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.SLOTS_PER_EPOCH) attestation.data.beacon_block_root = b'\x42' * 32 @@ -417,7 +417,7 @@ def test_incorrect_head_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) @@ -448,7 +448,7 @@ def test_incorrect_head_and_target_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) attestation.data.beacon_block_root = b'\x42' * 32 @@ -461,7 +461,7 @@ def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_and_target_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.SLOTS_PER_EPOCH) attestation.data.beacon_block_root = b'\x42' * 32 @@ -474,7 +474,7 @@ def test_incorrect_head_and_target_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_head_and_target_after_epoch_delay(spec, 
state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) @@ -504,7 +504,7 @@ def test_incorrect_target_min_inclusion_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_target_sqrt_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH)) attestation.data.target.root = b'\x42' * 32 @@ -516,7 +516,7 @@ def test_incorrect_target_sqrt_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_target_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) next_slots(spec, state, spec.SLOTS_PER_EPOCH) attestation.data.target.root = b'\x42' * 32 @@ -528,7 +528,7 @@ def test_incorrect_target_epoch_delay(spec, state): @with_all_phases @spec_state_test def test_incorrect_target_after_epoch_delay(spec, state): - attestation = get_valid_attestation(spec, state, signed=False, on_time=False) + attestation = get_valid_attestation(spec, state, signed=False) # increment past latest inclusion slot next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1) diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py index 0e22e75b85..dba6238556 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py @@ -771,7 +771,7 @@ def test_attestation(spec, state): # if spec.fork == SHARDING: # TODO add shard data to block to vote on - attestation = get_valid_attestation(spec, state, index=index, signed=True, on_time=True) + attestation = get_valid_attestation(spec, state, index=index, signed=True) if not is_post_altair(spec): pre_current_attestations_len = len(state.current_epoch_attestations) From 41ca149429790fed7464410581c59b38d9b8d2ec Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Mon, 24 May 2021 11:51:08 -0700 Subject: [PATCH 09/82] Explicitly define `bls.AggregatePKs` --- setup.py | 19 +++++++++++++++++-- specs/altair/beacon-chain.md | 30 +++++++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 8095e3b7f4..d840c34371 100644 --- a/setup.py +++ b/setup.py @@ -55,6 +55,12 @@ def floorlog2(x: int) -> uint64: ''' +OPTIMIZED_BLS_AGGREGATE_PUBKEYS = ''' +def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: + return bls.AggregatePKs(pubkeys) +''' + + class ProtocolDefinition(NamedTuple): # just function definitions currently. May expand with configuration vars in future. 
functions: Dict[str, str] @@ -305,6 +311,10 @@ def invariant_checks(cls) -> str: """ raise NotImplementedError() + @classmethod + def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: + return functions + @classmethod @abstractmethod def build_spec(cls, preset_name: str, @@ -482,6 +492,10 @@ def invariant_checks(cls) -> str: TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT ) == WEIGHT_DENOMINATOR''' + @classmethod + def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: + if "eth2_aggregate_pubkeys" in functions: + return {**functions, **{"eth2_aggregate_pubkeys": OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()}} # # MergeSpecBuilder @@ -588,7 +602,8 @@ def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str for k in list(spec_object.functions): if "ceillog2" in k or "floorlog2" in k: del spec_object.functions[k] - functions_spec = '\n\n\n'.join(spec_object.functions.values()) + functions = builder.implement_optimizations(spec_object.functions) + functions_spec = '\n\n\n'.join(functions.values()) # Access global dict of config vars for runtime configurables for name in spec_object.config_vars.keys(): @@ -831,7 +846,7 @@ def initialize_options(self): self.out_dir = 'pyspec_output' self.build_targets = """ minimal:presets/minimal:configs/minimal.yaml - mainnet:presets/mainnet:configs/mainnet.yaml + mainnet:presets/mainnet:configs/mainnet.yaml """ def finalize_options(self): diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 7412a8490c..cd877161cb 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -26,6 +26,8 @@ - [`SyncAggregate`](#syncaggregate) - [`SyncCommittee`](#synccommittee) - [Helper functions](#helper-functions) + - [Crypto](#crypto) + - [BLS public keys](#bls-public-keys) - [`Predicates`](#predicates) - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify) - [Misc](#misc-1) @@ -221,6 +223,32 @@ class SyncCommittee(Container): ## Helper functions + +### Crypto + +#### BLS public keys + +An additional function `AggregatePKs` is defined to extend the +[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) +spec referenced in the phase 0 document. + +```python +def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: + """ + Return the aggregate public key for the public keys in ``pubkeys``. + + NOTE: the ``+`` operation should be interpreted as elliptic curve point addition, which takes as input + elliptic curve points that must be decoded from the input ``BLSPubkey``s. + This implementation is for demonstrative purposes only and ignores encoding/decoding concerns. + Refer to the BLS signature draft standard for more information. 
+ """ + assert len(pubkeys) > 0 + result = copy(pubkeys[0]) + for pubkey in pubkeys[1:]: + result += pubkey + return result +``` + ### `Predicates` #### `eth2_fast_aggregate_verify` @@ -310,7 +338,7 @@ def get_next_sync_committee(state: BeaconState) -> SyncCommittee: """ indices = get_next_sync_committee_indices(state) pubkeys = [state.validators[index].pubkey for index in indices] - aggregate_pubkey = bls.AggregatePKs(pubkeys) + aggregate_pubkey = eth2_aggregate_pubkeys(pubkeys) return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey) ``` From fc1af1cff377ce68a5d2c54a415084632b4e53ab Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 25 May 2021 21:13:12 +0800 Subject: [PATCH 10/82] [pyspec] Use mainnet.py as the default spec --- setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8095e3b7f4..77b38b6405 100644 --- a/setup.py +++ b/setup.py @@ -912,7 +912,8 @@ def run(self): if not self.dry_run: with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out: - out.write("") + # `mainnet` is the default spec. + out.write("from . import mainnet as spec\n") class BuildPyCommand(build_py): From 74761db7a38a0eb81fc5b5b246f4ea2f2065fe9d Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 25 May 2021 21:40:10 +0800 Subject: [PATCH 11/82] Fix lint error --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 77b38b6405..99e501d9f3 100644 --- a/setup.py +++ b/setup.py @@ -913,7 +913,7 @@ def run(self): if not self.dry_run: with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out: # `mainnet` is the default spec. - out.write("from . import mainnet as spec\n") + out.write("from . import mainnet as spec # noqa:F401\n") class BuildPyCommand(build_py): From 715e450e0b7e8e78c2edbe0ee56babb30557ca56 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 26 May 2021 00:18:59 +0800 Subject: [PATCH 12/82] Generate coverage report on `minimal` config spec by default --- Makefile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 007f49f5b3..d637d26cf8 100644 --- a/Makefile +++ b/Makefile @@ -94,13 +94,15 @@ pyspec: install_test: python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[lint]; python3 -m pip install -e .[test] +# Testing against `minimal` config by default test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.mainnet --cov=eth2spec.altair.mainnet --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec +# Testing against `minimal` config by default find_test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.mainnet --cov=eth2spec.altair.mainnet --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec citest: pyspec mkdir -p tests/core/pyspec/test-reports/eth2spec; . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ From 4664ccbc4276ee1613e598a654dd3948a90b316b Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 25 May 2021 09:30:38 -0700 Subject: [PATCH 13/82] Fix bug in Altair transition tests with missing state root --- .../test/altair/transition/test_transition.py | 10 +++++----- tests/core/pyspec/eth2spec/test/helpers/state.py | 12 ++++++++++-- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py index 20e13b9bab..62740df4ed 100644 --- a/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py +++ b/tests/core/pyspec/eth2spec/test/altair/transition/test_transition.py @@ -1,7 +1,7 @@ import random from eth2spec.test.context import fork_transition_test from eth2spec.test.helpers.constants import PHASE0, ALTAIR -from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, next_epoch_via_block +from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, next_epoch_via_signed_block from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block from eth2spec.test.helpers.attestations import next_slots_with_attestations @@ -261,12 +261,12 @@ def _run_transition_test_with_attestations(state, assert current_epoch == spec.GENESIS_EPOCH # skip genesis epoch to avoid dealing with some edge cases... - block = next_epoch_via_block(spec, state) + block = next_epoch_via_signed_block(spec, state) # regular state transition until fork: fill_cur_epoch = False fill_prev_epoch = True - blocks = [pre_tag(sign_block(spec, state, block))] + blocks = [pre_tag(block)] current_epoch = spec.get_current_epoch(state) for _ in range(current_epoch, fork_epoch - 1): _, blocks_in_epoch, state = next_slots_with_attestations( @@ -414,8 +414,8 @@ def test_transition_with_no_attestations_until_after_fork(state, fork_epoch, spe # continue regular state transition but add attestations # for enough epochs to finalize the ``fork_epoch`` - block = next_epoch_via_block(post_spec, state) - blocks.append(post_tag(sign_block(post_spec, state, block))) + block = next_epoch_via_signed_block(post_spec, state) + blocks.append(post_tag(block)) for _ in range(4): _, blocks_in_epoch, state = next_slots_with_attestations( post_spec, diff --git a/tests/core/pyspec/eth2spec/test/helpers/state.py b/tests/core/pyspec/eth2spec/test/helpers/state.py index ef09c6e07a..05f0e9013a 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/state.py +++ b/tests/core/pyspec/eth2spec/test/helpers/state.py @@ -58,11 +58,19 @@ def next_epoch(spec, state): spec.process_slots(state, slot) -def next_epoch_via_block(spec, state): +def next_epoch_via_block(spec, state, insert_state_root=False): """ Transition to the start slot of the next epoch via a full block transition """ - return apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) + block = apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) + if insert_state_root: + block.state_root = state.hash_tree_root() + return block + + +def next_epoch_via_signed_block(spec, state): + block = next_epoch_via_block(spec, state, insert_state_root=True) + return sign_block(spec, state, block) def get_state_root(spec, state, slot) -> bytes: From 69f2a3140639d036f7c9766b758e26c7de87afe5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 26 May 
2021 00:13:33 +0800 Subject: [PATCH 14/82] Add some config invariant checks --- setup.py | 20 -------- .../test/altair/unittests/__init__.py | 0 .../unittests/test_config_invariants.py | 17 +++++++ .../pyspec/eth2spec/test/helpers/constants.py | 6 +++ .../test/phase0/unittests/__init__.py | 0 .../unittests/test_config_invariants.py | 49 +++++++++++++++++++ .../phase0/unittests/validator/__init__.py | 0 7 files changed, 72 insertions(+), 20 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/altair/unittests/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py create mode 100644 tests/core/pyspec/eth2spec/test/phase0/unittests/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py create mode 100644 tests/core/pyspec/eth2spec/test/phase0/unittests/validator/__init__.py diff --git a/setup.py b/setup.py index 99e501d9f3..e87bc7e4a6 100644 --- a/setup.py +++ b/setup.py @@ -297,14 +297,6 @@ def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: # TODO """ raise NotImplementedError() - @classmethod - @abstractmethod - def invariant_checks(cls) -> str: - """ - The invariant checks - """ - raise NotImplementedError() - @classmethod @abstractmethod def build_spec(cls, preset_name: str, @@ -425,10 +417,6 @@ def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]: def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: return {} - @classmethod - def invariant_checks(cls) -> str: - return '' - @classmethod def build_spec(cls, preset_name: str, source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str: @@ -475,13 +463,6 @@ def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]: } return {**super().hardcoded_ssz_dep_constants(), **constants} - @classmethod - def invariant_checks(cls) -> str: - return ''' -assert ( - TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT -) == WEIGHT_DENOMINATOR''' - # # MergeSpecBuilder @@ -647,7 +628,6 @@ def format_constant(name: str, vardef: VariableDefinition) -> str: # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are # as same as the spec definition. 
+ ('\n\n\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '') - + ('\n' + builder.invariant_checks() if builder.invariant_checks() != '' else '') + '\n' ) return spec diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/altair/unittests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py new file mode 100644 index 0000000000..d24b56adc1 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py @@ -0,0 +1,17 @@ +from eth2spec.test.context import ( + spec_state_test, + with_phases, +) +from eth2spec.test.helpers.constants import ALTAIR + + +@with_phases([ALTAIR]) +@spec_state_test +def test_weight_denominator(spec, state): + assert ( + spec.TIMELY_HEAD_WEIGHT + + spec.TIMELY_SOURCE_WEIGHT + + spec.TIMELY_TARGET_WEIGHT + + spec.SYNC_REWARD_WEIGHT + + spec.PROPOSER_WEIGHT + ) == spec.WEIGHT_DENOMINATOR diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py index 4e98845c47..8f116dc3d7 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -32,3 +32,9 @@ MINIMAL = PresetBaseName('minimal') ALL_PRESETS = (MINIMAL, MAINNET) + + +# +# Number +# +MAX_UINT_64 = 2**64 - 1 diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py new file mode 100644 index 0000000000..fc73e66ac0 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py @@ -0,0 +1,49 @@ +from eth2spec.test.context import ( + spec_state_test, + with_all_phases, + is_post_altair, +) +from eth2spec.test.helpers.constants import MAX_UINT_64 + + +def check_bound(value, lower_bound, upper_bound): + assert value >= lower_bound + assert value <= upper_bound + + +@with_all_phases +@spec_state_test +def test_validators(spec, state): + check_bound(spec.VALIDATOR_REGISTRY_LIMIT, 1, MAX_UINT_64) + check_bound(spec.MAX_COMMITTEES_PER_SLOT, 1, MAX_UINT_64) + check_bound(spec.TARGET_COMMITTEE_SIZE, 1, MAX_UINT_64) + + check_bound(spec.MAX_VALIDATORS_PER_COMMITTEE, 1, spec.VALIDATOR_REGISTRY_LIMIT) + check_bound(spec.config.MIN_PER_EPOCH_CHURN_LIMIT, 1, spec.VALIDATOR_REGISTRY_LIMIT) + check_bound(spec.config.CHURN_LIMIT_QUOTIENT, 1, spec.VALIDATOR_REGISTRY_LIMIT) + + check_bound(spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, spec.TARGET_COMMITTEE_SIZE, MAX_UINT_64) + + +@with_all_phases +@spec_state_test +def test_hysteresis_quotient(spec, state): + check_bound(spec.HYSTERESIS_QUOTIENT, 1, MAX_UINT_64) + check_bound(spec.HYSTERESIS_DOWNWARD_MULTIPLIER, 1, spec.HYSTERESIS_QUOTIENT) + check_bound(spec.HYSTERESIS_UPWARD_MULTIPLIER, spec.HYSTERESIS_QUOTIENT, MAX_UINT_64) + + +@with_all_phases +@spec_state_test +def test_incentives(spec, state): + # Ensure no ETH is minted in slash_validator + if is_post_altair(spec): + assert spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR <= spec.WHISTLEBLOWER_REWARD_QUOTIENT + else: + assert spec.MIN_SLASHING_PENALTY_QUOTIENT <= spec.WHISTLEBLOWER_REWARD_QUOTIENT + + 
+@with_all_phases +@spec_state_test +def test_time(spec, state): + assert spec.SLOTS_PER_EPOCH <= spec.SLOTS_PER_HISTORICAL_ROOT diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/__init__.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/__init__.py new file mode 100644 index 0000000000..e69de29bb2 From b5f9b5d74f795ebb0d6d420166ff796f15e5e504 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 26 May 2021 01:21:04 +0800 Subject: [PATCH 15/82] Add more checks --- .../test/altair/unittests/test_config_invariants.py | 6 ++++++ .../test/phase0/unittests/test_config_invariants.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py index d24b56adc1..4443f97e0b 100644 --- a/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/test_config_invariants.py @@ -15,3 +15,9 @@ def test_weight_denominator(spec, state): + spec.SYNC_REWARD_WEIGHT + spec.PROPOSER_WEIGHT ) == spec.WEIGHT_DENOMINATOR + + +@with_phases([ALTAIR]) +@spec_state_test +def test_inactivity_score(spec, state): + assert spec.config.INACTIVITY_SCORE_BIAS <= spec.config.INACTIVITY_SCORE_RECOVERY_RATE diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py index fc73e66ac0..078b48ea5a 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py @@ -47,3 +47,9 @@ def test_incentives(spec, state): @spec_state_test def test_time(spec, state): assert spec.SLOTS_PER_EPOCH <= spec.SLOTS_PER_HISTORICAL_ROOT + + +@with_all_phases +@spec_state_test +def test_networking(spec, state): + assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT From d71c50f6564c1f7a73f09a20557084df9195679c Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 25 May 2021 19:46:23 +0200 Subject: [PATCH 16/82] Union type update --- ssz/simple-serialize.md | 39 +++++++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 12 deletions(-) diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index d97b8ea1c9..89a1ebc0b8 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -20,7 +20,8 @@ - [`null`](#null) - [`Bitvector[N]`](#bitvectorn) - [`Bitlist[N]`](#bitlistn) - - [Vectors, containers, lists, unions](#vectors-containers-lists-unions) + - [Vectors, containers, lists](#vectors-containers-lists) + - [Union](#union) - [Deserialization](#deserialization) - [Merkleization](#merkleization) - [Summaries and expansions](#summaries-and-expansions) @@ -61,7 +62,7 @@ * **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits * notation `Bitlist[N]` * **union**: union type containing one of the given subtypes - * notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]` + * notation `Union[type_0, type_1, ...]`, e.g. `union[None, uint64, uint32]` *Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies. 
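As an illustration of the `Union` encoding this patch specifies, here is a minimal standalone sketch of the selector-byte serialization described further below (one selector byte followed by the serialized value of the selected option, with the `None` option at selector `0` encoding to a single zero byte). The helper names `serialize_union` and `serialize_uint` are illustrative only and are not spec functions.

```python
def serialize_uint(value: int, byte_length: int) -> bytes:
    # Basic uintN serialization: fixed-length little-endian
    return value.to_bytes(byte_length, "little")


def serialize_union(selector: int, serialized_value) -> bytes:
    # One selector byte, then the serialized selected value.
    # The `None` option is only legal as selector 0 and serializes to a single zero byte.
    assert 0 <= selector < 128  # selectors with the highest bit set are reserved
    if serialized_value is None:
        assert selector == 0
        return b"\x00"
    return selector.to_bytes(1, "little") + serialized_value


# Toy value of type Union[None, uint64, uint32], selecting the uint64 option (selector 1)
assert serialize_union(1, serialize_uint(640, 8)) == b"\x01" + (640).to_bytes(8, "little")
# The None option is a single zero byte
assert serialize_union(0, None) == b"\x00"
```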
@@ -77,7 +78,6 @@ For convenience we alias: * `byte` to `uint8` (this is a basic type) * `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type) * `ByteList[N]` to `List[byte, N]` -* `null`: `{}` ### Default values Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types. @@ -101,7 +101,7 @@ An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it - Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal. - Containers with no fields are illegal. -- The `null` type is only legal as the first type in a union subtype (i.e. with type index zero). +- The `None` type option in a `Union` type is only legal as the first option (i.e. with index zero). ## Serialization @@ -150,7 +150,7 @@ array[len(value) // 8] |= 1 << (len(value) % 8) return bytes(array) ``` -### Vectors, containers, lists, unions +### Vectors, containers, lists ```python # Recursively serialize @@ -170,14 +170,26 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum return b"".join(fixed_parts + variable_parts) ``` -If `value` is a union type: +### Union -Define value as an object that has properties `value.value` with the contained value, and `value.type_index` which indexes the type. +A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`. + +A `Union`: +- May have multiple selectors with the same type. +- Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions. +- Must have at least 1 type option. +- May have `None` as first type option, i.e. `selector == 0` +- Must have at least 2 type options if the first is `None` +- Is always considered a variable-length type, even if all type options have an equal fixed-length. ```python -serialized_bytes = serialize(value.value) -serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little") -return serialized_type_index + serialized_bytes +if value.value is None: + assert value.selector == 0 + return b"\x00" +else: + serialized_bytes = serialize(value.value) + serialized_selector_index = value.selector.to_bytes(1, "little") + return serialized_selector_index + serialized_bytes ``` ## Deserialization @@ -191,12 +203,14 @@ Deserialization can be implemented using a recursive algorithm. The deserializat * The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) * Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects. * In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits. +* The first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type. Note that deserialization requires hardening against invalid inputs. A non-exhaustive list: - Offsets: out of order, out of range, mismatching minimum element size. 
- Scope: Extra unused bytes, not aligned with element size. - More elements than a list limit allows. Part of enforcing consensus. +- An out-of-bounds selected index in an `Union` Efficient algorithms for computing this object can be found in [the implementations](#implementations). @@ -227,7 +241,7 @@ We first define helper functions: - If `1` chunk: the root is the chunk itself. - If `> 1` chunks: merkleize as binary tree. * `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`. -* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`. +* `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` (`"uint256"` little-endian serialization) return `hash(root + selector)`. We now define Merkleization `hash_tree_root(value)` of an object `value` recursively: @@ -237,7 +251,8 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi * `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist. * `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container. * `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects. -* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type. +* `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is of union type, and `value.value` is not `None` +* `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` is `None` ## Summaries and expansions From 0142978b60750a2ec675701dab3c61aae7027300 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 26 May 2021 02:36:54 +0800 Subject: [PATCH 17/82] Rename `SyncCommitteeSignature` to `SyncCommitteeMessage` --- specs/altair/p2p-interface.md | 14 +++++------ specs/altair/validator.md | 44 +++++++++++++++++------------------ 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 6f250b57eb..4b89ab4e17 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -80,7 +80,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar | - | - | | `beacon_block` | `SignedBeaconBlock` (modified) | | `sync_committee_contribution_and_proof` | `SignedContributionAndProof` | -| `sync_committee_{subnet_id}` | `SyncCommitteeSignature` | +| `sync_committee_{subnet_id}` | `SyncCommitteeMessage` | Definitions of these new types can be found in the [Altair validator guide](./validator.md#containers). @@ -139,12 +139,12 @@ Sync committee subnets are used to propagate unaggregated sync committee signatu The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee signatures to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic. -The following validations MUST pass before forwarding the `sync_committee_signature` on the network: +The following validations MUST pass before forwarding the `sync_committee_message` on the network: -- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`. 
-- _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources). -- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`. -- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`. +- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_message.slot == current_slot`. +- _[IGNORE]_ The block being signed over (`sync_committee_message.beacon_block_root`) has been seen (via both gossip and non-gossip sources). +- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`. +- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`. Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee. - _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`. @@ -156,7 +156,7 @@ The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee. Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees. -Unaggregated signatures (along with metadata) are sent as `SyncCommitteeSignature`s on the `sync_committee_{subnet_id}` topics. +Unaggregated signatures (along with metadata) are sent as `SyncCommitteeMessage`s on the `sync_committee_{subnet_id}` topics. Aggregated sync committee signatures are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic. 
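For orientation, a rough sketch of how the contiguous subcommittee slices map to the `sync_committee_{subnet_id}` topics, assuming the Altair preset `SYNC_COMMITTEE_SIZE = 512` and `SYNC_COMMITTEE_SUBNET_COUNT = 4`; the placeholder pubkey strings and the helper name `subcommittee_members` are illustrative only.

```python
SYNC_COMMITTEE_SIZE = 512          # Altair preset (assumed here)
SYNC_COMMITTEE_SUBNET_COUNT = 4    # number of sync committee subnets (assumed here)


def subcommittee_members(sync_committee_pubkeys, subcommittee_index):
    # Each subnet serves one contiguous slice of the sync committee for the whole period.
    size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT  # 128 members per subnet
    start = subcommittee_index * size
    return sync_committee_pubkeys[start:start + size]


# With placeholder pubkeys, the topic "sync_committee_1" carries committee positions 128..255.
pubkeys = [f"pubkey_{i}" for i in range(SYNC_COMMITTEE_SIZE)]
assert subcommittee_members(pubkeys, 1)[0] == "pubkey_128"
assert len(subcommittee_members(pubkeys, 1)) == 128
```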
diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 3b3362b22c..fd118be9a0 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -14,7 +14,7 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./ - [Constants](#constants) - [Misc](#misc) - [Containers](#containers) - - [`SyncCommitteeSignature`](#synccommitteesignature) + - [`SyncCommitteeMessage`](#synccommitteemessage) - [`SyncCommitteeContribution`](#synccommitteecontribution) - [`ContributionAndProof`](#contributionandproof) - [`SignedContributionAndProof`](#signedcontributionandproof) @@ -32,7 +32,7 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./ - [Sync committees](#sync-committees) - [Sync committee signatures](#sync-committee-signatures) - [Prepare sync committee signature](#prepare-sync-committee-signature) - - [Broadcast sync committee signature](#broadcast-sync-committee-signature) + - [Broadcast sync committee message](#broadcast-sync-committee-message) - [Sync committee contributions](#sync-committee-contributions) - [Aggregation selection](#aggregation-selection) - [Construct sync committee contribution](#construct-sync-committee-contribution) @@ -78,10 +78,10 @@ This document is currently illustrative for early Altair testnets and some parts ## Containers -### `SyncCommitteeSignature` +### `SyncCommitteeMessage` ```python -class SyncCommitteeSignature(Container): +class SyncCommitteeMessage(Container): # Slot to which this contribution pertains slot: Slot # Block root for this signature @@ -258,34 +258,34 @@ There is no change compared to the phase 0 document. ### Sync committees Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot. -Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeSignature`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers. +Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeMessage`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers. This process occurs each slot. #### Sync committee signatures ##### Prepare sync committee signature -If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`. +If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`. This logic is triggered upon the same conditions as when producing an attestation. 
-Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first. +Meaning, a sync committee member should produce and broadcast a `SyncCommitteeMessage` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first. -`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. +`get_sync_committee_message(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator. ```python -def get_sync_committee_signature(state: BeaconState, - block_root: Root, - validator_index: ValidatorIndex, - privkey: int) -> SyncCommitteeSignature: +def get_sync_committee_message(state: BeaconState, + block_root: Root, + validator_index: ValidatorIndex, + privkey: int) -> SyncCommitteeMessage: epoch = get_current_epoch(state) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch) signing_root = compute_signing_root(block_root, domain) signature = bls.Sign(privkey, signing_root) - return SyncCommitteeSignature(slot=state.slot, validator_index=validator_index, signature=signature) + return SyncCommitteeMessage(slot=state.slot, validator_index=validator_index, signature=signature) ``` -##### Broadcast sync committee signature +##### Broadcast sync committee message The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic. @@ -312,11 +312,11 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali *Note*: Subnet assignment does not change during the duration of a validator's assignment to a given sync committee. -*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_signature` on each of the distinct subnets. +*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_message` on each of the distinct subnets. #### Sync committee contributions -Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeSignature`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block. 
+Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeMessage`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block. ##### Aggregation selection @@ -347,9 +347,9 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool: ##### Construct sync committee contribution -If a validator is selected to aggregate the `SyncCommitteeSignature`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`. +If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`. -Given all of the (valid) collected `sync_committee_signatures: Set[SyncCommitteeSignature]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: +Given all of the (valid) collected `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields: ###### Slot @@ -357,7 +357,7 @@ Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the ###### Beacon block root -Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_signatures`. +Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_messages`. ###### Subcommittee index @@ -366,15 +366,15 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co ###### Aggregation bits Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee. -An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggegration_bits`. +An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_message.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggegration_bits`. For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution. -*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`. +*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeMessage`. 
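To make the bit-position bookkeeping concrete, a small sketch (plain integers rather than spec types) reproducing the worked example above, assuming `SYNC_COMMITTEE_SIZE = 512` and `SYNC_COMMITTEE_SUBNET_COUNT = 4`:

```python
SYNC_COMMITTEE_SIZE = 512          # Altair preset (assumed here)
SYNC_COMMITTEE_SUBNET_COUNT = 4    # number of sync committee subnets (assumed here)
SUBCOMMITTEE_SIZE = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT  # 128


def subcommittee_position(sync_committee_index: int):
    # Map an index in the full sync committee to (subcommittee_index, position in aggregation_bits)
    return divmod(sync_committee_index, SUBCOMMITTEE_SIZE)


# Sync committee index 135 lands in subcommittee 1 at position 7,
# so bit 7 of that contribution's aggregation_bits is set.
assert subcommittee_position(135) == (1, 7)
```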
###### Signature -Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_signatures` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. +Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee. From cb008f2c1a26b5b1ed13925b65763b921cd127a3 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 26 May 2021 02:40:22 +0800 Subject: [PATCH 18/82] Fix _get_sync_committee_signature helper. Should have used `target_slot` --- .../test/altair/unittests/validator/test_validator.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py index dfe90b5b5d..048e5f43db 100644 --- a/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/altair/unittests/validator/test_validator.py @@ -4,7 +4,7 @@ from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.state import transition_to -from eth2spec.utils import bls +from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature from eth2spec.utils.bls import only_with_bls from eth2spec.test.context import ( with_altair_and_later, @@ -85,12 +85,9 @@ def _get_sync_committee_signature( pubkey = state.current_sync_committee.pubkeys[sync_committee_index] privkey = pubkey_to_privkey[pubkey] - domain = spec.get_domain( - state, - spec.DOMAIN_SYNC_COMMITTEE, + return compute_sync_committee_signature( + spec, state, target_slot, privkey, block_root=target_block_root ) - signing_data = spec.compute_signing_root(target_block_root, domain) - return bls.Sign(privkey, spec.hash_tree_root(signing_data)) @only_with_bls() From a25bc832b930eccd4e3698ad865bc65a67d15e7b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 26 May 2021 03:01:38 +0800 Subject: [PATCH 19/82] Update section headers --- specs/altair/validator.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index fd118be9a0..23f380c233 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -30,8 +30,8 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./ - [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock) - [Attesting and attestation aggregation](#attesting-and-attestation-aggregation) - [Sync committees](#sync-committees) - - [Sync committee signatures](#sync-committee-signatures) - - [Prepare sync committee signature](#prepare-sync-committee-signature) + - [Sync committee messages](#sync-committee-messages) + - [Prepare sync committee message](#prepare-sync-committee-message) - [Broadcast sync committee message](#broadcast-sync-committee-message) - [Sync committee contributions](#sync-committee-contributions) - [Aggregation 
selection](#aggregation-selection) @@ -261,9 +261,9 @@ Sync committee members employ an aggregation scheme to reduce load on the global Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeMessage`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers. This process occurs each slot. -#### Sync committee signatures +#### Sync committee messages -##### Prepare sync committee signature +##### Prepare sync committee message If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`. @@ -374,7 +374,7 @@ For example, if a validator with index `2044` is pseudo-randomly sampled to sync ###### Signature -Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. +Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages`s and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee. From a08d23189e8b29324a01aa0173f6a6156e995320 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Wed, 26 May 2021 15:51:22 -0700 Subject: [PATCH 20/82] Update specs/altair/validator.md Co-authored-by: Hsiao-Wei Wang --- specs/altair/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 23f380c233..2258452c90 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -374,7 +374,7 @@ For example, if a validator with index `2044` is pseudo-randomly sampled to sync ###### Signature -Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages`s and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. +Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`. The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee. 
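The aggregation step itself can be sketched as follows. This is illustrative only: the pairing of `aggregation_bits` with the subcommittee's validator indices and the helper name are assumptions, while `bls.Aggregate` is the function named above.

```python
from eth2spec.utils import bls  # pyspec BLS wrapper providing Aggregate()


def aggregate_contribution_signature(sync_committee_messages, aggregation_bits, subcommittee_validator_indices):
    # One input signature per set bit; a validator that occupies several positions
    # in the subcommittee contributes its signature once per occupied position.
    signature_by_validator = {msg.validator_index: msg.signature for msg in sync_committee_messages}
    signatures = [
        signature_by_validator[validator_index]
        for bit, validator_index in zip(aggregation_bits, subcommittee_validator_indices)
        if bit
    ]
    return bls.Aggregate(signatures)
```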
From 1360860d1e7b2701d6c413186496dc43eecf32b6 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 27 May 2021 03:57:59 +0200 Subject: [PATCH 21/82] add/update comments on shard blob/header/reference body field --- specs/sharding/beacon-chain.md | 3 ++- specs/sharding/p2p-interface.md | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 5522e044de..a15a002e48 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -216,6 +216,7 @@ class ShardBlobHeader(Container): # Slot and shard that this header is intended for slot: Slot shard: Shard + # SSZ-summary of ShardBlobBody body_summary: ShardBlobBodySummary # Proposer of the shard-blob proposer_index: ValidatorIndex @@ -253,7 +254,7 @@ class ShardBlobReference(Container): # Slot and shard that this reference is intended for slot: Slot shard: Shard - # Hash-tree-root of commitment data + # Hash-tree-root of ShardBlobBody body_root: Root # Proposer of the shard-blob proposer_index: ValidatorIndex diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 47ed52970a..39da8166da 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -64,6 +64,7 @@ class ShardBlob(Container): # Slot and shard that this blob is intended for slot: Slot shard: Shard + # Shard data with related commitments and beacon anchor body: ShardBlobBody # Proposer of the shard-blob proposer_index: ValidatorIndex From 103d029a1abf054ead8eaa9c7d4c77facc10ace2 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 27 May 2021 15:28:51 +1000 Subject: [PATCH 22/82] Add clock disparity tolerance for sync subnets --- specs/altair/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 6f250b57eb..c66ed015cb 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -120,7 +120,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 return sync_committee.pubkeys[i:i + sync_subcommittee_size] ``` -- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`. +- _[IGNORE]_ The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `contribution.slot == current_slot` . - _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources). - _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. - _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`. @@ -141,7 +141,7 @@ The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync The following validations MUST pass before forwarding the `sync_committee_signature` on the network: -- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`. +- _[IGNORE]_ The signature's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `sync_committee_signature.slot == current_slot`. - _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources). 
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`. - _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`. From 48f989070db324bf8d37ff231cfb38766de353b8 Mon Sep 17 00:00:00 2001 From: Paul Hauner Date: Thu, 27 May 2021 15:30:44 +1000 Subject: [PATCH 23/82] Remove naughty space --- specs/altair/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index c66ed015cb..1f7d0f76c3 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -120,7 +120,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 return sync_committee.pubkeys[i:i + sync_subcommittee_size] ``` -- _[IGNORE]_ The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `contribution.slot == current_slot` . +- _[IGNORE]_ The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `contribution.slot == current_slot`. - _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources). - _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`. - _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`. From 414ef614cb8f0d19c87d3e31c4c9b1addcb6bdb2 Mon Sep 17 00:00:00 2001 From: Anton Nashatyrev Date: Thu, 27 May 2021 15:13:13 +0300 Subject: [PATCH 24/82] Handle the case when a shard may not have a committee at slot. Block is invalid if contains ShardBlobHeader lacking committee Reject Gossip ShardBlobHeader and ShardBlob messages which lacks committee --- specs/sharding/beacon-chain.md | 18 +++++++++++++++--- specs/sharding/p2p-interface.md | 4 ++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 5522e044de..2a0695c49d 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -455,8 +455,15 @@ def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: Co ```python def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex: + """ + Returns either committee index for ``shard`` at ``slot`` or ``None`` if no committee + """ active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) - return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) + index = (active_shards + shard - get_start_shard(state, slot)) % active_shards + if index >= get_committee_count_per_slot(state, compute_epoch_at_slot(slot)): + return None + else: + return CommitteeIndex(index) ``` @@ -559,6 +566,7 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: header = signed_header.message + committee_index = compute_committee_index_from_shard(state, header.slot, header.shard) # Verify the header is not 0, and not from the future. 
assert Slot(0) < header.slot <= state.slot header_epoch = compute_epoch_at_slot(header.slot) @@ -566,6 +574,8 @@ def process_shard_header(state: BeaconState, assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)] # Verify that the shard is active assert header.shard < get_active_shard_count(state, header_epoch) + # Verify that shard has a committee at slot + assert committee_index is not None # Verify that the block root matches, # to ensure the header will only be included in this specific Beacon Chain sub-tree. assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) @@ -595,8 +605,7 @@ def process_shard_header(state: BeaconState, assert header_root not in [pending_header.root for pending_header in pending_headers] # Include it in the pending list - index = compute_committee_index_from_shard(state, header.slot, header.shard) - committee_length = len(get_beacon_committee(state, header.slot, index)) + committee_length = len(get_beacon_committee(state, header.slot, committee_index)) pending_headers.append(PendingShardHeader( slot=header.slot, shard=header.shard, @@ -693,6 +702,9 @@ def process_pending_headers(state: BeaconState) -> None: # The entire committee (and its balance) index = compute_committee_index_from_shard(state, slot, shard) + if index is None: + # the shard had no committee on this slot + continue full_committee = get_beacon_committee(state, slot, index) # The set of voters who voted for each header (and their total balances) voting_sets = [ diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 47ed52970a..458c9985b6 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -117,6 +117,8 @@ The following validations MUST pass before forwarding the `signed_blob` (with in (a client MAY queue future blobs for processing at the appropriate slot). - _[IGNORE]_ The `blob` is new enough to be still be processed -- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)` +- _[REJECT]_ The shard should have a committee at slot -- + i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard) is not None` - _[REJECT]_ The shard blob is for the correct subnet -- i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` - _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. @@ -141,6 +143,8 @@ The following validations MUST pass before forwarding the `signed_shard_header` - _[IGNORE]_ The `header` is new enough to be still be processed -- i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` - _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination. +- _[REJECT]_ The shard should have a committee at slot -- + i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard) is not None` - _[REJECT]_ The proposer signature, `signed_shard_header.signature`, is valid with respect to the `proposer_index` pubkey. - _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). 
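A toy illustration (plain integers rather than spec types; the helper name is illustrative) of why a shard can lack a committee at a slot, matching the `compute_committee_index_from_shard` behaviour this patch relies on:

```python
def committee_index_from_shard(shard, start_shard, active_shards, committees_per_slot):
    # Mirrors the spec logic: only `committees_per_slot` shards, starting at `start_shard`,
    # have a committee in a given slot; every other shard yields None.
    index = (active_shards + shard - start_shard) % active_shards
    return index if index < committees_per_slot else None


# Example: 64 active shards, 4 committees this slot, starting at shard 60.
# Shards 60..63 are covered; shard 2 is not, so its headers/blobs are rejected on gossip.
assert committee_index_from_shard(62, start_shard=60, active_shards=64, committees_per_slot=4) == 2
assert committee_index_from_shard(2, start_shard=60, active_shards=64, committees_per_slot=4) is None
```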
From a89ecced1cd4fdac79f9dc6e12e91c4e579ec0ac Mon Sep 17 00:00:00 2001 From: vbuterin Date: Thu, 27 May 2021 09:02:51 -0600 Subject: [PATCH 25/82] Modify sync committee logic and parameters to reduce variance Sync committee rewards as currently implemented significantly increase variance in proposer rewards: https://github.com/ethereum/eth2.0-specs/issues/2448 For example, if there are 200000 validators (6.4m ETH staked), then during each 1/4-eek (~54 hour) period there is a chance of 512/200000 that a validator will get accepted into the sync committee, so on average that will happen once every 200000/512 * 1/4 = 97.6 eeks, or close to two years. The payout of this "lottery" is 1/8 of that, or ~12.2 eeks (a bit less than four months) of revenue. This is much more severe than block proposing (a chance of 1/200000 per slot, or a lottery worth ~0.38 eeks of revenue once every ~3.05 eeks). This PR makes three changes to cut make the sync committee lottery less drastic and bring variance closer in line with what is available from block proposing: * Reduce the `SYNC_REWARD_WEIGHT` from 8 to 2 * Add a penalty for not participating in the sync committee, so that despite the first change the total net reward for participating vs not participating is only cut down by 2x * Reduce the sync committee period from 1/4 eek to 1/8 eek (~27 hours) With these three factors combined, the lottery reduces to ~1.5 eeks of revenue, on average occurring every ~48 eeks. Validators who are maximally unlucky (ie. never become part of a sync committee) only lose ~3.12% of their rewards instead of ~12.5%. The compromises that this approach makes are: * In the extreme case where >50% of proposers are operating efficiently, being in a sync committee becomes a net burden. However, this should be extremely rare, and in such cases validators would likely be suffering inactivity leak penalties anyway. * Incentive to participate in a sync committee decreased by 2x (but this is IMO an improvement; sync committees are _not_ as important as proposals and deserve to have lower rewards) * Minimum data syncing needed to maintain a light client increases by 2x (from 24 kB per 54 hours to 24 kB per 27 hours). A burden for on-chain light clients, but still insignificant for others. --- specs/altair/beacon-chain.md | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 7412a8490c..d6a942771b 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -86,10 +86,10 @@ Altair is the first beacon chain hard fork. 
Its main features are: | Name | Value | | - | - | -| `TIMELY_SOURCE_WEIGHT` | `uint64(12)` | -| `TIMELY_TARGET_WEIGHT` | `uint64(24)` | -| `TIMELY_HEAD_WEIGHT` | `uint64(12)` | -| `SYNC_REWARD_WEIGHT` | `uint64(8)` | +| `TIMELY_SOURCE_WEIGHT` | `uint64(14)` | +| `TIMELY_TARGET_WEIGHT` | `uint64(26)` | +| `TIMELY_HEAD_WEIGHT` | `uint64(14)` | +| `SYNC_REWARD_WEIGHT` | `uint64(2)` | | `PROPOSER_WEIGHT` | `uint64(8)` | | `WEIGHT_DENOMINATOR` | `uint64(64)` | @@ -129,7 +129,7 @@ This patch updates a few configuration values to move penalty parameters closer | Name | Value | Unit | Duration | | - | - | - | - | | `SYNC_COMMITTEE_SIZE` | `uint64(2**9)` (= 512) | Validators | | -| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**9)` (= 512) | epochs | ~54 hours | +| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**8)` (= 256) | epochs | ~27 hours | ## Configuration @@ -579,10 +579,12 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None # Apply participant and proposer rewards all_pubkeys = [v.pubkey for v in state.validators] committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] - participant_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit] - for participant_index in participant_indices: - increase_balance(state, participant_index, participant_reward) - increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + for index, participation_bit in zip(committee_indices, aggregate.sync_committee_bits): + if participant_bit: + increase_balance(state, participant_index, participant_reward) + increase_balance(state, get_beacon_proposer_index(state), proposer_reward) + else: + decrease_balance(state, participant_index, participant_reward) ``` ### Epoch processing From 3cd842a94d6c84ff0823946b8c032d913fe60336 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 27 May 2021 11:39:29 -0700 Subject: [PATCH 26/82] Update specs/altair/beacon-chain.md --- specs/altair/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index d6a942771b..33ef9a12b3 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -580,7 +580,7 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None all_pubkeys = [v.pubkey for v in state.validators] committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] for index, participation_bit in zip(committee_indices, aggregate.sync_committee_bits): - if participant_bit: + if participation_bit: increase_balance(state, participant_index, participant_reward) increase_balance(state, get_beacon_proposer_index(state), proposer_reward) else: From 4d5bc83af429433a8ddd061ae27beca64f761f40 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 27 May 2021 11:59:02 -0700 Subject: [PATCH 27/82] Re-org files --- specs/altair/beacon-chain.md | 44 ++---------------------- specs/altair/bls.md | 65 ++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 41 deletions(-) create mode 100644 specs/altair/bls.md diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index cd877161cb..2ac1d18891 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -27,9 +27,6 @@ - [`SyncCommittee`](#synccommittee) - [Helper functions](#helper-functions) - [Crypto](#crypto) - - [BLS public keys](#bls-public-keys) - - 
[`Predicates`](#predicates) - - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify) - [Misc](#misc-1) - [`add_flag`](#add_flag) - [`has_flag`](#has_flag) @@ -109,7 +106,6 @@ Altair is the first beacon chain hard fork. Its main features are: | Name | Value | | - | - | -| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` | | `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]` | ## Preset @@ -223,45 +219,11 @@ class SyncCommittee(Container): ## Helper functions - ### Crypto -#### BLS public keys - -An additional function `AggregatePKs` is defined to extend the -[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) -spec referenced in the phase 0 document. - -```python -def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: - """ - Return the aggregate public key for the public keys in ``pubkeys``. - - NOTE: the ``+`` operation should be interpreted as elliptic curve point addition, which takes as input - elliptic curve points that must be decoded from the input ``BLSPubkey``s. - This implementation is for demonstrative purposes only and ignores encoding/decoding concerns. - Refer to the BLS signature draft standard for more information. - """ - assert len(pubkeys) > 0 - result = copy(pubkeys[0]) - for pubkey in pubkeys[1:]: - result += pubkey - return result -``` - -### `Predicates` - -#### `eth2_fast_aggregate_verify` - -```python -def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: - """ - Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. - """ - if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: - return True - return bls.FastAggregateVerify(pubkeys, message, signature) -``` +Refer to the definitions in the [phase 0 document regarding BLS signatures](../phase0/beacon-chain.md#bls-signatures) +and the extensions defined in the [Altair BLS document](./bls.md). This specification assumes knowledge of +the functionality described in those documents. ### Misc diff --git a/specs/altair/bls.md b/specs/altair/bls.md new file mode 100644 index 0000000000..5292360561 --- /dev/null +++ b/specs/altair/bls.md @@ -0,0 +1,65 @@ +# Ethereum 2.0 Altair BLS extensions + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Constants](#constants) +- [Extensions](#extensions) + - [`eth2_aggregate_pubkeys`](#eth2_aggregate_pubkeys) + - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify) + + + + +## Introduction + +A number of extensions are defined to handle BLS signatures in the Altair upgrade. + +Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed, including type definitions. + +## Constants + +| Name | Value | +| - | - | +| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` | + +## Extensions + +### `eth2_aggregate_pubkeys` + +An additional function `AggregatePKs` is defined to extend the +[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) +spec referenced in the phase 0 document. + +```python +def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey: + """ + Return the aggregate public key for the public keys in ``pubkeys``. + + NOTE: the ``+`` operation should be interpreted as elliptic curve point addition, which takes as input + elliptic curve points that must be decoded from the input ``BLSPubkey``s. 
+ This implementation is for demonstrative purposes only and ignores encoding/decoding concerns. + Refer to the BLS signature draft standard for more information. + """ + assert len(pubkeys) > 0 + result = copy(pubkeys[0]) + for pubkey in pubkeys[1:]: + result += pubkey + return result +``` + +### `eth2_fast_aggregate_verify` + +```python +def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool: + """ + Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty. + """ + if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY: + return True + return bls.FastAggregateVerify(pubkeys, message, signature) +``` From 7a14e93c655da9c00669af9cbb37efef4326cea8 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 27 May 2021 12:02:44 -0700 Subject: [PATCH 28/82] PR feedback --- setup.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d840c34371..5384b1a704 100644 --- a/setup.py +++ b/setup.py @@ -312,8 +312,9 @@ def invariant_checks(cls) -> str: raise NotImplementedError() @classmethod + @abstractmethod def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: - return functions + raise NotImplementedError() @classmethod @abstractmethod @@ -439,6 +440,10 @@ def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]: def invariant_checks(cls) -> str: return '' + @classmethod + def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: + return functions + @classmethod def build_spec(cls, preset_name: str, source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str: From b0500c3de1b21a3f571c1f4c6a23a6a38e76b74e Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 27 May 2021 12:06:01 -0700 Subject: [PATCH 29/82] Update specs/altair/beacon-chain.md --- specs/altair/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 33ef9a12b3..41b86f3ac7 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -579,7 +579,7 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None # Apply participant and proposer rewards all_pubkeys = [v.pubkey for v in state.validators] committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] - for index, participation_bit in zip(committee_indices, aggregate.sync_committee_bits): + for participant_index, participation_bit in zip(committee_indices, aggregate.sync_committee_bits): if participation_bit: increase_balance(state, participant_index, participant_reward) increase_balance(state, get_beacon_proposer_index(state), proposer_reward) From 6ecbc5e357a39b830ed71d8e5848c6bee1b1de9b Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Thu, 27 May 2021 12:21:04 -0700 Subject: [PATCH 30/82] Add new file to spec infra --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 5384b1a704..81a07f20f6 100644 --- a/setup.py +++ b/setup.py @@ -500,7 +500,8 @@ def invariant_checks(cls) -> str: @classmethod def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]: if "eth2_aggregate_pubkeys" in functions: - return {**functions, **{"eth2_aggregate_pubkeys": OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()}} + functions["eth2_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip() + return 
super().implement_optimizations(functions) # # MergeSpecBuilder @@ -873,6 +874,7 @@ def finalize_options(self): specs/phase0/validator.md specs/phase0/weak-subjectivity.md specs/altair/beacon-chain.md + specs/altair/bls.md specs/altair/fork.md specs/altair/validator.md specs/altair/p2p-interface.md From d11586122fea6ba00b67392191b9344a474c3749 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 28 May 2021 01:02:08 +0200 Subject: [PATCH 31/82] update remerkleable, union support --- setup.py | 2 +- tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 99e501d9f3..cd2006f715 100644 --- a/setup.py +++ b/setup.py @@ -1017,7 +1017,7 @@ def run(self): "py_ecc==5.2.0", "milagro_bls_binding==1.6.3", "dataclasses==0.6", - "remerkleable==0.1.19", + "remerkleable==0.1.20", RUAMEL_YAML_VERSION, "lru-dict==1.1.6", MARKO_VERSION, diff --git a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py index 9b18f8bdae..5a1b61d0be 100644 --- a/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py +++ b/tests/core/pyspec/eth2spec/utils/ssz/ssz_typing.py @@ -2,6 +2,7 @@ # Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec. from remerkleable.complex import Container, Vector, List +from remerkleable.union import Union from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256 from remerkleable.bitfields import Bitvector, Bitlist from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList From e5521af1f8186d2a624948bdbaa753181695e92e Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 28 May 2021 20:18:29 +0200 Subject: [PATCH 32/82] new union-based shard headers/commitments representation --- specs/sharding/beacon-chain.md | 251 +++++++++++++++------------------ 1 file changed, 117 insertions(+), 134 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 5522e044de..38007e478c 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -13,6 +13,7 @@ - [Constants](#constants) - [Misc](#misc) - [Domain types](#domain-types) + - [Shard Header Status](#shard-header-status) - [Preset](#preset) - [Misc](#misc-1) - [Shard block samples](#shard-block-samples) @@ -99,6 +100,14 @@ The following values are (non-configurable) constants used throughout the specif | `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` | | `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | +### Shard Header Status + +| Name | Value | Notes | +| - | - | - | +| `UNCONFIRMED_SHARD_DATA` | `0` | Unconfirmed, nullified after confirmation time elapses | +| `CONFIRMED_SHARD_DATA` | `1` | Confirmed, reduced to just the commitment | +| `PENDING_SHARD_DATA` | `2` | Pending, a list of competing headers | + ## Preset ### Misc @@ -109,6 +118,7 @@ The following values are (non-configurable) constants used throughout the specif | `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | +| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (=256) | Number of slots for which shard commitments and confirmation status is directly available in the state | ### Shard block samples 
@@ -169,13 +179,21 @@ class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body] ```python class BeaconState(merge.BeaconState): # [extends The Merge state] - # [Updated fields] + # [Updated fields] (Warning: this changes with Altair, Sharding will rebase to use participation-flags) previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # [New fields] - previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] - current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH] - grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS] + # A ring buffer of the latest shard headers per slot. Upon confirmation the data is reduced to just the header. + shard_headers: Vector[ + List[ + Union[ # See Shard Header Status enum + None, # UNCONFIRMED_SHARD_DATA + DataCommitment, # CONFIRMED_SHARD_DATA + List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # PENDING_SHARD_DATA + ], + MAX_SHARDS + ], + SHARD_STATE_MEMORY_SLOTS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -233,17 +251,12 @@ class SignedShardBlobHeader(Container): ```python class PendingShardHeader(Container): - # Slot and shard that this header is intended for - slot: Slot - shard: Shard # KZG10 commitment to the data commitment: DataCommitment # hash_tree_root of the ShardHeader (stored so that attestations can be checked against it) root: Root # Who voted for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] - # Has this header been confirmed? - confirmed: boolean ``` ### `ShardBlobReference` @@ -346,7 +359,7 @@ def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64: Return the number of active shards. Note that this puts an upper bound on the number of committees per slot. """ - return INITIAL_ACTIVE_SHARDS + return INITIAL_ACTIVE_SHARDS # TODO: use shard_headers from state instead? 
``` #### `get_shard_committee` @@ -511,53 +524,46 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ```python def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: - # Find and update the PendingShardHeader object, invalid block if pending header not in state - if compute_epoch_at_slot(attestation.data.slot) == get_current_epoch(state): - pending_headers = state.current_epoch_pending_shard_headers - else: - pending_headers = state.previous_epoch_pending_shard_headers - attestation_shard = compute_shard_from_committee_index( state, attestation.data.slot, attestation.data.index, ) - pending_header = None - for header in pending_headers: - if ( - header.root == attestation.data.shard_header_root - and header.slot == attestation.data.slot - and header.shard == attestation_shard - ): - pending_header = header - assert pending_header is not None - - for i in range(len(pending_header.votes)): - pending_header.votes[i] = pending_header.votes[i] or attestation.aggregation_bits[i] - - # Check if the PendingShardHeader is eligible for expedited confirmation - # Requirement 1: nothing else confirmed - all_candidates = [ - c for c in pending_headers if - (c.slot, c.shard) == (pending_header.slot, pending_header.shard) - ] - if True in [c.confirmed for c in all_candidates]: + buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS + shard_header_status = state.shard_headers[buffer_index][attestation_shard] + + # Skip attestation vote accounting if the header is already confirmed + if shard_header_status.selector == CONFIRMED_SHARD_DATA: return - # Requirement 2: >= 2/3 of balance attesting + assert shard_header_status.selector == PENDING_SHARD_DATA + current_headers: Sequence[PendingShardHeader] = shard_header_status.value + + # Find the corresponding header, abort if it cannot be found + header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) + + # Update votes bitfield in the state + pending_header = state.shard_headers[buffer_index][attestation_shard][header_index] + for i, bit in enumerate(attestation.aggregation_bits): + if bit: + pending_header.votes[i] = True + + # Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting participants = get_attesting_indices(state, attestation.data, pending_header.votes) participants_balance = get_total_balance(state, participants) full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) full_committee_balance = get_total_balance(state, set(full_committee)) if participants_balance * 3 >= full_committee_balance * 2: - pending_header.confirmed = True + state.shard_headers[buffer_index][attestation_shard].change( + selector=CONFIRMED_SHARD_DATA, + value=pending_header.commitment, + ) ``` #### `process_shard_header` ```python -def process_shard_header(state: BeaconState, - signed_header: SignedShardBlobHeader) -> None: +def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: header = signed_header.message # Verify the header is not 0, and not from the future. assert Slot(0) < header.slot <= state.slot @@ -569,6 +575,15 @@ def process_shard_header(state: BeaconState, # Verify that the block root matches, # to ensure the header will only be included in this specific Beacon Chain sub-tree. 
assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) + + # Check that this data is still pending + shard_header_status = state.shard_headers[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot] + assert shard_header_status.selector == PENDING_SHARD_DATA + + # Check that this header is not yet in the pending list + current_headers: Sequence[PendingShardHeader] = shard_header_status.value + assert header_root not in [pending_header.root for pending_header in current_headers] + # Verify proposer assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard) # Verify signature @@ -584,27 +599,18 @@ def process_shard_header(state: BeaconState, == bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length]) ) - # Get the correct pending header list - if header_epoch == get_current_epoch(state): - pending_headers = state.current_epoch_pending_shard_headers - else: - pending_headers = state.previous_epoch_pending_shard_headers - - header_root = hash_tree_root(header) - # Check that this header is not yet in the pending list - assert header_root not in [pending_header.root for pending_header in pending_headers] - - # Include it in the pending list + # Initialize the pending header index = compute_committee_index_from_shard(state, header.slot, header.shard) committee_length = len(get_beacon_committee(state, header.slot, index)) - pending_headers.append(PendingShardHeader( - slot=header.slot, - shard=header.shard, + initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length) + pending_header = PendingShardHeader( commitment=body_summary.commitment, root=header_root, - votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - confirmed=False, - )) + votes=initial_votes, + ) + + # Include it in the pending list + state.shard_headers[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot].append(pending_header) ``` The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), @@ -679,105 +685,82 @@ def process_pending_headers(state: BeaconState) -> None: previous_epoch = get_previous_epoch(state) previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) + + # Mark stale headers as unconfirmed for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): - for shard_index in range(get_active_shard_count(state, previous_epoch)): - shard = Shard(shard_index) - # Pending headers for this (slot, shard) combo - candidates = [ - c for c in state.previous_epoch_pending_shard_headers - if (c.slot, c.shard) == (slot, shard) - ] - # If any candidates already confirmed, skip - if True in [c.confirmed for c in candidates]: - continue - - # The entire committee (and its balance) - index = compute_committee_index_from_shard(state, slot, shard) - full_committee = get_beacon_committee(state, slot, index) - # The set of voters who voted for each header (and their total balances) - voting_sets = [ - set(v for i, v in enumerate(full_committee) if c.votes[i]) - for c in candidates - ] - voting_balances = [ - get_total_balance(state, voters) - for voters in voting_sets - ] - # Get the index with the most total balance voting for them. 
- # NOTE: if two choices get exactly the same voting balance, - # the candidate earlier in the list wins - if max(voting_balances) > 0: - winning_index = voting_balances.index(max(voting_balances)) - else: - # If no votes, zero wins - winning_index = [c.root for c in candidates].index(Root()) - candidates[winning_index].confirmed = True - for slot_index in range(SLOTS_PER_EPOCH): - for shard in range(MAX_SHARDS): - state.grandparent_epoch_confirmed_commitments[shard][slot_index] = DataCommitment() - confirmed_headers = [candidate for candidate in state.previous_epoch_pending_shard_headers if candidate.confirmed] - for header in confirmed_headers: - state.grandparent_epoch_confirmed_commitments[header.shard][header.slot % SLOTS_PER_EPOCH] = header.commitment + buffer_index = slot % SHARD_STATE_MEMORY_SLOTS + for shard_index in range(len(state.shard_headers[buffer_index])): + if state.shard_headers[buffer_index][shard_index].selector == PENDING_SHARD_DATA: + state.shard_headers[buffer_index][shard_index].change(selector=UNCONFIRMED_SHARD_DATA, value=None) ``` ```python def charge_confirmed_header_fees(state: BeaconState) -> None: new_gasprice = state.shard_gasprice previous_epoch = get_previous_epoch(state) + previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) adjustment_quotient = ( get_active_shard_count(state, previous_epoch) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT ) - previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) + # Iterate through confirmed shard-headers for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): - for shard_index in range(get_active_shard_count(state, previous_epoch)): - shard = Shard(shard_index) - confirmed_candidates = [ - c for c in state.previous_epoch_pending_shard_headers - if (c.slot, c.shard, c.confirmed) == (slot, shard, True) - ] - if not any(confirmed_candidates): - continue - candidate = confirmed_candidates[0] - - # Charge EIP 1559 fee - proposer = get_shard_proposer_index(state, slot, shard) - fee = ( - (state.shard_gasprice * candidate.commitment.length) - // TARGET_SAMPLES_PER_BLOCK - ) - decrease_balance(state, proposer, fee) - - # Track updated gas price - new_gasprice = compute_updated_gasprice( - new_gasprice, - candidate.commitment.length, - adjustment_quotient, - ) + buffer_index = slot % SHARD_STATE_MEMORY_SLOTS + for shard_index in range(len(state.shard_headers[buffer_index])): + shard_header_status = state.shard_headers[buffer_index][shard_index] + if shard_header_status.selector == CONFIRMED_SHARD_DATA: + # Charge EIP 1559 fee + proposer = get_shard_proposer_index(state, slot, Shard(shard_index)) + fee = ( + (state.shard_gasprice * candidate.commitment.length) + // TARGET_SAMPLES_PER_BLOCK + ) + decrease_balance(state, proposer, fee) + + # Track updated gas price + new_gasprice = compute_updated_gasprice( + new_gasprice, + candidate.commitment.length, + adjustment_quotient, + ) state.shard_gasprice = new_gasprice ``` ```python def reset_pending_headers(state: BeaconState) -> None: - state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers - state.current_epoch_pending_shard_headers = [] # Add dummy "empty" PendingShardHeader (default vote for if no shard header available) next_epoch = get_current_epoch(state) + 1 next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch) committees_per_slot = get_committee_count_per_slot(state, next_epoch) + active_shards = get_active_shard_count(state, next_epoch) + for slot in 
range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH): - for index in range(committees_per_slot): - committee_index = CommitteeIndex(index) - shard = compute_shard_from_committee_index(state, slot, committee_index) - committee_length = len(get_beacon_committee(state, slot, committee_index)) - state.current_epoch_pending_shard_headers.append(PendingShardHeader( - slot=slot, - shard=shard, - commitment=DataCommitment(), - root=Root(), - votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - confirmed=False, - )) + buffer_index = slot % SHARD_STATE_MEMORY_SLOTS + if len(state.shard_headers[buffer_index]) < active_shards: + state.shard_headers[buffer_index].extend() + + start_shard = get_start_shard(state, slot) + for shard_index in range(state.shard_headers[buffer_index]): + if start_shard <= shard_index < start_shard + committees_per_slot: + # a committee is available, initialize a pending shard-header list + committee_index = CommitteeIndex(shard_index - start_shard) + committee_length = len(get_beacon_committee(state, slot, committee_index)) + state.shard_headers[buffer_index][shard_index].change( + selector=PENDING_SHARD_DATA, + value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]( + PendingShardHeader( + commitment=DataCommitment(), + root=Root(), + votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), + ) + ) + ) + else: + # shard is inactive, no committee available. + state.shard_headers[buffer_index][shard_index].change( + selector=UNCONFIRMED_SHARD_DATA, + value=None, + ) ``` #### Shard epoch increment From 38a0f4f21144b5e56179eb39c2887de22089c928 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 28 May 2021 21:26:06 +0200 Subject: [PATCH 33/82] update to wrap the union, clean up initialization and typing --- specs/sharding/beacon-chain.md | 102 ++++++++++++++++++--------------- 1 file changed, 57 insertions(+), 45 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 38007e478c..ba3a5ab504 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -33,6 +33,7 @@ - [`ShardBlobReference`](#shardblobreference) - [`SignedShardBlobReference`](#signedshardblobreference) - [`ShardProposerSlashing`](#shardproposerslashing) + - [`ShardCommitteeWork`](#shardcommitteework) - [Helper functions](#helper-functions) - [Misc](#misc-2) - [`next_power_of_two`](#next_power_of_two) @@ -183,17 +184,8 @@ class BeaconState(merge.BeaconState): # [extends The Merge state] previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # [New fields] - # A ring buffer of the latest shard headers per slot. Upon confirmation the data is reduced to just the header. - shard_headers: Vector[ - List[ - Union[ # See Shard Header Status enum - None, # UNCONFIRMED_SHARD_DATA - DataCommitment, # CONFIRMED_SHARD_DATA - List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # PENDING_SHARD_DATA - ], - MAX_SHARDS - ], - SHARD_STATE_MEMORY_SLOTS] + # A ring buffer of the latest slots, with information per active shard. 
+ shard_buffer: Vector[List[ShardCommitteeWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -288,6 +280,18 @@ class ShardProposerSlashing(Container): signed_reference_2: SignedShardBlobReference ``` +### `ShardCommitteeWork` + +```python +class ShardWork(Container): + # Upon confirmation the data is reduced to just the header. + status: Union[ # See Shard Header Status enum + None, # UNCONFIRMED_SHARD_DATA + DataCommitment, # CONFIRMED_SHARD_DATA + List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # PENDING_SHARD_DATA + ] +``` + ## Helper functions ### Misc @@ -359,7 +363,7 @@ def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64: Return the number of active shards. Note that this puts an upper bound on the number of committees per slot. """ - return INITIAL_ACTIVE_SHARDS # TODO: use shard_headers from state instead? + return INITIAL_ACTIVE_SHARDS ``` #### `get_shard_committee` @@ -461,6 +465,7 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard: ```python def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Shard: active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) + assert index < active_shards return Shard((index + get_start_shard(state, slot)) % active_shards) ``` @@ -468,8 +473,11 @@ def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: Co ```python def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex: - active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) - return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) + epoch = compute_epoch_at_slot(slot) + active_shards = get_active_shard_count(state, epoch) + index = CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) + assert index >= get_committee_count_per_slot(state, epoch) + return index ``` @@ -530,20 +538,21 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: attestation.data.index, ) buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS - shard_header_status = state.shard_headers[buffer_index][attestation_shard] + committee_work = state.shard_buffer[buffer_index][attestation_shard] # Skip attestation vote accounting if the header is already confirmed - if shard_header_status.selector == CONFIRMED_SHARD_DATA: + if committee_work.status.selector == CONFIRMED_SHARD_DATA: return - assert shard_header_status.selector == PENDING_SHARD_DATA - current_headers: Sequence[PendingShardHeader] = shard_header_status.value + # Note that shard-slot combinations without an assigned committee do not have a pending state + assert shard_info.status.selector == PENDING_SHARD_DATA + current_headers: Sequence[PendingShardHeader] = committee_work.status.value # Find the corresponding header, abort if it cannot be found header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) # Update votes bitfield in the state - pending_header = state.shard_headers[buffer_index][attestation_shard][header_index] + pending_header: PendingShardHeader = state.shard_buffer[buffer_index][attestation_shard][header_index] for i, bit in enumerate(attestation.aggregation_bits): if bit: pending_header.votes[i] = True @@ -554,10 +563,17 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: full_committee = get_beacon_committee(state, 
attestation.data.slot, attestation.data.index) full_committee_balance = get_total_balance(state, set(full_committee)) if participants_balance * 3 >= full_committee_balance * 2: - state.shard_headers[buffer_index][attestation_shard].change( - selector=CONFIRMED_SHARD_DATA, - value=pending_header.commitment, - ) + if pending_header.commitment == DataCommitment(): + # The committee voted to not confirm anything + state.shard_buffer[buffer_index][attestation_shard].change( + selector=UNCONFIRMED_SHARD_DATA, + value=None, + ) + else: + state.shard_buffer[buffer_index][attestation_shard].change( + selector=CONFIRMED_SHARD_DATA, + value=pending_header.commitment, + ) ``` #### `process_shard_header` @@ -577,11 +593,11 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) # Check that this data is still pending - shard_header_status = state.shard_headers[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot] - assert shard_header_status.selector == PENDING_SHARD_DATA + committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot] + assert committee_work.status.selector == PENDING_SHARD_DATA # Check that this header is not yet in the pending list - current_headers: Sequence[PendingShardHeader] = shard_header_status.value + current_headers: Sequence[PendingShardHeader] = committee_work.status.value assert header_root not in [pending_header.root for pending_header in current_headers] # Verify proposer @@ -610,7 +626,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade ) # Include it in the pending list - state.shard_headers[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot].append(pending_header) + state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot].append(pending_header) ``` The degree proof works as follows. 
For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), @@ -689,9 +705,9 @@ def process_pending_headers(state: BeaconState) -> None: # Mark stale headers as unconfirmed for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): buffer_index = slot % SHARD_STATE_MEMORY_SLOTS - for shard_index in range(len(state.shard_headers[buffer_index])): - if state.shard_headers[buffer_index][shard_index].selector == PENDING_SHARD_DATA: - state.shard_headers[buffer_index][shard_index].change(selector=UNCONFIRMED_SHARD_DATA, value=None) + for shard_index in range(len(state.shard_buffer[buffer_index])): + if state.shard_buffer[buffer_index][shard_index].selector == PENDING_SHARD_DATA: + state.shard_buffer[buffer_index][shard_index].change(selector=UNCONFIRMED_SHARD_DATA, value=None) ``` ```python @@ -706,9 +722,9 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: # Iterate through confirmed shard-headers for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): buffer_index = slot % SHARD_STATE_MEMORY_SLOTS - for shard_index in range(len(state.shard_headers[buffer_index])): - shard_header_status = state.shard_headers[buffer_index][shard_index] - if shard_header_status.selector == CONFIRMED_SHARD_DATA: + for shard_index in range(len(state.shard_buffer[buffer_index])): + committee_work = state.shard_buffer[buffer_index][shard_index] + if committee_work.status.selector == CONFIRMED_SHARD_DATA: # Charge EIP 1559 fee proposer = get_shard_proposer_index(state, slot, Shard(shard_index)) fee = ( @@ -728,24 +744,25 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: ```python def reset_pending_headers(state: BeaconState) -> None: - # Add dummy "empty" PendingShardHeader (default vote for if no shard header available) + # Add dummy "empty" PendingShardHeader (default vote if no shard header is available) next_epoch = get_current_epoch(state) + 1 next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch) committees_per_slot = get_committee_count_per_slot(state, next_epoch) active_shards = get_active_shard_count(state, next_epoch) - + for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH): buffer_index = slot % SHARD_STATE_MEMORY_SLOTS - if len(state.shard_headers[buffer_index]) < active_shards: - state.shard_headers[buffer_index].extend() + + # Reset the shard work tracking + state.shard_buffer[buffer_index] = [ShardCommitteeWork() for _ in range(active_shards)] start_shard = get_start_shard(state, slot) - for shard_index in range(state.shard_headers[buffer_index]): + for shard_index in range(state.shard_buffer[buffer_index]): if start_shard <= shard_index < start_shard + committees_per_slot: # a committee is available, initialize a pending shard-header list committee_index = CommitteeIndex(shard_index - start_shard) committee_length = len(get_beacon_committee(state, slot, committee_index)) - state.shard_headers[buffer_index][shard_index].change( + state.shard_buffer[buffer_index][shard_index].change( selector=PENDING_SHARD_DATA, value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]( PendingShardHeader( @@ -755,12 +772,7 @@ def reset_pending_headers(state: BeaconState) -> None: ) ) ) - else: - # shard is inactive, no committee available. 
- state.shard_headers[buffer_index][shard_index].change( - selector=UNCONFIRMED_SHARD_DATA, - value=None, - ) + # the shard is inactive for this slot otherwise, no committee available, default to UNCONFIRMED_SHARD_DATA. ``` #### Shard epoch increment From ef9b7125c2efe9d786d12501422bbfad357637b4 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 28 May 2021 12:27:19 -0700 Subject: [PATCH 34/82] whitespace --- ssz/simple-serialize.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index 89a1ebc0b8..2ae8d9bddf 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -175,7 +175,7 @@ return b"".join(fixed_parts + variable_parts) A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`. A `Union`: -- May have multiple selectors with the same type. +- May have multiple selectors with the same type. - Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions. - Must have at least 1 type option. - May have `None` as first type option, i.e. `selector == 0` @@ -194,7 +194,7 @@ else: ## Deserialization -Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. +Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to. Deserialization can be implemented using a recursive algorithm. The deserialization of basic objects is easy, and from there we can find a simple recursive algorithm for all fixed-size objects. 
For variable-size objects we have to do one of the following depending on what kind of object it is: From b763416a6b486b4fe0c82c9a9ceb07b0047d3034 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 28 May 2021 12:27:29 -0700 Subject: [PATCH 35/82] remove unnecessary defn of `null` --- ssz/simple-serialize.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index 2ae8d9bddf..cc03cec09f 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -17,7 +17,6 @@ - [Serialization](#serialization) - [`uintN`](#uintn) - [`boolean`](#boolean) - - [`null`](#null) - [`Bitvector[N]`](#bitvectorn) - [`Bitlist[N]`](#bitlistn) - [Vectors, containers, lists](#vectors-containers-lists) @@ -123,12 +122,6 @@ assert value in (True, False) return b"\x01" if value is True else b"\x00" ``` -### `null` - -```python -return b"" -``` - ### `Bitvector[N]` ```python From 72215412fa1a89f5155e8b47e8ac479d21b2d686 Mon Sep 17 00:00:00 2001 From: protolambda Date: Fri, 28 May 2021 21:56:13 +0200 Subject: [PATCH 36/82] For slow shard confirmation, assuming it is available via DAS, we could confirm the best pending header --- specs/sharding/beacon-chain.md | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index ba3a5ab504..ba6da197f3 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -249,6 +249,8 @@ class PendingShardHeader(Container): root: Root # Who voted for the header votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] + # Sum of effective balances of votes + weight: Gwei ``` ### `ShardBlobReference` @@ -552,15 +554,18 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) # Update votes bitfield in the state - pending_header: PendingShardHeader = state.shard_buffer[buffer_index][attestation_shard][header_index] + pending_header: PendingShardHeader = state.shard_buffer[buffer_index][attestation_shard][header_index] + full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) + participants_balance = Gwei(0) for i, bit in enumerate(attestation.aggregation_bits): + weight = state.validators[full_committee[i]].effective_balance if bit: + if not pending_header.votes[i]: + pending_header.weight += weight pending_header.votes[i] = True + participants_balance += weight # Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting - participants = get_attesting_indices(state, attestation.data, pending_header.votes) - participants_balance = get_total_balance(state, participants) - full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) full_committee_balance = get_total_balance(state, set(full_committee)) if participants_balance * 3 >= full_committee_balance * 2: if pending_header.commitment == DataCommitment(): @@ -623,6 +628,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade commitment=body_summary.commitment, root=header_root, votes=initial_votes, + weight=0, ) # Include it in the pending list @@ -706,8 +712,13 @@ def process_pending_headers(state: BeaconState) -> None: for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH): buffer_index = slot % SHARD_STATE_MEMORY_SLOTS for shard_index in 
range(len(state.shard_buffer[buffer_index])): - if state.shard_buffer[buffer_index][shard_index].selector == PENDING_SHARD_DATA: - state.shard_buffer[buffer_index][shard_index].change(selector=UNCONFIRMED_SHARD_DATA, value=None) + committee_work = state.shard_buffer[buffer_index][shard_index] + if committee_work.selector == PENDING_SHARD_DATA: + winning_header = max(committee_work.value, key=lambda header: header.weight) + if winning_header.commitment == DataCommitment(): + committee_work.change(selector=UNCONFIRMED_SHARD_DATA, value=None) + else: + committee_work.change(selector=CONFIRMED_SHARD_DATA, value=winning_header.commitment) ``` ```python @@ -769,6 +780,7 @@ def reset_pending_headers(state: BeaconState) -> None: commitment=DataCommitment(), root=Root(), votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), + weight=0, ) ) ) From fa09d896484bbe240334fa21ffaa454bafe5842e Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Fri, 28 May 2021 18:13:22 -0700 Subject: [PATCH 37/82] Update simple-serialize.md --- ssz/simple-serialize.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ssz/simple-serialize.md b/ssz/simple-serialize.md index cc03cec09f..4ef64f2f28 100644 --- a/ssz/simple-serialize.md +++ b/ssz/simple-serialize.md @@ -196,7 +196,7 @@ Deserialization can be implemented using a recursive algorithm. The deserializat * The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length) * Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects. * In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits. -* The first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type. +* In the case of unions, the first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type. Note that deserialization requires hardening against invalid inputs. 
A non-exhaustive list: From 3665dbea8b65d08da4953ac5fc26f7f39e77f746 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 29 May 2021 21:28:00 +0200 Subject: [PATCH 38/82] name (slot, shard) union wrapper ShardWork --- specs/sharding/beacon-chain.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index ba6da197f3..d5e43623ba 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -33,7 +33,7 @@ - [`ShardBlobReference`](#shardblobreference) - [`SignedShardBlobReference`](#signedshardblobreference) - [`ShardProposerSlashing`](#shardproposerslashing) - - [`ShardCommitteeWork`](#shardcommitteework) + - [`ShardWork`](#shardwork) - [Helper functions](#helper-functions) - [Misc](#misc-2) - [`next_power_of_two`](#next_power_of_two) @@ -185,7 +185,7 @@ class BeaconState(merge.BeaconState): # [extends The Merge state] current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH] # [New fields] # A ring buffer of the latest slots, with information per active shard. - shard_buffer: Vector[List[ShardCommitteeWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS] + shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS] shard_gasprice: uint64 current_epoch_start_shard: Shard ``` @@ -282,7 +282,7 @@ class ShardProposerSlashing(Container): signed_reference_2: SignedShardBlobReference ``` -### `ShardCommitteeWork` +### `ShardWork` ```python class ShardWork(Container): @@ -765,7 +765,7 @@ def reset_pending_headers(state: BeaconState) -> None: buffer_index = slot % SHARD_STATE_MEMORY_SLOTS # Reset the shard work tracking - state.shard_buffer[buffer_index] = [ShardCommitteeWork() for _ in range(active_shards)] + state.shard_buffer[buffer_index] = [ShardWork() for _ in range(active_shards)] start_shard = get_start_shard(state, slot) for shard_index in range(state.shard_buffer[buffer_index]): From 31f48b7b3bdc0c415a3cf50f3366402efb24d2f1 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 29 May 2021 22:39:25 +0200 Subject: [PATCH 39/82] update sharding presets --- presets/mainnet/sharding.yaml | 2 ++ presets/minimal/sharding.yaml | 2 ++ specs/sharding/beacon-chain.md | 2 +- 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/presets/mainnet/sharding.yaml b/presets/mainnet/sharding.yaml index 9a81c8cdc6..2b78855fc2 100644 --- a/presets/mainnet/sharding.yaml +++ b/presets/mainnet/sharding.yaml @@ -15,6 +15,8 @@ MAX_SHARD_PROPOSER_SLASHINGS: 16 # Shard block configs # --------------------------------------------------------------- MAX_SHARD_HEADERS_PER_SHARD: 4 +# 2**8 (= 256) +SHARD_STATE_MEMORY_SLOTS: 256 # 2**11 (= 2,048) MAX_SAMPLES_PER_BLOCK: 2048 # 2**10 (= 1,1024) diff --git a/presets/minimal/sharding.yaml b/presets/minimal/sharding.yaml index 7dedbc9260..10f79c96ea 100644 --- a/presets/minimal/sharding.yaml +++ b/presets/minimal/sharding.yaml @@ -15,6 +15,8 @@ MAX_SHARD_PROPOSER_SLASHINGS: 4 # Shard block configs # --------------------------------------------------------------- MAX_SHARD_HEADERS_PER_SHARD: 4 +# 2**8 (= 256) +SHARD_STATE_MEMORY_SLOTS: 256 # 2**11 (= 2,048) MAX_SAMPLES_PER_BLOCK: 2048 # 2**10 (= 1,1024) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index d5e43623ba..a562050bf4 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -119,7 +119,7 @@ The following values are (non-configurable) constants used throughout the specif | `GASPRICE_ADJUSTMENT_COEFFICIENT` | 
`uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* | | `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block | | `MAX_SHARD_HEADERS_PER_SHARD` | `4` | | -| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (=256) | Number of slots for which shard commitments and confirmation status is directly available in the state | +| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state | ### Shard block samples From bb3d581b13170c74f5afa5661b21c1d920e83f21 Mon Sep 17 00:00:00 2001 From: protolambda Date: Sat, 29 May 2021 23:55:16 +0200 Subject: [PATCH 40/82] update shard processing naming and doc structure --- specs/sharding/beacon-chain.md | 48 +++++++++++++++++----------------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index a562050bf4..be4520a314 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -51,14 +51,14 @@ - [`compute_committee_index_from_shard`](#compute_committee_index_from_shard) - [Block processing](#block-processing) - [Operations](#operations) - - [New Attestation processing](#new-attestation-processing) - - [Updated `process_attestation`](#updated-process_attestation) - - [`update_pending_votes`](#update_pending_votes) - - [`process_shard_header`](#process_shard_header) - - [Shard Proposer slashings](#shard-proposer-slashings) + - [Extended Attestation processing](#extended-attestation-processing) + - [`process_shard_header`](#process_shard_header) + - [`process_shard_proposer_slashing`](#process_shard_proposer_slashing) - [Epoch transition](#epoch-transition) - - [Pending headers](#pending-headers) - - [Shard epoch increment](#shard-epoch-increment) + - [`process_pending_shard_confirmations`](#process_pending_shard_confirmations) + - [`charge_confirmed_shard_fees`](#charge_confirmed_shard_fees) + - [`reset_pending_shard_work`](#reset_pending_shard_work) + - [`process_shard_epoch_increment`](#process_shard_epoch_increment) @@ -520,20 +520,16 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.voluntary_exits, process_voluntary_exit) ``` -### New Attestation processing - -#### Updated `process_attestation` +##### Extended Attestation processing ```python def process_attestation(state: BeaconState, attestation: Attestation) -> None: phase0.process_attestation(state, attestation) - update_pending_votes(state, attestation) + update_pending_shard_work(state, attestation) ``` -#### `update_pending_votes` - ```python -def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: +def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> None: attestation_shard = compute_shard_from_committee_index( state, attestation.data.slot, @@ -581,7 +577,7 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: ) ``` -#### `process_shard_header` +##### `process_shard_header` ```python def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: @@ -640,7 +636,7 @@ the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`. 
The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction). -##### Shard Proposer slashings +##### `process_shard_proposer_slashing` ```python def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None: @@ -680,9 +676,9 @@ def process_epoch(state: BeaconState) -> None: process_slashings(state) # Sharding - process_pending_headers(state) - charge_confirmed_header_fees(state) - reset_pending_headers(state) + process_pending_shard_confirmations(state) + charge_confirmed_shard_fees(state) + reset_pending_shard_work(state) # Final updates # Phase 0 @@ -696,10 +692,10 @@ def process_epoch(state: BeaconState) -> None: process_shard_epoch_increment(state) ``` -#### Pending headers +#### `process_pending_shard_confirmations` ```python -def process_pending_headers(state: BeaconState) -> None: +def process_pending_shard_confirmations(state: BeaconState) -> None: # Pending header processing applies to the previous epoch. # Skip if `GENESIS_EPOCH` because no prior epoch to process. if get_current_epoch(state) == GENESIS_EPOCH: @@ -721,8 +717,10 @@ def process_pending_headers(state: BeaconState) -> None: committee_work.change(selector=CONFIRMED_SHARD_DATA, value=winning_header.commitment) ``` +#### `charge_confirmed_shard_fees` + ```python -def charge_confirmed_header_fees(state: BeaconState) -> None: +def charge_confirmed_shard_fees(state: BeaconState) -> None: new_gasprice = state.shard_gasprice previous_epoch = get_previous_epoch(state) previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch) @@ -753,8 +751,10 @@ def charge_confirmed_header_fees(state: BeaconState) -> None: state.shard_gasprice = new_gasprice ``` +#### `reset_pending_shard_work` + ```python -def reset_pending_headers(state: BeaconState) -> None: +def reset_pending_shard_work(state: BeaconState) -> None: # Add dummy "empty" PendingShardHeader (default vote if no shard header is available) next_epoch = get_current_epoch(state) + 1 next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch) @@ -787,7 +787,7 @@ def reset_pending_headers(state: BeaconState) -> None: # the shard is inactive for this slot otherwise, no committee available, default to UNCONFIRMED_SHARD_DATA. 
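     # (A freshly constructed `ShardWork()` starts with its `status` union at selector 0,
     #  i.e. UNCONFIRMED_SHARD_DATA, so inactive shards need no explicit `change()` call here.)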
``` -#### Shard epoch increment +#### `process_shard_epoch_increment` ```python def process_shard_epoch_increment(state: BeaconState) -> None: From 8f005c18e59a749eca83aa16ff0b2a36fd4e13ea Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 31 May 2021 15:20:50 +0200 Subject: [PATCH 41/82] when an untyped var is not the last config var, it needs a comma --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index cd2006f715..6e6349b6b4 100644 --- a/setup.py +++ b/setup.py @@ -596,7 +596,7 @@ def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str def format_config_var(name: str, vardef: VariableDefinition) -> str: if vardef.type_name is None: - out = f'{name}={vardef.value}' + out = f'{name}={vardef.value},' else: out = f'{name}={vardef.type_name}({vardef.value}),' if vardef.comment is not None: From fd4369dc7ce4e6d1af912d1db5d6dfab0dddd3f9 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 31 May 2021 19:30:54 +0600 Subject: [PATCH 42/82] Add merge/fork.md with upgrade_to_merge definition --- README.md | 1 + setup.py | 1 + specs/merge/beacon-chain.md | 2 - specs/merge/fork.md | 89 +++++++++++++ .../eth2spec/test/helpers/merge/__init__.py | 0 .../eth2spec/test/helpers/merge/fork.py | 45 +++++++ .../eth2spec/test/merge/fork/__init__.py | 0 .../test/merge/fork/test_merge_fork_basic.py | 82 ++++++++++++ .../test/merge/fork/test_merge_fork_random.py | 120 ++++++++++++++++++ tests/formats/forks/README.md | 1 + 10 files changed, 339 insertions(+), 2 deletions(-) create mode 100644 specs/merge/fork.md create mode 100644 tests/core/pyspec/eth2spec/test/helpers/merge/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/helpers/merge/fork.py create mode 100644 tests/core/pyspec/eth2spec/test/merge/fork/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py create mode 100644 tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py diff --git a/README.md b/README.md index b74102c30d..a9a840bdef 100644 --- a/README.md +++ b/README.md @@ -43,6 +43,7 @@ while the details are in review and may change. * [ethereum.org](https://ethereum.org) high-level description of the merge [here](https://ethereum.org/en/eth2/docking/) * Specifications: * [Beacon Chain changes](specs/merge/beacon-chain.md) + * [Merge fork](specs/merge/fork.md) * [Fork Choice changes](specs/merge/fork-choice.md) * [Validator additions](specs/merge/validator.md) diff --git a/setup.py b/setup.py index 6e6349b6b4..7466ec790d 100644 --- a/setup.py +++ b/setup.py @@ -865,6 +865,7 @@ def finalize_options(self): specs/phase0/validator.md specs/phase0/weak-subjectivity.md specs/merge/beacon-chain.md + specs/merge/fork.md specs/merge/fork-choice.md specs/merge/validator.md """ diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index ada6c1a258..c3ee81d767 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -70,8 +70,6 @@ Warning: this configuration is not definitive. | Name | Value | | - | - | -| `MERGE_FORK_VERSION` | `Version('0x02000000')` | -| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | | `TRANSITION_TOTAL_DIFFICULTY` | **TBD** | ## Containers diff --git a/specs/merge/fork.md b/specs/merge/fork.md new file mode 100644 index 0000000000..ad6e3ad3bf --- /dev/null +++ b/specs/merge/fork.md @@ -0,0 +1,89 @@ +# Ethereum 2.0 The Merge + +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents + + + + +- [Introduction](#introduction) +- [Configuration](#configuration) +- [Fork to Merge](#fork-to-merge) + - [Fork trigger](#fork-trigger) + - [Upgrading the state](#upgrading-the-state) + + + +## Introduction + +This document describes the process of the Merge upgrade. + +## Configuration + +Warning: this configuration is not definitive. + +| Name | Value | +| - | - | +| `MERGE_FORK_VERSION` | `Version('0x02000000')` | +| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | + +## Fork to Merge + +### Fork trigger + +TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at epoch `MERGE_FORK_EPOCH`. + +Note that for the pure Merge networks, we don't apply `upgrade_to_merge` since it starts with Merge version logic. + +### Upgrading the state + +If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, an irregular state change is made to upgrade to Merge. + +The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `MERGE_FORK_EPOCH * SLOTS_PER_EPOCH`. +Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document. +In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`. + +```python +def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState: + epoch = phase0.get_current_epoch(pre) + post = BeaconState( + # Versioning + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + current_version=MERGE_FORK_VERSION, + epoch=epoch, + ), + # History + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + # Eth1 + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + # Registry + validators=pre.validators, + balances=pre.balances, + # Randomness + randao_mixes=pre.randao_mixes, + # Slashings + slashings=pre.slashings, + # Attestations + previous_epoch_attestations=pre.previous_epoch_attestations, + current_epoch_attestations=pre.current_epoch_attestations, + # Finality + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + # Execution-layer + latest_execution_payload_header=ExecutionPayloadHeader(), + ) + + return post +``` diff --git a/tests/core/pyspec/eth2spec/test/helpers/merge/__init__.py b/tests/core/pyspec/eth2spec/test/helpers/merge/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py new file mode 100644 index 0000000000..9b7f893663 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/merge/fork.py @@ -0,0 +1,45 @@ +MERGE_FORK_TEST_META_TAGS = { + 'fork': 
'merge', +} + + +def run_fork_test(post_spec, pre_state): + # Clean up state to be more realistic + pre_state.current_epoch_attestations = [] + + yield 'pre', pre_state + + post_state = post_spec.upgrade_to_merge(pre_state) + + # Stable fields + stable_fields = [ + 'genesis_time', 'genesis_validators_root', 'slot', + # History + 'latest_block_header', 'block_roots', 'state_roots', 'historical_roots', + # Eth1 + 'eth1_data', 'eth1_data_votes', 'eth1_deposit_index', + # Registry + 'validators', 'balances', + # Randomness + 'randao_mixes', + # Slashings + 'slashings', + # Attestations + 'previous_epoch_attestations', 'current_epoch_attestations', + # Finality + 'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint', + ] + for field in stable_fields: + assert getattr(pre_state, field) == getattr(post_state, field) + + # Modified fields + modified_fields = ['fork'] + for field in modified_fields: + assert getattr(pre_state, field) != getattr(post_state, field) + + assert pre_state.fork.current_version == post_state.fork.previous_version + assert post_state.fork.current_version == post_spec.config.MERGE_FORK_VERSION + assert post_state.fork.epoch == post_spec.get_current_epoch(post_state) + assert post_state.latest_execution_payload_header == post_spec.ExecutionPayloadHeader() + + yield 'post', post_state diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/__init__.py b/tests/core/pyspec/eth2spec/test/merge/fork/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py new file mode 100644 index 0000000000..066a656a82 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_basic.py @@ -0,0 +1,82 @@ +from eth2spec.test.context import ( + with_phases, + with_custom_state, + with_presets, + spec_test, with_state, + low_balances, misc_balances, large_validator_set, +) +from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.constants import ( + PHASE0, MERGE, + MINIMAL, +) +from eth2spec.test.helpers.state import ( + next_epoch, + next_epoch_via_block, +) +from eth2spec.test.helpers.merge.fork import ( + MERGE_FORK_TEST_META_TAGS, + run_fork_test, +) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_base_state(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_next_epoch(spec, phases, state): + next_epoch(spec, state) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_next_epoch_with_block(spec, phases, state): + next_epoch_via_block(spec, state) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_many_next_epoch(spec, phases, state): + for _ in range(3): + next_epoch(spec, state) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@spec_test 
+@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_random_low_balances(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@spec_test +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_random_misc_balances(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@spec_test +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_fork_random_large_validator_set(spec, phases, state): + yield from run_fork_test(phases[MERGE], state) diff --git a/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py new file mode 100644 index 0000000000..d790acd3a4 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/merge/fork/test_merge_fork_random.py @@ -0,0 +1,120 @@ +from random import Random + +from eth2spec.test.context import ( + with_phases, + with_custom_state, + with_presets, + spec_test, with_state, + low_balances, misc_balances, large_validator_set, +) +from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.constants import ( + PHASE0, MERGE, + MINIMAL, +) +from eth2spec.test.helpers.merge.fork import ( + MERGE_FORK_TEST_META_TAGS, + run_fork_test, +) +from eth2spec.test.helpers.random import ( + randomize_state, + randomize_attestation_participation, +) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_0(spec, phases, state): + randomize_state(spec, state, rng=Random(1010)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_1(spec, phases, state): + randomize_state(spec, state, rng=Random(2020)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_2(spec, phases, state): + randomize_state(spec, state, rng=Random(3030)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_3(spec, phases, state): + randomize_state(spec, state, rng=Random(4040)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_duplicate_attestations(spec, phases, state): + randomize_state(spec, state, rng=Random(1111)) + # Note: `run_fork_test` empties `current_epoch_attestations` + state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_state +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_mismatched_attestations(spec, phases, state): + # 
Create a random state + randomize_state(spec, state, rng=Random(2222)) + + # Now make two copies + state_0 = state.copy() + state_1 = state.copy() + + # Randomize attestation participation of both + randomize_attestation_participation(spec, state_0, rng=Random(3333)) + randomize_attestation_participation(spec, state_1, rng=Random(4444)) + + # Note: `run_fork_test` empties `current_epoch_attestations` + # Use pending attestations from both random states in a single state for testing + state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations + yield from run_fork_test(phases[MERGE], state_0) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_low_balances(spec, phases, state): + randomize_state(spec, state, rng=Random(5050)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@spec_test +@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_misc_balances(spec, phases, state): + randomize_state(spec, state, rng=Random(6060)) + yield from run_fork_test(phases[MERGE], state) + + +@with_phases(phases=[PHASE0], other_phases=[MERGE]) +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@spec_test +@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(MERGE_FORK_TEST_META_TAGS) +def test_merge_fork_random_large_validator_set(spec, phases, state): + randomize_state(spec, state, rng=Random(7070)) + yield from run_fork_test(phases[MERGE], state) diff --git a/tests/formats/forks/README.md b/tests/formats/forks/README.md index 36ce942d78..1d3b18d0d8 100644 --- a/tests/formats/forks/README.md +++ b/tests/formats/forks/README.md @@ -23,6 +23,7 @@ Key of valid `fork` strings that might be found in `meta.yaml` | String ID | Pre-fork | Post-fork | Function | | - | - | - | - | | `altair` | Phase 0 | Altair | `upgrade_to_altair` | +| `merge` | Phase 0 | Merge | `upgrade_to_merge` | ### `pre.ssz_snappy` From 5a235d02516c270fa1703a4d78073df6d413d70f Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Mon, 31 May 2021 17:03:06 +0200 Subject: [PATCH 43/82] Review suggestions Co-authored-by: Anton Nashatyrev --- specs/sharding/beacon-chain.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index be4520a314..930d6a08ea 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -550,7 +550,7 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) # Update votes bitfield in the state - pending_header: PendingShardHeader = state.shard_buffer[buffer_index][attestation_shard][header_index] + pending_header: PendingShardHeader = current_headers[header_index] full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) participants_balance = Gwei(0) for i, bit in enumerate(attestation.aggregation_bits): @@ -594,7 +594,7 @@ def process_shard_header(state: BeaconState, signed_header: 
SignedShardBlobHeade assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) # Check that this data is still pending - committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot] + committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard] assert committee_work.status.selector == PENDING_SHARD_DATA # Check that this header is not yet in the pending list @@ -628,7 +628,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade ) # Include it in the pending list - state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.slot].append(pending_header) + state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard].append(pending_header) ``` The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values), From 2545c3e2d07b6d78b486e7232a6608b927f9e6da Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 31 May 2021 18:55:08 +0200 Subject: [PATCH 44/82] update pending header weights after epoch transition, fix committee index loop, fix header processing status assert, add todos for Altair-like shard attestation rewards --- specs/sharding/beacon-chain.md | 74 +++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 32 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 930d6a08ea..84ce948677 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -251,6 +251,8 @@ class PendingShardHeader(Container): votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] # Sum of effective balances of votes weight: Gwei + # When the header was last updated, as reference for weight accuracy + update_slot: Slot ``` ### `ShardBlobReference` @@ -538,32 +540,39 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS committee_work = state.shard_buffer[buffer_index][attestation_shard] - # Skip attestation vote accounting if the header is already confirmed - if committee_work.status.selector == CONFIRMED_SHARD_DATA: + # Skip attestation vote accounting if the header is not pending + if committee_work.status.selector != PENDING_SHARD_DATA: + # TODO In Altair: set participation bit flag, if attestation matches winning header. 
return - # Note that shard-slot combinations without an assigned committee do not have a pending state - assert shard_info.status.selector == PENDING_SHARD_DATA current_headers: Sequence[PendingShardHeader] = committee_work.status.value # Find the corresponding header, abort if it cannot be found header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root) - # Update votes bitfield in the state pending_header: PendingShardHeader = current_headers[header_index] full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) - participants_balance = Gwei(0) + full_committee_balance = Gwei(0) + + # The weight may be outdated if it is not the initial weight, and from a previous epoch + if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state): + pending_header.weight = sum(state.validators[index].effective_balance for index, bit + in zip(full_committee, pending_header.votes) if bit) + + pending_header.update_slot = state.slot + + # Update votes bitfield in the state, update weights for i, bit in enumerate(attestation.aggregation_bits): weight = state.validators[full_committee[i]].effective_balance + full_committee_balance += weight if bit: if not pending_header.votes[i]: pending_header.weight += weight - pending_header.votes[i] = True - participants_balance += weight + pending_header.votes[i] = True # Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting - full_committee_balance = get_total_balance(state, set(full_committee)) - if participants_balance * 3 >= full_committee_balance * 2: + if pending_header.weight * 3 >= full_committee_balance * 2: + # TODO In Altair: set participation bit flag for voters of this early winning header if pending_header.commitment == DataCommitment(): # The committee voted to not confirm anything state.shard_buffer[buffer_index][attestation_shard].change( @@ -625,6 +634,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade root=header_root, votes=initial_votes, weight=0, + update_slot=state.slot, ) # Include it in the pending list @@ -669,19 +679,18 @@ This epoch transition overrides the Merge epoch transition: ```python def process_epoch(state: BeaconState) -> None: - process_justification_and_finalization(state) - process_rewards_and_penalties(state) - process_registry_updates(state) - - process_slashings(state) - # Sharding process_pending_shard_confirmations(state) charge_confirmed_shard_fees(state) reset_pending_shard_work(state) + # Phase0 + process_justification_and_finalization(state) + process_rewards_and_penalties(state) + process_registry_updates(state) + process_slashings(state) + # Final updates - # Phase 0 process_eth1_data_reset(state) process_effective_balance_updates(state) process_slashings_reset(state) @@ -711,6 +720,7 @@ def process_pending_shard_confirmations(state: BeaconState) -> None: committee_work = state.shard_buffer[buffer_index][shard_index] if committee_work.selector == PENDING_SHARD_DATA: winning_header = max(committee_work.value, key=lambda header: header.weight) + # TODO In Altair: set participation bit flag of voters for winning header if winning_header.commitment == DataCommitment(): committee_work.change(selector=UNCONFIRMED_SHARD_DATA, value=None) else: @@ -768,23 +778,23 @@ def reset_pending_shard_work(state: BeaconState) -> None: state.shard_buffer[buffer_index] = [ShardWork() for _ in range(active_shards)] start_shard = 
get_start_shard(state, slot) - for shard_index in range(state.shard_buffer[buffer_index]): - if start_shard <= shard_index < start_shard + committees_per_slot: - # a committee is available, initialize a pending shard-header list - committee_index = CommitteeIndex(shard_index - start_shard) - committee_length = len(get_beacon_committee(state, slot, committee_index)) - state.shard_buffer[buffer_index][shard_index].change( - selector=PENDING_SHARD_DATA, - value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]( - PendingShardHeader( - commitment=DataCommitment(), - root=Root(), - votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), - weight=0, - ) + for committee_index in range(committees_per_slot): + shard = (start_shard + committee_index) % active_shards + # a committee is available, initialize a pending shard-header list + committee_length = len(get_beacon_committee(state, slot, committee_index)) + state.shard_buffer[buffer_index][shard].change( + selector=PENDING_SHARD_DATA, + value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]( + PendingShardHeader( + commitment=DataCommitment(), + root=Root(), + votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length), + weight=0, + update_slot=slot, ) ) - # the shard is inactive for this slot otherwise, no committee available, default to UNCONFIRMED_SHARD_DATA. + ) + # a shard without committee available defaults to UNCONFIRMED_SHARD_DATA. ``` #### `process_shard_epoch_increment` From 6226be9e1ee365d5de986324e96de6a8efb8e36a Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 1 Jun 2021 16:28:30 +0600 Subject: [PATCH 45/82] Implement TransitionStore and transition total difficulty computation --- configs/mainnet.yaml | 2 +- configs/minimal.yaml | 2 +- setup.py | 2 +- specs/merge/beacon-chain.md | 2 +- specs/merge/fork-choice.md | 55 ++++++++++++++++++++++++------------- specs/merge/fork.md | 13 +++++++++ specs/merge/validator.md | 12 ++------ 7 files changed, 56 insertions(+), 32 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 47b02aa8d9..80b8b6e7d6 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -32,7 +32,7 @@ SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY: 4294967296 +TRANSITION_TOTAL_DIFFICULTY_OFFSET: 4294967296 # Time parameters diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 1a04c4ecd2..8abbdb7fce 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -31,7 +31,7 @@ SHARDING_FORK_VERSION: 0x03000001 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY: 4294967296 +TRANSITION_TOTAL_DIFFICULTY_OFFSET: 4294967296 # Time parameters diff --git a/setup.py b/setup.py index 7466ec790d..80e3d8d9f3 100644 --- a/setup.py +++ b/setup.py @@ -509,7 +509,7 @@ def sundry_functions(cls) -> str: def get_pow_block(hash: Bytes32) -> PowBlock: return PowBlock(block_hash=hash, is_valid=True, is_processed=True, - total_difficulty=config.TRANSITION_TOTAL_DIFFICULTY) + total_difficulty=uint256(0)) def get_execution_state(execution_state_root: Bytes32) -> ExecutionState: diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index c3ee81d767..621ebfddcb 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -70,7 +70,7 @@ Warning: this configuration is not definitive. 
| Name | Value | | - | - | -| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** | +| `TRANSITION_TOTAL_DIFFICULTY_OFFSET` | **TBD** | ## Containers diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 9e6c341bc0..59dc8bad2a 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -12,13 +12,14 @@ - [`ExecutionEngine`](#executionengine) - [`set_head`](#set_head) - [`finalize_block`](#finalize_block) -- [Containers](#containers) - - [`PowBlock`](#powblock) -- [Helper functions](#helper-functions) - - [`get_pow_block`](#get_pow_block) - - [`is_valid_transition_block`](#is_valid_transition_block) +- [Helpers](#helpers) + - [`TransitionStore`](#transitionstore) + - [`PowBlock`](#powblock) + - [`get_pow_block`](#get_pow_block) + - [`get_transition_store`](#get_transition_store) + - [`is_valid_transition_block`](#is_valid_transition_block) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - - [`on_block`](#on_block) + - [`on_block`](#on_block) @@ -66,44 +67,59 @@ def finalize_block(self: ExecutionEngine, block_hash: Hash32) -> bool: ... ``` -## Containers +## Helpers -#### `PowBlock` +### `TransitionStore` ```python -class PowBlock(Container): +@dataclass +class TransitionStore(object): + transition_total_difficulty: uint256 +``` + +### `PowBlock` + +```python +@dataclass +class PowBlock(object): block_hash: Hash32 is_processed: boolean is_valid: boolean total_difficulty: uint256 ``` -## Helper functions - -#### `get_pow_block` +### `get_pow_block` Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given the hash of the PoW block returns its data. *Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that haven't been processed yet. Either extending this existing method or implementing a new one is required. -#### `is_valid_transition_block` +### `get_transition_store` + +```python +def get_transition_store(anchor_pow_block: PowBlock): + transition_total_difficulty = pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET + return TransitionStore(transition_total_difficulty=transition_total_difficulty) +``` + +### `is_valid_transition_block` Used by fork-choice handler, `on_block`. ```python -def is_valid_transition_block(block: PowBlock) -> bool: - is_total_difficulty_reached = block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY +def is_valid_transition_block(transition_store: TransitionStore, block: PowBlock) -> bool: + is_total_difficulty_reached = block.total_difficulty >= transition_store.transition_total_difficulty return block.is_valid and is_total_difficulty_reached ``` ## Updated fork-choice handlers -#### `on_block` +### `on_block` *Note*: The only modification is the addition of the verification of transition block conditions. 
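For readers skimming the diff below, the new condition can be pictured with a minimal, self-contained sketch. This is illustrative only, not spec code: the helper name `terminal_pow_block_ok` is hypothetical, and it assumes just the `TransitionStore` and `PowBlock` shapes introduced above, with the threshold derived as an anchor PoW block's total difficulty plus the configured `TRANSITION_TOTAL_DIFFICULTY_OFFSET` (placeholder value `2**32` in the config files of this patch).

```python
# Illustrative sketch only (not spec code): the gate that `on_block` applies to
# the first block carrying an execution payload, assuming the TransitionStore
# and PowBlock shapes defined earlier in this patch.
from dataclasses import dataclass


@dataclass
class TransitionStore:
    transition_total_difficulty: int


@dataclass
class PowBlock:
    is_processed: bool
    is_valid: bool
    total_difficulty: int


def terminal_pow_block_ok(store: TransitionStore, pow_block: PowBlock) -> bool:
    # The referenced PoW parent must already be processed by the PoW node,
    # be valid, and have accumulated at least the transition total difficulty.
    return (
        pow_block.is_processed
        and pow_block.is_valid
        and pow_block.total_difficulty >= store.transition_total_difficulty
    )


# Threshold derived from an anchor PoW block plus the offset (2**32 used here
# as the placeholder value from the config files in this patch).
anchor_total_difficulty = 1_000_000
store = TransitionStore(transition_total_difficulty=anchor_total_difficulty + 2**32)

# A PoW block that has not yet reached the threshold is rejected...
assert not terminal_pow_block_ok(
    store, PowBlock(is_processed=True, is_valid=True, total_difficulty=anchor_total_difficulty)
)
# ...while one at or past the threshold passes the check.
assert terminal_pow_block_ok(
    store, PowBlock(is_processed=True, is_valid=True, total_difficulty=anchor_total_difficulty + 2**32)
)
```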
```python -def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: +def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: TransitionStore=None) -> None: block = signed_block.message # Parent block must be known assert block.parent_root in store.block_states @@ -119,11 +135,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root # [New in Merge] - if is_transition_block(pre_state, block): + is_transition_store_initialized = transition_store is not None + if is_transition_store_initialized and is_transition_block(pre_state, block): # Delay consideration of block until PoW block is processed by the PoW node pow_block = get_pow_block(block.body.execution_payload.parent_hash) assert pow_block.is_processed - assert is_valid_transition_block(pow_block) + assert is_valid_transition_block(transition_store, pow_block) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/merge/fork.md b/specs/merge/fork.md index ad6e3ad3bf..373b57cdc0 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -12,6 +12,7 @@ - [Fork to Merge](#fork-to-merge) - [Fork trigger](#fork-trigger) - [Upgrading the state](#upgrading-the-state) + - [Initializing transition store](#initializing-transition-store) @@ -87,3 +88,15 @@ def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState: return post ``` + +### Initializing transition store + +If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, a transition store is initialized to be further utilized by the transition process of the Merge. + +Transition store initialization occurs after the state has been modified by corresponding `upgrade_to_merge` function. + +```python +def initialize_transition_store(state: BeaconState) -> TransitionStore: + pow_block = get_pow_block(state.eth1_data.block_hash) + return get_transition_store(pow_block) +``` diff --git a/specs/merge/validator.md b/specs/merge/validator.md index c4c3960596..5b82030e0b 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -20,7 +20,6 @@ - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Execution Payload](#execution-payload) - [`get_pow_chain_head`](#get_pow_chain_head) - - [`produce_execution_payload`](#produce_execution_payload) @@ -68,18 +67,13 @@ All validator responsibilities remain unchanged other than those noted below. Na Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific. -###### `produce_execution_payload` - -Let `produce_execution_payload(parent_hash: Hash32, timestamp: uint64) -> ExecutionPayload` be the function that produces new instance of execution payload. -The `ExecutionEngine` protocol is used for the implementation specific part of execution payload proposals. 
- -* Set `block.body.execution_payload = get_execution_payload(state)` where: +* Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine)` where: ```python -def get_execution_payload(state: BeaconState, execution_engine: ExecutionEngine) -> ExecutionPayload: +def get_execution_payload(state: BeaconState, transition_store: TransitionStore, execution_engine: ExecutionEngine) -> ExecutionPayload: if not is_transition_completed(state): pow_block = get_pow_chain_head() - if not is_valid_transition_block(pow_block): + if not is_valid_transition_block(transition_store, pow_block): # Pre-merge, empty payload return ExecutionPayload() else: From db32c8b0131d913c9924bcb9e28dde8486206c6e Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 1 Jun 2021 17:44:58 +0600 Subject: [PATCH 46/82] Fix linter errors --- specs/merge/fork-choice.md | 2 +- specs/merge/validator.md | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 59dc8bad2a..b2e43d94be 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -98,7 +98,7 @@ Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given t ```python def get_transition_store(anchor_pow_block: PowBlock): - transition_total_difficulty = pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET + transition_total_difficulty = anchor_pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET return TransitionStore(transition_total_difficulty=transition_total_difficulty) ``` diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 5b82030e0b..89b7a3c942 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -70,7 +70,9 @@ Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of * Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine)` where: ```python -def get_execution_payload(state: BeaconState, transition_store: TransitionStore, execution_engine: ExecutionEngine) -> ExecutionPayload: +def get_execution_payload(state: BeaconState, + transition_store: TransitionStore, + execution_engine: ExecutionEngine) -> ExecutionPayload: if not is_transition_completed(state): pow_block = get_pow_chain_head() if not is_valid_transition_block(transition_store, pow_block): From b298d4d63d0ff61dbe87d683223cea059e056bb6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 1 Jun 2021 22:09:31 +0800 Subject: [PATCH 47/82] Update mainnet preset file --- presets/mainnet/altair.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/presets/mainnet/altair.yaml b/presets/mainnet/altair.yaml index 9f0ad9b4ce..9a17b78032 100644 --- a/presets/mainnet/altair.yaml +++ b/presets/mainnet/altair.yaml @@ -14,8 +14,8 @@ PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2 # --------------------------------------------------------------- # 2**9 (= 512) SYNC_COMMITTEE_SIZE: 512 -# 2**9 (= 512) -EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512 +# 2**8 (= 256) +EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256 # Sync protocol From d87e076ce3e73da00531893ed22c73ca25d3b7f7 Mon Sep 17 00:00:00 2001 From: Justin Date: Tue, 1 Jun 2021 17:05:12 +0100 Subject: [PATCH 48/82] Minor Altair cosmetic polishing --- specs/altair/beacon-chain.md | 41 ++++++++++++------------------------ 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 7412a8490c..6597751daf 100644 --- 
a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -137,8 +137,8 @@ This patch updates a few configuration values to move penalty parameters closer | Name | Value | Description | | - | - | - | -| `INACTIVITY_SCORE_BIAS` | `uint64(4)` | score points per inactive epoch | -| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(16)` | score points per recovering epoch | +| `INACTIVITY_SCORE_BIAS` | `uint64(2**2)` (= 4) | score points per inactive epoch | +| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(2**4)` (= 16) | score points per leak-free epoch | ## Containers @@ -157,8 +157,7 @@ class BeaconBlockBody(Container): attestations: List[Attestation, MAX_ATTESTATIONS] deposits: List[Deposit, MAX_DEPOSITS] voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] - # [New in Altair] - sync_aggregate: SyncAggregate + sync_aggregate: SyncAggregate # [New in Altair] ``` #### `BeaconState` @@ -266,10 +265,7 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool: ```python def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]: """ - Return the sequence of sync committee indices (which may include duplicate indices) - for the next sync committee, given a ``state`` at a sync committee period boundary. - - Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128) + Return the sync committee indices, with possible duplicates, for the next sync committee. """ epoch = Epoch(get_current_epoch(state) + 1) @@ -292,21 +288,12 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd #### `get_next_sync_committee` +*Note*: The function `get_next_sync_committee` should only be called at sync committee period boundaries. + ```python def get_next_sync_committee(state: BeaconState) -> SyncCommittee: """ - Return the *next* sync committee for a given ``state``. - - ``SyncCommittee`` contains an aggregate pubkey that enables - resource-constrained clients to save some computation when verifying - the sync committee's signature. - - ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices`` - returns duplicate indices. Implementations must take care when handling - optimizations relating to aggregation and verification in the presence of duplicates. - - Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates`` - as ``get_next_sync_committee_indices`` is not stable within a given period. + Return the next sync committee, with possible pubkey duplicates. """ indices = get_next_sync_committee_indices(state) pubkeys = [state.validators[index].pubkey for index in indices] @@ -325,14 +312,12 @@ def get_base_reward_per_increment(state: BeaconState) -> Gwei: *Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH` and the use of increment based accounting. +*Note*: On average an optimally performing validator earns one base reward per epoch. + ```python def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: """ Return the base reward for the validator defined by ``index`` with respect to the current ``state``. - - Note: An optimally performing validator can earn one base reward per epoch over a long time horizon. - This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal - and sync committees). 
""" increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT return Gwei(increments * get_base_reward_per_increment(state)) @@ -559,6 +544,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: #### Sync committee processing +*Note*: The function `process_sync_committee` is new. + ```python def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None: # Verify sync committee aggregate signature signing over the previous slot block root @@ -627,17 +614,17 @@ def process_justification_and_finalization(state: BeaconState) -> None: ```python def process_inactivity_updates(state: BeaconState) -> None: - # Score updates based on previous epoch participation, skip genesis epoch + # Skip the genesis epoch as score updates are based on the previous epoch participation if get_current_epoch(state) == GENESIS_EPOCH: return for index in get_eligible_validator_indices(state): - # Increase inactivity score of inactive validators + # Increase the inactivity score of inactive validators if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)): state.inactivity_scores[index] -= min(1, state.inactivity_scores[index]) else: state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS - # Decrease the score of all validators for forgiveness when not during a leak + # Decrease the inactivity score of all eligible validators during a leak-free epoch if not is_in_inactivity_leak(state): state.inactivity_scores[index] -= min(INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index]) ``` From b8d426b0286ca0049c50b50ec161be4729c165de Mon Sep 17 00:00:00 2001 From: Potuz Date: Mon, 31 May 2021 10:21:57 -0300 Subject: [PATCH 49/82] Align Sync Committee Tests to Specs --- .../test_process_sync_committee.py | 44 +++++-------------- 1 file changed, 10 insertions(+), 34 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py index ff388ff379..c42d3b3f56 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py @@ -115,52 +115,28 @@ def test_invalid_signature_extra_participant(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) -def compute_sync_committee_inclusion_reward(spec, - state, - participant_index, - committee_indices, - committee_bits): +def compute_sync_committee_inclusion_reward(spec, state): total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments) - max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR) - included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] - max_slot_rewards = spec.Gwei( - max_epoch_rewards * len(included_indices) - // len(committee_indices) // spec.SLOTS_PER_EPOCH - ) - - # Compute the participant and proposer sync rewards - committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices]) - committee_effective_balance = max(spec.EFFECTIVE_BALANCE_INCREMENT, committee_effective_balance) - effective_balance = state.validators[participant_index].effective_balance - return 
spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance) + max_participant_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // \ + spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH) + return spec.Gwei(max_participant_rewards // spec.SYNC_COMMITTEE_SIZE) def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits): included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] multiplicities = Counter(included_indices) - inclusion_reward = compute_sync_committee_inclusion_reward( - spec, state, participant_index, committee_indices, committee_bits, - ) + inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) return spec.Gwei(inclusion_reward * multiplicities[participant_index]) def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits): - proposer_reward = 0 - for index, bit in zip(committee_indices, committee_bits): - if not bit: - continue - inclusion_reward = compute_sync_committee_inclusion_reward( - spec, state, index, committee_indices, committee_bits, - ) - proposer_reward_denominator = ( - (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT) - * spec.WEIGHT_DENOMINATOR - // spec.PROPOSER_WEIGHT - ) - proposer_reward += spec.Gwei((inclusion_reward * spec.WEIGHT_DENOMINATOR) // proposer_reward_denominator) - return proposer_reward + proposer_reward_denominator = spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT + inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) + participant_number = committee_bits.count(True) + participant_reward = inclusion_reward * spec.PROPOSER_WEIGHT // proposer_reward_denominator + return spec.Gwei(participant_reward * participant_number) def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index): From c115fa9a86d801b290181b38038214f870d2e7fc Mon Sep 17 00:00:00 2001 From: Potuz Date: Mon, 31 May 2021 10:38:51 -0300 Subject: [PATCH 50/82] make it pass the linter --- .../test/altair/block_processing/test_process_sync_committee.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py index c42d3b3f56..5da8b65fd3 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py @@ -118,7 +118,7 @@ def test_invalid_signature_extra_participant(spec, state): def compute_sync_committee_inclusion_reward(spec, state): total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments) - max_participant_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // \ + max_participant_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH) return spec.Gwei(max_participant_rewards // spec.SYNC_COMMITTEE_SIZE) From 67809e76e163bebb136f833ecd97f0d24a539eea Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 2 Jun 2021 01:21:03 +0800 Subject: [PATCH 51/82] Fix tests --- .../test_process_sync_committee.py | 44 ++++--------------- .../test/helpers/proposer_slashings.py | 39 ++++++++++++++-- .../eth2spec/test/helpers/sync_committee.py | 41 
+++++++++++++++++ .../test/phase0/sanity/test_blocks.py | 29 ++++++++++-- 4 files changed, 109 insertions(+), 44 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py index 5da8b65fd3..b0a294c856 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py @@ -1,4 +1,3 @@ -from collections import Counter import random from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -13,6 +12,9 @@ ) from eth2spec.test.helpers.sync_committee import ( compute_aggregate_sync_committee_signature, + compute_sync_committee_participant_reward_and_penalty, + compute_sync_committee_proposer_reward, + compute_committee_indices, ) from eth2spec.test.context import ( expect_assertion_error, @@ -61,15 +63,6 @@ def get_committee_indices(spec, state, duplicates=False): state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index]) -def compute_committee_indices(spec, state, committee): - """ - Given a ``committee``, calculate and return the related indices - """ - all_pubkeys = [v.pubkey for v in state.validators] - committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys] - return committee_indices - - @with_altair_and_later @spec_state_test @always_bls @@ -115,41 +108,20 @@ def test_invalid_signature_extra_participant(spec, state): yield from run_sync_committee_processing(spec, state, block, expect_exception=True) -def compute_sync_committee_inclusion_reward(spec, state): - total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT - total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments) - max_participant_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // - spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH) - return spec.Gwei(max_participant_rewards // spec.SYNC_COMMITTEE_SIZE) - - -def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits): - included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] - multiplicities = Counter(included_indices) - - inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) - return spec.Gwei(inclusion_reward * multiplicities[participant_index]) - - -def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits): - proposer_reward_denominator = spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT - inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) - participant_number = committee_bits.count(True) - participant_reward = inclusion_reward * spec.PROPOSER_WEIGHT // proposer_reward_denominator - return spec.Gwei(participant_reward * participant_number) - - def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index): for index in range(len(post_state.validators)): reward = 0 + penalty = 0 if index in committee_indices: - reward += compute_sync_committee_participant_reward( + _reward, _penalty = compute_sync_committee_participant_reward_and_penalty( spec, pre_state, index, committee_indices, committee_bits, ) + reward += _reward + penalty += _penalty if proposer_index == index: reward += compute_sync_committee_proposer_reward( @@ -159,7 +131,7 @@ def 
validate_sync_committee_rewards(spec, pre_state, post_state, committee_indic committee_bits, ) - assert post_state.balances[index] == pre_state.balances[index] + reward + assert post_state.balances[index] == pre_state.balances[index] + reward - penalty def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits): diff --git a/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py b/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py index d3520e580f..a783d2517d 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py +++ b/tests/core/pyspec/eth2spec/test/helpers/proposer_slashings.py @@ -2,6 +2,10 @@ from eth2spec.test.helpers.block_header import sign_block_header from eth2spec.test.helpers.keys import pubkey_to_privkey from eth2spec.test.helpers.state import get_balance +from eth2spec.test.helpers.sync_committee import ( + compute_committee_indices, + compute_sync_committee_participant_reward_and_penalty, +) def get_min_slashing_penalty_quotient(spec): @@ -11,7 +15,7 @@ def get_min_slashing_penalty_quotient(spec): return spec.MIN_SLASHING_PENALTY_QUOTIENT -def check_proposer_slashing_effect(spec, pre_state, state, slashed_index): +def check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block=None): slashed_validator = state.validators[slashed_index] assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH @@ -20,24 +24,51 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index): proposer_index = spec.get_beacon_proposer_index(state) slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec) whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT + + # Altair introduces sync committee (SC) reward and penalty + sc_reward_for_slashed = sc_penalty_for_slashed = sc_reward_for_proposer = sc_penalty_for_proposer = 0 + if is_post_altair(spec) and block is not None: + committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) + committee_bits = block.body.sync_aggregate.sync_committee_bits + sc_reward_for_slashed, sc_penalty_for_slashed = compute_sync_committee_participant_reward_and_penalty( + spec, + pre_state, + slashed_index, + committee_indices, + committee_bits, + ) + sc_reward_for_proposer, sc_penalty_for_proposer = compute_sync_committee_participant_reward_and_penalty( + spec, + pre_state, + proposer_index, + committee_indices, + committee_bits, + ) + if proposer_index != slashed_index: # slashed validator lost initial slash penalty assert ( get_balance(state, slashed_index) - == get_balance(pre_state, slashed_index) - slash_penalty + == get_balance(pre_state, slashed_index) - slash_penalty + sc_reward_for_slashed - sc_penalty_for_slashed ) # block proposer gained whistleblower reward # >= because proposer could have reported multiple assert ( get_balance(state, proposer_index) - >= get_balance(pre_state, proposer_index) + whistleblower_reward + >= ( + get_balance(pre_state, proposer_index) + whistleblower_reward + + sc_reward_for_proposer - sc_penalty_for_proposer + ) ) else: # proposer reported themself so get penalty and reward # >= because proposer could have reported multiple assert ( get_balance(state, slashed_index) - >= get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward + >= ( + get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward + + sc_reward_for_slashed - 
sc_penalty_for_slashed + ) ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py index da85fad606..fa753db527 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py @@ -1,3 +1,5 @@ +from collections import Counter + from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -33,3 +35,42 @@ def compute_aggregate_sync_committee_signature(spec, state, slot, participants, ) ) return bls.Aggregate(signatures) + + +def compute_sync_committee_inclusion_reward(spec, state): + total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT + total_base_rewards = spec.get_base_reward_per_increment(state) * total_active_increments + max_participant_rewards = (total_base_rewards * spec.SYNC_REWARD_WEIGHT + // spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH) + return max_participant_rewards // spec.SYNC_COMMITTEE_SIZE + + +def compute_sync_committee_participant_reward_and_penalty( + spec, state, participant_index, committee_indices, committee_bits): + inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) + + included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit] + not_included_indices = [index for index, bit in zip(committee_indices, committee_bits) if not bit] + included_multiplicities = Counter(included_indices) + not_included_multiplicities = Counter(not_included_indices) + return ( + spec.Gwei(inclusion_reward * included_multiplicities[participant_index]), + spec.Gwei(inclusion_reward * not_included_multiplicities[participant_index]) + ) + + +def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits): + proposer_reward_denominator = spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT + inclusion_reward = compute_sync_committee_inclusion_reward(spec, state) + participant_number = committee_bits.count(True) + participant_reward = inclusion_reward * spec.PROPOSER_WEIGHT // proposer_reward_denominator + return spec.Gwei(participant_reward * participant_number) + + +def compute_committee_indices(spec, state, committee): + """ + Given a ``committee``, calculate and return the related indices + """ + all_pubkeys = [v.pubkey for v in state.validators] + committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys] + return committee_indices diff --git a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py index dba6238556..33e9854b2e 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py @@ -24,6 +24,10 @@ run_slash_and_exit, run_test_full_random_operations, ) +from eth2spec.test.helpers.sync_committee import ( + compute_committee_indices, + compute_sync_committee_participant_reward_and_penalty, +) from eth2spec.test.helpers.constants import PHASE0, MINIMAL from eth2spec.test.context import ( spec_test, spec_state_test, dump_skipping_message, @@ -416,7 +420,7 @@ def test_proposer_slashing(spec, state): yield 'blocks', [signed_block] yield 'post', state - check_proposer_slashing_effect(spec, pre_state, state, slashed_index) + check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block) @with_all_phases @@ -491,7 +495,7 @@ def test_multiple_different_proposer_slashings_same_block(spec, 
state): for proposer_slashing in proposer_slashings: slashed_index = proposer_slashing.signed_header_1.message.proposer_index - check_proposer_slashing_effect(spec, pre_state, state, slashed_index) + check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block) def check_attester_slashing_effect(spec, pre_state, state, slashed_indices): @@ -743,7 +747,8 @@ def test_deposit_top_up(spec, state): initial_balances_len = len(state.balances) validator_pre_balance = get_balance(state, validator_index) - yield 'pre', state + pre_state = state.copy() + yield 'pre', pre_state block = build_empty_block_for_next_slot(spec, state) block.body.deposits.append(deposit) @@ -755,7 +760,23 @@ def test_deposit_top_up(spec, state): assert len(state.validators) == initial_registry_len assert len(state.balances) == initial_balances_len - assert get_balance(state, validator_index) == validator_pre_balance + amount + + # Altair introduces sync committee (sm) reward and penalty + sync_committee_reward = sync_committee_penalty = 0 + if is_post_altair(spec): + committee_indices = compute_committee_indices(spec, state, state.current_sync_committee) + committee_bits = block.body.sync_aggregate.sync_committee_bits + sync_committee_reward, sync_committee_penalty = compute_sync_committee_participant_reward_and_penalty( + spec, + pre_state, + validator_index, + committee_indices, + committee_bits, + ) + + assert get_balance(state, validator_index) == ( + validator_pre_balance + amount + sync_committee_reward - sync_committee_penalty + ) @with_all_phases From 417dda832601595aa8c066734a32bc319dc1d584 Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 1 Jun 2021 22:38:36 +0200 Subject: [PATCH 52/82] fix committee index assertion --- specs/sharding/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 84ce948677..ce0dbd0f65 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -480,7 +480,7 @@ def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Sh epoch = compute_epoch_at_slot(slot) active_shards = get_active_shard_count(state, epoch) index = CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) - assert index >= get_committee_count_per_slot(state, epoch) + assert index < get_committee_count_per_slot(state, epoch) return index ``` From 660483072961ded8426beb4efa4cc86b99cd2639 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 2 Jun 2021 15:00:01 +0600 Subject: [PATCH 53/82] Apply suggestions after the review --- specs/merge/fork-choice.md | 19 +++++-------------- specs/merge/fork.md | 4 ++++ specs/merge/validator.md | 2 +- 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index b2e43d94be..19f0db8aeb 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -16,8 +16,7 @@ - [`TransitionStore`](#transitionstore) - [`PowBlock`](#powblock) - [`get_pow_block`](#get_pow_block) - - [`get_transition_store`](#get_transition_store) - - [`is_valid_transition_block`](#is_valid_transition_block) + - [`is_valid_terminal_pow_block`](#is_valid_terminal_pow_block) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - [`on_block`](#on_block) @@ -94,20 +93,12 @@ Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given t *Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks 
that haven't been processed yet. Either extending this existing method or implementing a new one is required. -### `get_transition_store` - -```python -def get_transition_store(anchor_pow_block: PowBlock): - transition_total_difficulty = anchor_pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET - return TransitionStore(transition_total_difficulty=transition_total_difficulty) -``` - -### `is_valid_transition_block` +### `is_valid_terminal_pow_block` Used by fork-choice handler, `on_block`. ```python -def is_valid_transition_block(transition_store: TransitionStore, block: PowBlock) -> bool: +def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock) -> bool: is_total_difficulty_reached = block.total_difficulty >= transition_store.transition_total_difficulty return block.is_valid and is_total_difficulty_reached ``` @@ -135,12 +126,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: Tr assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root # [New in Merge] - is_transition_store_initialized = transition_store is not None + is_transition_store_initialized = (transition_store is not None) if is_transition_store_initialized and is_transition_block(pre_state, block): # Delay consideration of block until PoW block is processed by the PoW node pow_block = get_pow_block(block.body.execution_payload.parent_hash) assert pow_block.is_processed - assert is_valid_transition_block(transition_store, pow_block) + assert is_valid_terminal_pow_block(transition_store, pow_block) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/merge/fork.md b/specs/merge/fork.md index 373b57cdc0..8e5b607eff 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -96,6 +96,10 @@ If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == Transition store initialization occurs after the state has been modified by corresponding `upgrade_to_merge` function. 
```python +def get_transition_store(anchor_pow_block: PowBlock) -> TransitionStore: + transition_total_difficulty = anchor_pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET + return TransitionStore(transition_total_difficulty=transition_total_difficulty) + def initialize_transition_store(state: BeaconState) -> TransitionStore: pow_block = get_pow_block(state.eth1_data.block_hash) return get_transition_store(pow_block) diff --git a/specs/merge/validator.md b/specs/merge/validator.md index 89b7a3c942..c5a7a4c789 100644 --- a/specs/merge/validator.md +++ b/specs/merge/validator.md @@ -75,7 +75,7 @@ def get_execution_payload(state: BeaconState, execution_engine: ExecutionEngine) -> ExecutionPayload: if not is_transition_completed(state): pow_block = get_pow_chain_head() - if not is_valid_transition_block(transition_store, pow_block): + if not is_valid_terminal_pow_block(transition_store, pow_block): # Pre-merge, empty payload return ExecutionPayload() else: From 65f48178b7f2848d0dedea90d3a028b8d22b255d Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 2 Jun 2021 14:41:24 -0700 Subject: [PATCH 54/82] Include block_root in SyncCommitteeSignature --- specs/altair/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 3b3362b22c..7469552fb8 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -282,7 +282,7 @@ def get_sync_committee_signature(state: BeaconState, signing_root = compute_signing_root(block_root, domain) signature = bls.Sign(privkey, signing_root) - return SyncCommitteeSignature(slot=state.slot, validator_index=validator_index, signature=signature) + return SyncCommitteeSignature(slot=state.slot, beacon_block_root=block_root, validator_index=validator_index, signature=signature) ``` ##### Broadcast sync committee signature From 45a71eb2670e3ba74ca48586e397240ff0a14c10 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 2 Jun 2021 15:58:51 -0600 Subject: [PATCH 55/82] line length lint --- specs/altair/validator.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 7469552fb8..0ba776d5e8 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -282,7 +282,12 @@ def get_sync_committee_signature(state: BeaconState, signing_root = compute_signing_root(block_root, domain) signature = bls.Sign(privkey, signing_root) - return SyncCommitteeSignature(slot=state.slot, beacon_block_root=block_root, validator_index=validator_index, signature=signature) + return SyncCommitteeSignature( + slot=state.slot, + beacon_block_root=block_root, + validator_index=validator_index, + signature=signature, + ) ``` ##### Broadcast sync committee signature From 6872c016826a896653fb2818fc4a8b4b94f1fc23 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 3 Jun 2021 12:31:59 +0600 Subject: [PATCH 56/82] Add an extra line between function def --- specs/merge/fork.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/merge/fork.md b/specs/merge/fork.md index 8e5b607eff..cf6d938ddd 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -100,6 +100,7 @@ def get_transition_store(anchor_pow_block: PowBlock) -> TransitionStore: transition_total_difficulty = anchor_pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET return TransitionStore(transition_total_difficulty=transition_total_difficulty) + def initialize_transition_store(state: BeaconState) -> TransitionStore: pow_block 
= get_pow_block(state.eth1_data.block_hash) return get_transition_store(pow_block) From 989d33f6ec44d0fd4a7c0c4c9b6ada852e2c75d3 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 3 Jun 2021 17:30:43 +0800 Subject: [PATCH 57/82] Bump mypy to v0.800. This release adds Python 3.9 support. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6e6349b6b4..8461fa995d 100644 --- a/setup.py +++ b/setup.py @@ -1007,7 +1007,7 @@ def run(self): python_requires=">=3.8, <4", extras_require={ "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], - "lint": ["flake8==3.7.7", "mypy==0.750"], + "lint": ["flake8==3.7.7", "mypy==0.800"], "generator": ["python-snappy==0.5.4"], }, install_requires=[ From 7f64757b5147174fe0bbb7943689b3a455644b27 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 3 Jun 2021 16:08:56 +0600 Subject: [PATCH 58/82] Fix Optional[TransitionStore] issue to satisfy linter --- specs/merge/fork-choice.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 19f0db8aeb..1a87864681 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -126,8 +126,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: Tr assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root # [New in Merge] - is_transition_store_initialized = (transition_store is not None) - if is_transition_store_initialized and is_transition_block(pre_state, block): + if (transition_store is not None) and is_transition_block(pre_state, block): # Delay consideration of block until PoW block is processed by the PoW node pow_block = get_pow_block(block.body.execution_payload.parent_hash) assert pow_block.is_processed From 049c18b6c4e708bda064f859e981fcc9143a8b55 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 3 Jun 2021 20:49:29 +0800 Subject: [PATCH 59/82] Bump mypy to v0.812 (latest) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8461fa995d..508baec662 100644 --- a/setup.py +++ b/setup.py @@ -1007,7 +1007,7 @@ def run(self): python_requires=">=3.8, <4", extras_require={ "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], - "lint": ["flake8==3.7.7", "mypy==0.800"], + "lint": ["flake8==3.7.7", "mypy==0.812"], "generator": ["python-snappy==0.5.4"], }, install_requires=[ From c0af4201306e42393f6c4de84413dd293d3e58ac Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 3 Jun 2021 17:32:35 +0200 Subject: [PATCH 60/82] shard work status enum, prefix instead of suffix + move full committee balance init line --- specs/sharding/beacon-chain.md | 40 +++++++++++++++++----------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index ce0dbd0f65..7c9c6295dc 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -13,7 +13,7 @@ - [Constants](#constants) - [Misc](#misc) - [Domain types](#domain-types) - - [Shard Header Status](#shard-header-status) + - [Shard Work Status](#shard-work-status) - [Preset](#preset) - [Misc](#misc-1) - [Shard block samples](#shard-block-samples) @@ -101,13 +101,13 @@ The following values are (non-configurable) constants used throughout the specif | `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` | | `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | -### Shard Header Status +### Shard Work Status | Name | 
Value | Notes | | - | - | - | -| `UNCONFIRMED_SHARD_DATA` | `0` | Unconfirmed, nullified after confirmation time elapses | -| `CONFIRMED_SHARD_DATA` | `1` | Confirmed, reduced to just the commitment | -| `PENDING_SHARD_DATA` | `2` | Pending, a list of competing headers | +| `SHARD_WORK_UNCONFIRMED` | `0` | Unconfirmed, nullified after confirmation time elapses | +| `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment | +| `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers | ## Preset @@ -289,10 +289,10 @@ class ShardProposerSlashing(Container): ```python class ShardWork(Container): # Upon confirmation the data is reduced to just the header. - status: Union[ # See Shard Header Status enum - None, # UNCONFIRMED_SHARD_DATA - DataCommitment, # CONFIRMED_SHARD_DATA - List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # PENDING_SHARD_DATA + status: Union[ # See Shard Work Status enum + None, # SHARD_WORK_UNCONFIRMED + DataCommitment, # SHARD_WORK_CONFIRMED + List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # SHARD_WORK_PENDING ] ``` @@ -541,7 +541,7 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N committee_work = state.shard_buffer[buffer_index][attestation_shard] # Skip attestation vote accounting if the header is not pending - if committee_work.status.selector != PENDING_SHARD_DATA: + if committee_work.status.selector != SHARD_WORK_PENDING: # TODO In Altair: set participation bit flag, if attestation matches winning header. return @@ -552,7 +552,6 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N pending_header: PendingShardHeader = current_headers[header_index] full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index) - full_committee_balance = Gwei(0) # The weight may be outdated if it is not the initial weight, and from a previous epoch if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state): @@ -561,6 +560,7 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N pending_header.update_slot = state.slot + full_committee_balance = Gwei(0) # Update votes bitfield in the state, update weights for i, bit in enumerate(attestation.aggregation_bits): weight = state.validators[full_committee[i]].effective_balance @@ -576,12 +576,12 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N if pending_header.commitment == DataCommitment(): # The committee voted to not confirm anything state.shard_buffer[buffer_index][attestation_shard].change( - selector=UNCONFIRMED_SHARD_DATA, + selector=SHARD_WORK_UNCONFIRMED, value=None, ) else: state.shard_buffer[buffer_index][attestation_shard].change( - selector=CONFIRMED_SHARD_DATA, + selector=SHARD_WORK_CONFIRMED, value=pending_header.commitment, ) ``` @@ -604,7 +604,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade # Check that this data is still pending committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard] - assert committee_work.status.selector == PENDING_SHARD_DATA + assert committee_work.status.selector == SHARD_WORK_PENDING # Check that this header is not yet in the pending list current_headers: Sequence[PendingShardHeader] = committee_work.status.value @@ -718,13 +718,13 @@ def process_pending_shard_confirmations(state: BeaconState) -> None: buffer_index = slot % SHARD_STATE_MEMORY_SLOTS for shard_index in 
range(len(state.shard_buffer[buffer_index])): committee_work = state.shard_buffer[buffer_index][shard_index] - if committee_work.selector == PENDING_SHARD_DATA: + if committee_work.selector == SHARD_WORK_PENDING: winning_header = max(committee_work.value, key=lambda header: header.weight) # TODO In Altair: set participation bit flag of voters for winning header if winning_header.commitment == DataCommitment(): - committee_work.change(selector=UNCONFIRMED_SHARD_DATA, value=None) + committee_work.change(selector=SHARD_WORK_UNCONFIRMED, value=None) else: - committee_work.change(selector=CONFIRMED_SHARD_DATA, value=winning_header.commitment) + committee_work.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment) ``` #### `charge_confirmed_shard_fees` @@ -743,7 +743,7 @@ def charge_confirmed_shard_fees(state: BeaconState) -> None: buffer_index = slot % SHARD_STATE_MEMORY_SLOTS for shard_index in range(len(state.shard_buffer[buffer_index])): committee_work = state.shard_buffer[buffer_index][shard_index] - if committee_work.status.selector == CONFIRMED_SHARD_DATA: + if committee_work.status.selector == SHARD_WORK_CONFIRMED: # Charge EIP 1559 fee proposer = get_shard_proposer_index(state, slot, Shard(shard_index)) fee = ( @@ -783,7 +783,7 @@ def reset_pending_shard_work(state: BeaconState) -> None: # a committee is available, initialize a pending shard-header list committee_length = len(get_beacon_committee(state, slot, committee_index)) state.shard_buffer[buffer_index][shard].change( - selector=PENDING_SHARD_DATA, + selector=SHARD_WORK_PENDING, value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]( PendingShardHeader( commitment=DataCommitment(), @@ -794,7 +794,7 @@ def reset_pending_shard_work(state: BeaconState) -> None: ) ) ) - # a shard without committee available defaults to UNCONFIRMED_SHARD_DATA. + # a shard without committee available defaults to SHARD_WORK_UNCONFIRMED. 
``` #### `process_shard_epoch_increment` From d4f6459108a06a2e4fc5b2870d9c2ee19463cf36 Mon Sep 17 00:00:00 2001 From: Diederik Loerakker Date: Thu, 3 Jun 2021 18:00:51 +0200 Subject: [PATCH 61/82] Define missing header_root Co-authored-by: Anton Nashatyrev --- specs/sharding/beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 7c9c6295dc..9a0117fb46 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -608,6 +608,7 @@ def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeade # Check that this header is not yet in the pending list current_headers: Sequence[PendingShardHeader] = committee_work.status.value + header_root = hash_tree_root(header) assert header_root not in [pending_header.root for pending_header in current_headers] # Verify proposer From 9050897cb273cf772abb4ac97bc66215b1cf9740 Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 3 Jun 2021 19:11:47 +0200 Subject: [PATCH 62/82] fix committee work status and commitment references --- specs/sharding/beacon-chain.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index 9a0117fb46..4f9f6d9349 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -719,13 +719,13 @@ def process_pending_shard_confirmations(state: BeaconState) -> None: buffer_index = slot % SHARD_STATE_MEMORY_SLOTS for shard_index in range(len(state.shard_buffer[buffer_index])): committee_work = state.shard_buffer[buffer_index][shard_index] - if committee_work.selector == SHARD_WORK_PENDING: - winning_header = max(committee_work.value, key=lambda header: header.weight) + if committee_work.status.selector == SHARD_WORK_PENDING: + winning_header = max(committee_work.status.value, key=lambda header: header.weight) # TODO In Altair: set participation bit flag of voters for winning header if winning_header.commitment == DataCommitment(): - committee_work.change(selector=SHARD_WORK_UNCONFIRMED, value=None) + committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None) else: - committee_work.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment) + committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment) ``` #### `charge_confirmed_shard_fees` @@ -745,10 +745,11 @@ def charge_confirmed_shard_fees(state: BeaconState) -> None: for shard_index in range(len(state.shard_buffer[buffer_index])): committee_work = state.shard_buffer[buffer_index][shard_index] if committee_work.status.selector == SHARD_WORK_CONFIRMED: + commitment: DataCommitment = committee_work.status.value # Charge EIP 1559 fee proposer = get_shard_proposer_index(state, slot, Shard(shard_index)) fee = ( - (state.shard_gasprice * candidate.commitment.length) + (state.shard_gasprice * commitment.length) // TARGET_SAMPLES_PER_BLOCK ) decrease_balance(state, proposer, fee) @@ -756,7 +757,7 @@ def charge_confirmed_shard_fees(state: BeaconState) -> None: # Track updated gas price new_gasprice = compute_updated_gasprice( new_gasprice, - candidate.commitment.length, + commitment.length, adjustment_quotient, ) state.shard_gasprice = new_gasprice From 071abfa846b9c4fc0bc141df8492938663c95ae5 Mon Sep 17 00:00:00 2001 From: Anton Nashatyrev Date: Fri, 4 Jun 2021 18:44:35 +0300 Subject: [PATCH 63/82] Revert beacon-chain changes as they are handled in PR #2455 --- specs/sharding/beacon-chain.md | 18 
+++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/specs/sharding/beacon-chain.md b/specs/sharding/beacon-chain.md index ae9177b5b3..a15a002e48 100644 --- a/specs/sharding/beacon-chain.md +++ b/specs/sharding/beacon-chain.md @@ -456,15 +456,8 @@ def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: Co ```python def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex: - """ - Returns either committee index for ``shard`` at ``slot`` or ``None`` if no committee - """ active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot)) - index = (active_shards + shard - get_start_shard(state, slot)) % active_shards - if index >= get_committee_count_per_slot(state, compute_epoch_at_slot(slot)): - return None - else: - return CommitteeIndex(index) + return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards) ``` @@ -567,7 +560,6 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None: def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None: header = signed_header.message - committee_index = compute_committee_index_from_shard(state, header.slot, header.shard) # Verify the header is not 0, and not from the future. assert Slot(0) < header.slot <= state.slot header_epoch = compute_epoch_at_slot(header.slot) @@ -575,8 +567,6 @@ def process_shard_header(state: BeaconState, assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)] # Verify that the shard is active assert header.shard < get_active_shard_count(state, header_epoch) - # Verify that shard has a committee at slot - assert committee_index is not None # Verify that the block root matches, # to ensure the header will only be included in this specific Beacon Chain sub-tree. 
assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1) @@ -606,7 +596,8 @@ def process_shard_header(state: BeaconState, assert header_root not in [pending_header.root for pending_header in pending_headers] # Include it in the pending list - committee_length = len(get_beacon_committee(state, header.slot, committee_index)) + index = compute_committee_index_from_shard(state, header.slot, header.shard) + committee_length = len(get_beacon_committee(state, header.slot, index)) pending_headers.append(PendingShardHeader( slot=header.slot, shard=header.shard, @@ -703,9 +694,6 @@ def process_pending_headers(state: BeaconState) -> None: # The entire committee (and its balance) index = compute_committee_index_from_shard(state, slot, shard) - if index is None: - # the shard had no committee on this slot - continue full_committee = get_beacon_committee(state, slot, index) # The set of voters who voted for each header (and their total balances) voting_sets = [ From 0ae9a85b17de320f7116e7ccd052e72eb29aa1e8 Mon Sep 17 00:00:00 2001 From: Anton Nashatyrev Date: Fri, 4 Jun 2021 18:48:09 +0300 Subject: [PATCH 64/82] Adopt shard_blob gossip validation on top of PR #2455: here the `compute_committee_index_from_shard` raises an error if no committee assigned to a shard --- specs/sharding/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/sharding/p2p-interface.md b/specs/sharding/p2p-interface.md index 9ff00b8cfa..51dbfd5a6a 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/sharding/p2p-interface.md @@ -125,7 +125,7 @@ The following validations MUST pass before forwarding the `signed_blob` (with in - _[IGNORE]_ The `blob` is new enough to be still be processed -- i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)` - _[REJECT]_ The shard should have a committee at slot -- - i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard) is not None` + i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error - _[REJECT]_ The shard blob is for the correct subnet -- i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id` - _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination. @@ -155,7 +155,7 @@ The following validations MUST pass before forwarding the `signed_shard_blob_hea i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)` - _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination. - _[REJECT]_ The shard should have a committee at slot -- - i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard) is not None` + i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error - _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey. - _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`). 
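
For implementers, the `[REJECT]` committee-existence conditions above reduce to catching the failure of `compute_committee_index_from_shard`. A minimal sketch, assuming the PR #2455 version of that helper (which asserts when no committee is assigned to the shard at the given slot) and a `spec` module exposing it — both assumptions, not part of this patch:

```python
def shard_has_committee(spec, state, slot, shard) -> bool:
    # PR #2455's compute_committee_index_from_shard asserts when the shard has
    # no committee at `slot`; translate that failure into the boolean that the
    # gossip validation conditions need.
    try:
        spec.compute_committee_index_from_shard(state, slot, shard)
        return True
    except AssertionError:
        return False
```

A client would then `[REJECT]` the blob or header whenever this returns `False`.
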
From 7710d4fa5c87d2f8e7b1e32f77b15b08851720c8 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 7 Jun 2021 07:55:04 -0600 Subject: [PATCH 65/82] add a few more preset/config invariants --- .../unittests/test_config_invariants.py | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py index 078b48ea5a..b39b011b4f 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py @@ -18,13 +18,28 @@ def test_validators(spec, state): check_bound(spec.MAX_COMMITTEES_PER_SLOT, 1, MAX_UINT_64) check_bound(spec.TARGET_COMMITTEE_SIZE, 1, MAX_UINT_64) - check_bound(spec.MAX_VALIDATORS_PER_COMMITTEE, 1, spec.VALIDATOR_REGISTRY_LIMIT) + # Note: can be less if you assume stricters bounds on validator set based on total ETH supply + maximum_validators_per_committee = ( + spec.VALIDATOR_REGISTRY_LIMIT + // spec.SLOTS_PER_EPOCH + // spec.MAX_COMMITTEES_PER_SLOT + ) + check_bound(spec.MAX_VALIDATORS_PER_COMMITTEE, 1, maximum_validators_per_committee) check_bound(spec.config.MIN_PER_EPOCH_CHURN_LIMIT, 1, spec.VALIDATOR_REGISTRY_LIMIT) check_bound(spec.config.CHURN_LIMIT_QUOTIENT, 1, spec.VALIDATOR_REGISTRY_LIMIT) check_bound(spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT, spec.TARGET_COMMITTEE_SIZE, MAX_UINT_64) +@with_all_phases +@spec_state_test +def test_balances(spec, state): + assert spec.MAX_EFFECTIVE_BALANCE % spec.EFFECTIVE_BALANCE_INCREMENT == 0 + check_bound(spec.MIN_DEPOSIT_AMOUNT, 1, MAX_UINT_64) + check_bound(spec.MAX_EFFECTIVE_BALANCE, spec.MIN_DEPOSIT_AMOUNT, MAX_UINT_64) + check_bound(spec.MAX_EFFECTIVE_BALANCE, spec.EFFECTIVE_BALANCE_INCREMENT, MAX_UINT_64) + + @with_all_phases @spec_state_test def test_hysteresis_quotient(spec, state): @@ -47,6 +62,10 @@ def test_incentives(spec, state): @spec_state_test def test_time(spec, state): assert spec.SLOTS_PER_EPOCH <= spec.SLOTS_PER_HISTORICAL_ROOT + assert spec.MIN_SEED_LOOKAHEAD < spec.MAX_SEED_LOOKAHEAD + assert spec.SLOTS_PER_HISTORICAL_ROOT % spec.SLOTS_PER_EPOCH == 0 + check_bound(spec.SLOTS_PER_HISTORICAL_ROOT, spec.SLOTS_PER_EPOCH, MAX_UINT_64) + check_bound(spec.MIN_ATTESTATION_INCLUSION_DELAY, 1, spec.SLOTS_PER_EPOCH) @with_all_phases From 6f7e04ef5c6d71fd20cd175e94b1cb37a415534b Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 7 Jun 2021 07:56:52 -0600 Subject: [PATCH 66/82] Apply suggestions from hwwhww code review Co-authored-by: Hsiao-Wei Wang --- .../test_process_participation_flag_updates.py | 6 +++--- tests/formats/epoch_processing/README.md | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py index 287525608b..3e322ad428 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py @@ -21,7 +21,7 @@ def run_process_participation_flag_updates(spec, state): @with_altair_and_later @spec_state_test -def test_zeroed(spec, state): +def test_all_zeroed(spec, state): next_epoch_via_block(spec, state) state.current_epoch_participation = [0] * len(state.validators) 
state.previous_epoch_participation = [0] * len(state.validators) @@ -70,7 +70,7 @@ def test_random_genesis(spec, state): @with_altair_and_later @spec_state_test -def test_zeroing(spec, state): +def test_current_epoch_zeroed(spec, state): next_epoch_via_block(spec, state) random_flags(spec, state, 12, current=False) state.current_epoch_participation = [0] * len(state.validators) @@ -79,7 +79,7 @@ def test_zeroing(spec, state): @with_altair_and_later @spec_state_test -def test_prev_zeroed(spec, state): +def test_previous_epoch_zeroed(spec, state): next_epoch_via_block(spec, state) random_flags(spec, state, 13, previous=False) state.previous_epoch_participation = [0] * len(state.validators) diff --git a/tests/formats/epoch_processing/README.md b/tests/formats/epoch_processing/README.md index 33ec79290e..1032026a63 100644 --- a/tests/formats/epoch_processing/README.md +++ b/tests/formats/epoch_processing/README.md @@ -33,7 +33,7 @@ The provided pre-state is already transitioned to just before the specific sub-t Sub-transitions: - `justification_and_finalization` -- `inactivity_penalty_updates` (Altair) +- `inactivity_updates` (Altair) - `rewards_and_penalties` - `registry_updates` - `slashings` From 9c78de9d420c36f99f69ad5bced9e9ee8a85534f Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 7 Jun 2021 08:00:03 -0600 Subject: [PATCH 67/82] pr review --- .../test_process_participation_flag_updates.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py index 3e322ad428..82acba3224 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_participation_flag_updates.py @@ -13,7 +13,7 @@ def run_process_participation_flag_updates(spec, state): - old = state.current_epoch_participation + old = state.current_epoch_participation.copy() yield from run_epoch_processing_with(spec, state, 'process_participation_flag_updates') assert state.current_epoch_participation == [0] * len(state.validators) assert state.previous_epoch_participation == old @@ -99,6 +99,7 @@ def initializer(spec): @with_custom_state(balances_fn=custom_validator_count(1.3), threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_slightly_larger_random(spec, state): + next_epoch_via_block(spec, state) random_flags(spec, state, 14) yield from run_process_participation_flag_updates(spec, state) @@ -109,5 +110,6 @@ def test_slightly_larger_random(spec, state): @with_custom_state(balances_fn=custom_validator_count(2.6), threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @single_phase def test_large_random(spec, state): + next_epoch_via_block(spec, state) random_flags(spec, state, 15) yield from run_process_participation_flag_updates(spec, state) From 96334e3ee7d1fbae079e18010524557aa333a315 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 7 Jun 2021 22:21:36 +0600 Subject: [PATCH 68/82] Switch to suggested td computation formula --- configs/mainnet.yaml | 2 +- configs/minimal.yaml | 2 +- specs/merge/beacon-chain.md | 12 ------------ specs/merge/fork.md | 16 +++++++++++++++- 4 files changed, 17 insertions(+), 15 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 80b8b6e7d6..dd5b394af4 100644 --- a/configs/mainnet.yaml +++ 
b/configs/mainnet.yaml @@ -32,7 +32,7 @@ SHARDING_FORK_VERSION: 0x03000000 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY_OFFSET: 4294967296 +MIN_ANCHOR_POW_BLOCK_DIFFICULTY: 4294967296 # Time parameters diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 8abbdb7fce..37a428b50e 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -31,7 +31,7 @@ SHARDING_FORK_VERSION: 0x03000001 SHARDING_FORK_EPOCH: 18446744073709551615 # TBD, 2**32 is a placeholder. Merge transition approach is in active R&D. -TRANSITION_TOTAL_DIFFICULTY_OFFSET: 4294967296 +MIN_ANCHOR_POW_BLOCK_DIFFICULTY: 4294967296 # Time parameters diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index 621ebfddcb..b966c68113 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -14,8 +14,6 @@ - [Custom types](#custom-types) - [Constants](#constants) - [Execution](#execution) -- [Configuration](#configuration) - - [Transition](#transition) - [Containers](#containers) - [Extended containers](#extended-containers) - [`BeaconBlockBody`](#beaconblockbody) @@ -62,16 +60,6 @@ We define the following Python custom types for type hinting and readability: | `MAX_EXECUTION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) | | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) | -## Configuration - -Warning: this configuration is not definitive. - -### Transition - -| Name | Value | -| - | - | -| `TRANSITION_TOTAL_DIFFICULTY_OFFSET` | **TBD** | - ## Containers ### Extended containers diff --git a/specs/merge/fork.md b/specs/merge/fork.md index cf6d938ddd..0d0a81a338 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -28,6 +28,8 @@ Warning: this configuration is not definitive. | - | - | | `MERGE_FORK_VERSION` | `Version('0x02000000')` | | `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | +| `MIN_ANCHOR_POW_BLOCK_DIFFICULTY` | **TBD** | +| `SECONDS_SINCE_MERGE_FORK` | `uint64(7 * 86400)` = (604,800) | ## Fork to Merge @@ -35,6 +37,8 @@ Warning: this configuration is not definitive. TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at epoch `MERGE_FORK_EPOCH`. +Since the Merge transition process relies on `Eth1Data` in the beacon state we do want to make sure that this data is fresh. This is achieved by forcing `MERGE_FORK_EPOCH` to point to eth1 voting period boundary, i.e. `MERGE_FORK_EPOCH` should satisfy the following condition `MERGE_FORK_EPOCH % EPOCHS_PER_ETH1_VOTING_PERIOD == 0`. + Note that for the pure Merge networks, we don't apply `upgrade_to_merge` since it starts with Merge version logic. ### Upgrading the state @@ -96,8 +100,18 @@ If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == Transition store initialization occurs after the state has been modified by corresponding `upgrade_to_merge` function. 
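
To get a feel for the magnitudes produced by `compute_transition_total_difficulty` below, a back-of-the-envelope with assumed mainnet values (`SECONDS_PER_SLOT=12`, `SLOTS_PER_EPOCH=32`, `EPOCHS_PER_ETH1_VOTING_PERIOD=64`, `SECONDS_PER_ETH1_BLOCK=14`, `ETH1_FOLLOW_DISTANCE=2048`; these constants come from the existing presets/configs and are not restated in this patch, while `TARGET_SECONDS_TO_MERGE=604800` is defined here):

```python
# Illustrative arithmetic only; mirrors the integer divisions in the spec function below.
seconds_per_voting_period = 64 * 32 * 12                          # 24,576 seconds (~6.8 hours)
pow_blocks_per_voting_period = seconds_per_voting_period // 14    # 1,755 blocks
pow_blocks_to_merge = 604_800 // 14                               # 43,200 blocks (~1 week)
pow_blocks_after_anchor_block = 2048 + pow_blocks_per_voting_period + pow_blocks_to_merge  # 47,003
# The transition total difficulty therefore sits roughly 47,003 anchor-difficulty
# units past the anchor PoW block's total difficulty.
```
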
```python +def compute_transition_total_difficulty(anchor_pow_block: PowBlock) -> uint256: + seconds_per_voting_period = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT + pow_blocks_per_voting_period = seconds_per_voting_period / SECONDS_PER_ETH1_BLOCK + pow_blocks_since_merge_fork = SECONDS_SINCE_MERGE_FORK / SECONDS_PER_ETH1_BLOCK + pow_blocks_to_transition = ETH1_FOLLOW_DISTANCE + pow_blocks_per_voting_period + pow_blocks_since_merge_fork + anchor_difficulty = max(MIN_ANCHOR_POW_BLOCK_DIFFICULTY, anchor_pow_block.difficulty) + + return anchor_pow_block.total_difficulty + anchor_difficulty * pow_blocks_to_transition + + def get_transition_store(anchor_pow_block: PowBlock) -> TransitionStore: - transition_total_difficulty = anchor_pow_block.total_difficulty + TRANSITION_TOTAL_DIFFICULTY_OFFSET + transition_total_difficulty = compute_transition_total_difficulty(anchor_pow_block) return TransitionStore(transition_total_difficulty=transition_total_difficulty) From 09b6fb0e644a1f781cc2c5220b784319a086b2ee Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 7 Jun 2021 16:15:53 -0600 Subject: [PATCH 69/82] ensure consistency about sync committees vs aggregates in naming throughout --- specs/altair/beacon-chain.md | 14 +++++++------- ...committee.py => test_process_sync_aggregate.py} | 0 tests/formats/operations/README.md | 2 +- tests/generators/operations/main.py | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) rename tests/core/pyspec/eth2spec/test/altair/block_processing/{test_process_sync_committee.py => test_process_sync_aggregate.py} (100%) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 3877f17d23..5e5d9abf88 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -446,7 +446,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) # [Modified in Altair] - process_sync_committee(state, block.body.sync_aggregate) # [New in Altair] + process_sync_aggregate(state, block.body.sync_aggregate) # [New in Altair] ``` #### Modified `process_attestation` @@ -532,19 +532,19 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: increase_balance(state, index, amount) ``` -#### Sync committee processing +#### Sync aggregate processing -*Note*: The function `process_sync_committee` is new. +*Note*: The function `process_sync_aggregate` is new. 
```python -def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None: +def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) -> None: # Verify sync committee aggregate signature signing over the previous slot block root committee_pubkeys = state.current_sync_committee.pubkeys - participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, aggregate.sync_committee_bits) if bit] + participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, sync_aggregate.sync_committee_bits) if bit] previous_slot = max(state.slot, Slot(1)) - Slot(1) domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot)) signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain) - assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, aggregate.sync_committee_signature) + assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) # Compute participant and proposer rewards total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT @@ -556,7 +556,7 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None # Apply participant and proposer rewards all_pubkeys = [v.pubkey for v in state.validators] committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys] - for participant_index, participation_bit in zip(committee_indices, aggregate.sync_committee_bits): + for participant_index, participation_bit in zip(committee_indices, sync_aggregate.sync_committee_bits): if participation_bit: increase_balance(state, participant_index, participant_reward) increase_balance(state, get_beacon_proposer_index(state), proposer_reward) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py rename to tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py diff --git a/tests/formats/operations/README.md b/tests/formats/operations/README.md index f562a6f2aa..c69d798d77 100644 --- a/tests/formats/operations/README.md +++ b/tests/formats/operations/README.md @@ -41,7 +41,7 @@ Operations: | `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` | | `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` | | `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` | -| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_committee(state, sync_aggregate)` (new in Altair) | +| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_aggregate(state, sync_aggregate)` (new in Altair) | | `execution_payload` | `ExecutionPayload` | `execution_payload` | `process_execution_payload(state, execution_payload)` (new in Merge) | Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here. 
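
As an illustration of how a consumer of these vectors might wire the renamed handler, a minimal dispatch table (sketch only; the operation names and handler calls follow the table above, everything else is assumed):

```python
# Sketch: mapping operation names from the test vector format to spec calls.
operation_handlers = {
    'attestation':       lambda spec, state, op: spec.process_attestation(state, op),
    'deposit':           lambda spec, state, op: spec.process_deposit(state, op),
    'voluntary_exit':    lambda spec, state, op: spec.process_voluntary_exit(state, op),
    'sync_aggregate':    lambda spec, state, op: spec.process_sync_aggregate(state, op),     # renamed in Altair
    'execution_payload': lambda spec, state, op: spec.process_execution_payload(state, op),  # new in Merge
}
```
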
diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index 554d0b30ad..57fc6dd967 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -13,7 +13,7 @@ ]} altair_mods = { **{key: 'eth2spec.test.altair.block_processing.test_process_' + key for key in [ - 'sync_committee', + 'sync_aggregate', ]}, **phase_0_mods, } # also run the previous phase 0 tests From cfc66a3794c4f102af9661fa417f4dd2b04893a5 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 7 Jun 2021 16:36:18 -0600 Subject: [PATCH 70/82] patch tests --- specs/altair/beacon-chain.md | 2 +- .../altair/block_processing/test_process_sync_aggregate.py | 2 +- tests/core/pyspec/eth2spec/test/helpers/block_processing.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 5e5d9abf88..3bd82aec88 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -44,7 +44,7 @@ - [Block processing](#block-processing) - [Modified `process_attestation`](#modified-process_attestation) - [Modified `process_deposit`](#modified-process_deposit) - - [Sync committee processing](#sync-committee-processing) + - [Sync aggregate processing](#sync-aggregate-processing) - [Epoch processing](#epoch-processing) - [Justification and finalization](#justification-and-finalization) - [Inactivity scores](#inactivity-scores) diff --git a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py index b0a294c856..fa7f89fdcb 100644 --- a/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py +++ b/tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_aggregate.py @@ -32,7 +32,7 @@ def run_sync_committee_processing(spec, state, block, expect_exception=False): produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes. 
""" # process up to the sync committee work - call = run_block_processing_to(spec, state, block, 'process_sync_committee') + call = run_block_processing_to(spec, state, block, 'process_sync_aggregate') yield 'pre', state yield 'sync_aggregate', block.body.sync_aggregate if expect_exception: diff --git a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py index 676d8cb6d8..d2ec4a1115 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/block_processing.py +++ b/tests/core/pyspec/eth2spec/test/helpers/block_processing.py @@ -25,8 +25,8 @@ def get_process_calls(spec): 'process_voluntary_exit': lambda state, block: for_ops(state, block.body.voluntary_exits, spec.process_voluntary_exit), # Altair - 'process_sync_committee': - lambda state, block: spec.process_sync_committee(state, block.body.sync_aggregate), + 'process_sync_aggregate': + lambda state, block: spec.process_sync_aggregate(state, block.body.sync_aggregate), # Merge 'process_application_payload': lambda state, block: spec.process_application_payload(state, block.body), From 64f217d3e19091ce342c6982d79f995b7a7fbd57 Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 8 Jun 2021 01:05:50 +0200 Subject: [PATCH 71/82] update sync aggregate processing reference in validator doc --- specs/altair/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 844c7021b6..f193e3591c 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -243,7 +243,7 @@ def process_sync_committee_contributions(block: BeaconBlock, block.body.sync_aggregate = sync_aggregate ``` -*Note*: The resulting block must pass the validations for the `SyncAggregate` defined in `process_sync_committee` defined in the [state transition document](./beacon-chain.md#sync-committee-processing). +*Note*: The resulting block must pass the validations for the `SyncAggregate` defined in `process_sync_aggregate` defined in the [state transition document](./beacon-chain.md#sync-aggregate-processing). In particular, this means `SyncCommitteeContribution`s received from gossip must have a `beacon_block_root` that matches the proposer's local view of the chain. 
#### Packaging into a `SignedBeaconBlock` From 6258dc64f77a366efb01b146a73da4e204f587d0 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Mon, 7 Jun 2021 17:41:37 -0600 Subject: [PATCH 72/82] bump VERSION.txt to v1.1.0-alpha.7 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 31838516b2..e79b1b4ae2 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.1.0-alpha.6 \ No newline at end of file +1.1.0-alpha.7 \ No newline at end of file From 3a6a565a8fad477534f9f2120bac878820485769 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 8 Jun 2021 15:03:59 +0600 Subject: [PATCH 73/82] Replace fractional division with integer one Co-authored-by: vbuterin --- specs/merge/fork.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/merge/fork.md b/specs/merge/fork.md index 0d0a81a338..141b05faa6 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -102,8 +102,8 @@ Transition store initialization occurs after the state has been modified by corr ```python def compute_transition_total_difficulty(anchor_pow_block: PowBlock) -> uint256: seconds_per_voting_period = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT - pow_blocks_per_voting_period = seconds_per_voting_period / SECONDS_PER_ETH1_BLOCK - pow_blocks_since_merge_fork = SECONDS_SINCE_MERGE_FORK / SECONDS_PER_ETH1_BLOCK + pow_blocks_per_voting_period = seconds_per_voting_period // SECONDS_PER_ETH1_BLOCK + pow_blocks_since_merge_fork = SECONDS_SINCE_MERGE_FORK // SECONDS_PER_ETH1_BLOCK pow_blocks_to_transition = ETH1_FOLLOW_DISTANCE + pow_blocks_per_voting_period + pow_blocks_since_merge_fork anchor_difficulty = max(MIN_ANCHOR_POW_BLOCK_DIFFICULTY, anchor_pow_block.difficulty) From 038edb5b34fe944a2025bfe0c4298dae97fd2b96 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 8 Jun 2021 17:16:52 +0600 Subject: [PATCH 74/82] Rename SECONDS_SINCE_MERGE_FORK -> TARGET_SECONDS_TO_MERGE --- specs/merge/fork.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/merge/fork.md b/specs/merge/fork.md index 141b05faa6..1f2ea7fff5 100644 --- a/specs/merge/fork.md +++ b/specs/merge/fork.md @@ -29,7 +29,7 @@ Warning: this configuration is not definitive. 
| `MERGE_FORK_VERSION` | `Version('0x02000000')` | | `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | | `MIN_ANCHOR_POW_BLOCK_DIFFICULTY` | **TBD** | -| `SECONDS_SINCE_MERGE_FORK` | `uint64(7 * 86400)` = (604,800) | +| `TARGET_SECONDS_TO_MERGE` | `uint64(7 * 86400)` = (604,800) | ## Fork to Merge @@ -103,11 +103,11 @@ Transition store initialization occurs after the state has been modified by corr def compute_transition_total_difficulty(anchor_pow_block: PowBlock) -> uint256: seconds_per_voting_period = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT pow_blocks_per_voting_period = seconds_per_voting_period // SECONDS_PER_ETH1_BLOCK - pow_blocks_since_merge_fork = SECONDS_SINCE_MERGE_FORK // SECONDS_PER_ETH1_BLOCK - pow_blocks_to_transition = ETH1_FOLLOW_DISTANCE + pow_blocks_per_voting_period + pow_blocks_since_merge_fork + pow_blocks_to_merge = TARGET_SECONDS_TO_MERGE // SECONDS_PER_ETH1_BLOCK + pow_blocks_after_anchor_block = ETH1_FOLLOW_DISTANCE + pow_blocks_per_voting_period + pow_blocks_to_merge anchor_difficulty = max(MIN_ANCHOR_POW_BLOCK_DIFFICULTY, anchor_pow_block.difficulty) - return anchor_pow_block.total_difficulty + anchor_difficulty * pow_blocks_to_transition + return anchor_pow_block.total_difficulty + anchor_difficulty * pow_blocks_after_anchor_block def get_transition_store(anchor_pow_block: PowBlock) -> TransitionStore: From 6350e2736641ff7e1fdf7f3f6206eaf2ab1f37c8 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 8 Jun 2021 17:26:09 +0600 Subject: [PATCH 75/82] Add difficulty field to PowBlock --- setup.py | 2 +- specs/merge/fork-choice.md | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 80e3d8d9f3..7f2408e2c6 100644 --- a/setup.py +++ b/setup.py @@ -509,7 +509,7 @@ def sundry_functions(cls) -> str: def get_pow_block(hash: Bytes32) -> PowBlock: return PowBlock(block_hash=hash, is_valid=True, is_processed=True, - total_difficulty=uint256(0)) + total_difficulty=uint256(0), difficulty=uint256(0)) def get_execution_state(execution_state_root: Bytes32) -> ExecutionState: diff --git a/specs/merge/fork-choice.md b/specs/merge/fork-choice.md index 1a87864681..56345dd90e 100644 --- a/specs/merge/fork-choice.md +++ b/specs/merge/fork-choice.md @@ -85,6 +85,7 @@ class PowBlock(object): is_processed: boolean is_valid: boolean total_difficulty: uint256 + difficulty: uint256 ``` ### `get_pow_block` From 5228dc814f00ea9db71a60cd80de6d999921d9f2 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 8 Jun 2021 18:44:53 +0600 Subject: [PATCH 76/82] Redefine initialize_beacon_state_from_eth1 --- specs/merge/beacon-chain.md | 61 +++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/specs/merge/beacon-chain.md b/specs/merge/beacon-chain.md index b966c68113..697bd0c96b 100644 --- a/specs/merge/beacon-chain.md +++ b/specs/merge/beacon-chain.md @@ -33,6 +33,7 @@ - [Block processing](#block-processing) - [Execution payload processing](#execution-payload-processing) - [`process_execution_payload`](#process_execution_payload) +- [Initialize state for pure Merge testnets and test vectors](#initialize-state-for-pure-merge-testnets-and-test-vectors) @@ -233,3 +234,63 @@ def process_execution_payload(state: BeaconState, transactions_root=hash_tree_root(execution_payload.transactions), ) ``` + +## Initialize state for pure Merge testnets and test vectors + +This helper function is only for initializing the state for pure Merge testnets and tests. 
+ +*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `MERGE_FORK_VERSION` as the current fork version, (2) utilizing the Merge `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial `latest_execution_payload_header`. + +```python +def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32, + eth1_timestamp: uint64, + deposits: Sequence[Deposit]) -> BeaconState: + fork = Fork( + previous_version=GENESIS_FORK_VERSION, + current_version=MERGE_FORK_VERSION, # [Modified in Merge] + epoch=GENESIS_EPOCH, + ) + state = BeaconState( + genesis_time=eth1_timestamp + GENESIS_DELAY, + fork=fork, + eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), + latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + ) + + # Process deposits + leaves = list(map(lambda deposit: deposit.data, deposits)) + for index, deposit in enumerate(deposits): + deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) + state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) + process_deposit(state, deposit) + + # Process activations + for index, validator in enumerate(state.validators): + balance = state.balances[index] + validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + if validator.effective_balance == MAX_EFFECTIVE_BALANCE: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + + # Set genesis validators root for domain separation and chain versioning + state.genesis_validators_root = hash_tree_root(state.validators) + + # [New in Merge] Construct execution payload header + # Note: initialized with zero block height + state.latest_execution_payload_header = ExecutionPayloadHeader( + block_hash=eth1_block_hash, + parent_hash=Hash32(), + coinbase=Bytes20(), + state_root=Bytes32(), + number=uint64(0), + gas_limit=uint64(0), + gas_used=uint64(0), + timestamp=eth1_timestamp, + receipt_root=Bytes32(), + logs_bloom=ByteVector[BYTES_PER_LOGS_BLOOM](), + transactions_root=Root(), + ) + + return state +``` From c7980d3cd03a1d98a2dda4dff9abda59b4afccc3 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 8 Jun 2021 13:34:16 -0500 Subject: [PATCH 77/82] Clarify p2p validation conditions for Altair sync committees --- specs/altair/p2p-interface.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index d6f8a0dfee..b5006396ca 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -144,6 +144,7 @@ The following validations MUST pass before forwarding the `sync_committee_messag - _[IGNORE]_ The signature's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `sync_committee_message.slot == current_slot`. - _[IGNORE]_ The block being signed over (`sync_committee_message.beacon_block_root`) has been seen (via both gossip and non-gossip sources). - _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`. + Note this validation is _per topic_ so that for a given `slot`, multiple messages could be forwarded with the same `validator_index` as long as the `subnet_id`s are distinct. 
- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`. Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee. - _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`. From d64b4e7d245438bd6db0617f7eee619bbf7d7eee Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 8 Jun 2021 13:44:06 -0500 Subject: [PATCH 78/82] Add `message.topic` to gossipsub `message-id` in Altair --- specs/altair/p2p-interface.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index d6f8a0dfee..49082452fc 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -74,6 +74,15 @@ New topics are added in Altair to support the sync committees and the beacon blo The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document. +The derivation of the `message-id` has changed starting with Altair to incorporate the message topic along with the message data. +The `message-id` MUST be the following 20 byte value computed from the message: +* If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of + the concatenation of `MESSAGE_DOMAIN_VALID_SNAPPY` with the snappy decompressed message data and the topic name, + i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + snappy_decompress(message.data) + message.topic)[:20]`. +* Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of + the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data and the topic name, + i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data + message.topic)[:20]`. + The new topics along with the type of the `data` field of a gossipsub message are given in this table: | Name | Message Type | From 25a2e3463ecc0fceb2677deda5f9456c445bd559 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 8 Jun 2021 12:06:02 -0700 Subject: [PATCH 79/82] Update specs/altair/p2p-interface.md Co-authored-by: Diederik Loerakker --- specs/altair/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 49082452fc..4021cbbd04 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -74,7 +74,7 @@ New topics are added in Altair to support the sync committees and the beacon blo The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document. -The derivation of the `message-id` has changed starting with Altair to incorporate the message topic along with the message data. +The derivation of the `message-id` has changed starting with Altair to incorporate the message `topic` along with the message `data`. These are fields of the `Message` Protobuf, and interpreted as empty byte strings if missing. 
The `message-id` MUST be the following 20 byte value computed from the message: * If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of the concatenation of `MESSAGE_DOMAIN_VALID_SNAPPY` with the snappy decompressed message data and the topic name, From f60f13964c3c71196f5ea138ffb2edc6fa7c2db9 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 8 Jun 2021 14:15:57 -0500 Subject: [PATCH 80/82] Harden topic serialization --- specs/altair/p2p-interface.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 4021cbbd04..ccc31a2804 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -77,11 +77,13 @@ The specification around the creation, validation, and dissemination of messages The derivation of the `message-id` has changed starting with Altair to incorporate the message `topic` along with the message `data`. These are fields of the `Message` Protobuf, and interpreted as empty byte strings if missing. The `message-id` MUST be the following 20 byte value computed from the message: * If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of - the concatenation of `MESSAGE_DOMAIN_VALID_SNAPPY` with the snappy decompressed message data and the topic name, - i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + snappy_decompress(message.data) + message.topic)[:20]`. + the concatenation of the following data: `MESSAGE_DOMAIN_VALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), + the topic byte string, and the snappy decompressed message data: + i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data)[:20]`. * Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of - the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data and the topic name, - i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data + message.topic)[:20]`. + the concatenation of the following data: `MESSAGE_DOMAIN_INVALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), + the topic byte string, and the raw message data: + i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20]`. The new topics along with the type of the `data` field of a gossipsub message are given in this table: From a343680fd41f74556e5381aca63412d7eaa0dad9 Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 8 Jun 2021 14:29:08 -0500 Subject: [PATCH 81/82] Add implementation note on message id fn --- specs/altair/p2p-interface.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index ccc31a2804..8de85188d1 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -85,6 +85,12 @@ The `message-id` MUST be the following 20 byte value computed from the message: the topic byte string, and the raw message data: i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + message.data)[:20]`. +Implementations may need to carefully handle the function that computes the `message-id`. In particular, messages on topics with the Phase 0 +fork digest should use the `message-id` procedure specified in the Phase 0 document. 
+Messages on topics with the Altair fork digest should use the `message-id` procedure defined here. +If an implementation only supports a single `message-id` function, it can define a switch inline; +for example, `if topic in phase0_topics: return phase0_msg_id_fn(message) else return altair_msg_id_fn(message)`. + The new topics along with the type of the `data` field of a gossipsub message are given in this table: | Name | Message Type | From 5140b59c57c979f23255701d40b6197d12f2261f Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 8 Jun 2021 21:51:26 +0200 Subject: [PATCH 82/82] add missing parenthesis --- specs/altair/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 8218551708..fc0a8a35fd 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -79,7 +79,7 @@ The `message-id` MUST be the following 20 byte value computed from the message: * If `message.data` has a valid snappy decompression, set `message-id` to the first 20 bytes of the `SHA256` hash of the concatenation of the following data: `MESSAGE_DOMAIN_VALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), the topic byte string, and the snappy decompressed message data: - i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data)[:20]`. + i.e. `SHA256(MESSAGE_DOMAIN_VALID_SNAPPY + uint_to_bytes(uint64(len(message.topic))) + message.topic + snappy_decompress(message.data))[:20]`. * Otherwise, set `message-id` to the first 20 bytes of the `SHA256` hash of the concatenation of the following data: `MESSAGE_DOMAIN_INVALID_SNAPPY`, the length of the topic byte string (encoded as little-endian `uint64`), the topic byte string, and the raw message data:
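
Putting the two branches of the Altair `message-id` derivation together, a minimal sketch (assuming `python-snappy` for decompression and the Phase 0 domain values `0x01000000`/`0x00000000` for `MESSAGE_DOMAIN_VALID_SNAPPY`/`MESSAGE_DOMAIN_INVALID_SNAPPY`):

```python
import hashlib
import snappy  # python-snappy, assumed available

MESSAGE_DOMAIN_VALID_SNAPPY = bytes.fromhex("01000000")    # assumed Phase 0 value
MESSAGE_DOMAIN_INVALID_SNAPPY = bytes.fromhex("00000000")  # assumed Phase 0 value

def altair_message_id(topic: bytes, data: bytes) -> bytes:
    # Length-prefix the topic (little-endian uint64) to harden the serialization.
    prefix = len(topic).to_bytes(8, "little") + topic
    try:
        payload = MESSAGE_DOMAIN_VALID_SNAPPY + prefix + snappy.decompress(data)
    except Exception:
        payload = MESSAGE_DOMAIN_INVALID_SNAPPY + prefix + data
    return hashlib.sha256(payload).digest()[:20]
```
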