diff --git a/Cargo.lock b/Cargo.lock index 3df8c9d8273f4..9881b2689e6ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -881,6 +881,7 @@ dependencies = [ "aptos-consensus-types", "aptos-crypto", "aptos-crypto-derive", + "aptos-dkg", "aptos-enum-conversion-derive", "aptos-event-notifications", "aptos-executor", @@ -938,6 +939,7 @@ dependencies = [ "serde_bytes", "serde_json", "serde_yaml 0.8.26", + "sha3 0.9.1", "strum_macros 0.24.3", "tempfile", "thiserror", @@ -1313,6 +1315,7 @@ dependencies = [ "async-trait", "bcs 0.1.4", "bytes", + "fail 0.5.1", "futures", "futures-channel", "futures-util", @@ -14972,6 +14975,8 @@ dependencies = [ "aptos-consensus", "aptos-crypto", "aptos-db", + "aptos-debugger", + "aptos-dkg", "aptos-faucet-core", "aptos-forge", "aptos-framework", @@ -15001,16 +15006,19 @@ dependencies = [ "base64 0.13.1", "bcs 0.1.4", "diesel", + "digest 0.9.0", "futures", "hex", "hyper", "move-core-types", + "num-traits", "num_cpus", "once_cell", "proptest", "rand 0.7.3", "regex", "reqwest", + "serde", "serde_json", "serde_yaml 0.8.26", "tokio", diff --git a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json index af33606c88587..a5b7b1b2413f4 100644 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json +++ b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_output_user_transaction_with_entry_function_payload.json @@ -85,6 +85,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "address": "0x1", "state_key_hash": "", diff --git 
a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json index d407279e9c431..cf42a4ebd8a82 100644 --- a/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json +++ b/api/goldens/aptos_api__tests__transactions_test__test_get_transactions_returns_last_page_when_start_version_is_not_specified.json @@ -99,6 +99,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -420,6 +435,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -741,6 +771,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -1062,6 +1107,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { 
"state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -1383,6 +1443,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -1704,6 +1779,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -2025,6 +2115,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", @@ -2346,6 +2451,21 @@ }, "type": "write_resource" }, + { + "address": "0x1", + "state_key_hash": "", + "data": { + "type": "0x1::randomness::PerBlockRandomness", + "data": { + "epoch": "1", + "round": "1", + "seed": { + "vec": [] + } + } + }, + "type": "write_resource" + }, { "state_key_hash": "", "handle": "0x38ff67f17cf7998cd41ed5267b52cff7af37d06a22e8b390ce44b69680fc0e97", diff --git a/api/src/context.rs b/api/src/context.rs index ffcd13c320f4e..030652028e9db 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -648,7 +648,9 @@ impl Context { .into_iter() .map(|t| { // Update the timestamp if the next block occurs - if let Some(txn) = t.transaction.try_as_block_metadata() { + if let Some(txn) = t.transaction.try_as_block_metadata_ext() 
{ + timestamp = txn.timestamp_usecs(); + } else if let Some(txn) = t.transaction.try_as_block_metadata() { timestamp = txn.timestamp_usecs(); } let txn = converter.try_into_onchain_transaction(timestamp, t)?; diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 7eeae7ea071bc..128ed84b09c5e 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -21,7 +21,10 @@ use crate::{ use anyhow::anyhow; use aptos_block_executor::txn_commit_hook::NoOpTransactionCommitHook; use aptos_crypto::HashValue; -use aptos_framework::{natives::code::PublishRequest, RuntimeModuleMetadataV1}; +use aptos_framework::{ + natives::{code::PublishRequest, transaction_context::NativeTransactionContext}, + RuntimeModuleMetadataV1, +}; use aptos_gas_algebra::{Gas, GasQuantity, Octa}; use aptos_gas_meter::{AptosGasMeter, GasAlgebra, StandardGasAlgebra, StandardGasMeter}; use aptos_gas_schedule::{AptosGasParameters, VMGasParameters}; @@ -677,6 +680,18 @@ impl AptosVM { senders: Vec, entry_fn: &EntryFunction, ) -> Result { + let is_friend_or_private = session.load_function_def_is_friend_or_private( + entry_fn.module(), + entry_fn.function(), + entry_fn.ty_args(), + )?; + if is_friend_or_private { + let txn_context = session + .get_native_extensions() + .get_mut::(); + txn_context.set_is_friend_or_private_entry_func(); + } + let function = session.load_function(entry_fn.module(), entry_fn.function(), entry_fn.ty_args())?; let args = verifier::transaction_arg_validation::validate_combine_signer_and_txn_args( diff --git a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs index 20a7f7accdacd..ea40b90149eba 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs @@ -7,6 +7,7 @@ use aptos_framework::natives::{ code::NativeCodeContext, cryptography::{algebra::AlgebraContext, ristretto255_point::NativeRistrettoPointContext}, event::NativeEventContext, 
+ randomness::RandomnessContext, state_storage::NativeStateStorageContext, transaction_context::NativeTransactionContext, }; @@ -201,6 +202,7 @@ impl MoveVmExt { extensions.add(NativeRistrettoPointContext::new()); extensions.add(AlgebraContext::new()); extensions.add(NativeAggregatorContext::new(txn_hash, resolver, resolver)); + extensions.add(RandomnessContext::new()); extensions.add(NativeTransactionContext::new( txn_hash.to_vec(), session_id.into_script_hash(), diff --git a/aptos-move/aptos-vm/src/natives.rs b/aptos-move/aptos-vm/src/natives.rs index 71f3a2ae616b5..5762c62ad2192 100644 --- a/aptos-move/aptos-vm/src/natives.rs +++ b/aptos-move/aptos-vm/src/natives.rs @@ -12,6 +12,8 @@ use aptos_aggregator::{ #[cfg(feature = "testing")] use aptos_aggregator::{resolver::TDelayedFieldView, types::DelayedFieldValue}; #[cfg(feature = "testing")] +use aptos_framework::natives::randomness::RandomnessContext; +#[cfg(feature = "testing")] use aptos_framework::natives::{cryptography::algebra::AlgebraContext, event::NativeEventContext}; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters, LATEST_GAS_FEATURE_VERSION}; use aptos_native_interface::SafeNativeBuilder; @@ -225,11 +227,9 @@ fn unit_test_extensions_hook(exts: &mut NativeContextExtensions) { exts.add(NativeTableContext::new([0u8; 32], &*DUMMY_RESOLVER)); exts.add(NativeCodeContext::default()); - exts.add(NativeTransactionContext::new( - vec![1], - vec![1], - ChainId::test().id(), - )); // We use the testing environment chain ID here + let mut txn_context = NativeTransactionContext::new(vec![1], vec![1], ChainId::test().id()); + txn_context.set_is_friend_or_private_entry_func(); + exts.add(txn_context); // We use the testing environment chain ID here exts.add(NativeAggregatorContext::new( [0; 32], &*DUMMY_RESOLVER, @@ -238,4 +238,5 @@ fn unit_test_extensions_hook(exts: &mut NativeContextExtensions) { exts.add(NativeRistrettoPointContext::new()); exts.add(AlgebraContext::new()); 
exts.add(NativeEventContext::default()); + exts.add(RandomnessContext::new()); } diff --git a/aptos-move/aptos-vm/src/validator_txns/dkg.rs b/aptos-move/aptos-vm/src/validator_txns/dkg.rs index 0df0e7daff18a..31468fd7fcbd9 100644 --- a/aptos-move/aptos-vm/src/validator_txns/dkg.rs +++ b/aptos-move/aptos-vm/src/validator_txns/dkg.rs @@ -15,7 +15,7 @@ use aptos_types::{ dkg::{DKGState, DKGTrait, DKGTranscript, DefaultDKG}, fee_statement::FeeStatement, move_utils::as_move_value::AsMoveValue, - on_chain_config::OnChainConfig, + on_chain_config::{ConfigurationResource, OnChainConfig}, transaction::{ExecutionStatus, TransactionStatus}, }; use aptos_vm_logging::log_schema::AdapterLogSchema; @@ -27,6 +27,7 @@ use move_core_types::{ }; use move_vm_types::gas::UnmeteredGasMeter; +#[derive(Debug)] enum ExpectedFailure { // Move equivalent: `errors::invalid_argument(*)` EpochNotCurrent = 0x10001, @@ -36,6 +37,7 @@ enum ExpectedFailure { // Move equivalent: `errors::invalid_state(*)` MissingResourceDKGState = 0x30001, MissingResourceInprogressDKGSession = 0x30002, + MissingResourceConfiguration = 0x30003, } enum ExecutionFailure { @@ -73,13 +75,14 @@ impl AptosVM { ) -> Result<(VMStatus, VMOutput), ExecutionFailure> { let dkg_state = OnChainConfig::fetch_config(resolver) .ok_or_else(|| Expected(MissingResourceDKGState))?; - + let config_resource = ConfigurationResource::fetch_config(resolver) + .ok_or_else(|| Expected(MissingResourceConfiguration))?; let DKGState { in_progress, .. } = dkg_state; let in_progress_session_state = in_progress.ok_or_else(|| Expected(MissingResourceInprogressDKGSession))?; // Check epoch number. 
- if dkg_node.metadata.epoch != in_progress_session_state.metadata.dealer_epoch { + if dkg_node.metadata.epoch != config_resource.epoch() { return Err(Expected(EpochNotCurrent)); } diff --git a/aptos-move/framework/aptos-framework/doc/aptos_governance.md b/aptos-move/framework/aptos-framework/doc/aptos_governance.md index a0ee05b1710f3..9da28060f91db 100644 --- a/aptos-move/framework/aptos-framework/doc/aptos_governance.md +++ b/aptos-move/framework/aptos-framework/doc/aptos_governance.md @@ -48,6 +48,7 @@ on a proposal multiple times as long as the total voting power of these votes do - [Function `resolve_multi_step_proposal`](#0x1_aptos_governance_resolve_multi_step_proposal) - [Function `remove_approved_hash`](#0x1_aptos_governance_remove_approved_hash) - [Function `reconfigure`](#0x1_aptos_governance_reconfigure) +- [Function `force_end_epoch`](#0x1_aptos_governance_force_end_epoch) - [Function `toggle_features`](#0x1_aptos_governance_toggle_features) - [Function `get_signer_testnet_only`](#0x1_aptos_governance_get_signer_testnet_only) - [Function `get_voting_power`](#0x1_aptos_governance_get_voting_power) @@ -79,6 +80,7 @@ on a proposal multiple times as long as the total voting power of these votes do - [Function `resolve_multi_step_proposal`](#@Specification_1_resolve_multi_step_proposal) - [Function `remove_approved_hash`](#@Specification_1_remove_approved_hash) - [Function `reconfigure`](#@Specification_1_reconfigure) + - [Function `force_end_epoch`](#@Specification_1_force_end_epoch) - [Function `toggle_features`](#@Specification_1_toggle_features) - [Function `get_signer_testnet_only`](#@Specification_1_get_signer_testnet_only) - [Function `get_voting_power`](#@Specification_1_get_voting_power) @@ -91,13 +93,14 @@ on a proposal multiple times as long as the total voting power of these votes do
use 0x1::account;
 use 0x1::aptos_coin;
 use 0x1::coin;
+use 0x1::consensus_config;
 use 0x1::error;
 use 0x1::event;
 use 0x1::features;
 use 0x1::governance_proposal;
 use 0x1::math64;
 use 0x1::option;
-use 0x1::reconfiguration;
+use 0x1::reconfiguration_with_dkg;
 use 0x1::signer;
 use 0x1::simple_map;
 use 0x1::smart_table;
@@ -1460,7 +1463,15 @@ Remove an approved proposal's execution script hash.
 
 ## Function `reconfigure`
 
-Force reconfigure. To be called at the end of a proposal that alters on-chain configs.
+Manually reconfigure. Called at the end of a governance txn that alters on-chain configs.
+
+WARNING: this function always ensures a reconfiguration starts, but when the reconfiguration finishes depends.
+- If feature RECONFIGURE_WITH_DKG is disabled, it finishes immediately.
+- At the end of the calling transaction, we will be in a new epoch.
+- If feature RECONFIGURE_WITH_DKG is enabled, it starts DKG, and the new epoch will start in a block prologue after DKG finishes.
+
+This behavior affects when an update of an on-chain config (e.g. ConsensusConfig, Features) takes effect,
+since such updates are applied whenever we enter an new epoch.
 
 
 
public fun reconfigure(aptos_framework: &signer)
@@ -1474,7 +1485,42 @@ Force reconfigure. To be called at the end of a proposal that alters on-chain co
 
 
public fun reconfigure(aptos_framework: &signer) {
     system_addresses::assert_aptos_framework(aptos_framework);
-    reconfiguration::reconfigure();
+    if (consensus_config::validator_txn_enabled() && features::reconfigure_with_dkg_enabled()) {
+        reconfiguration_with_dkg::try_start();
+    } else {
+        reconfiguration_with_dkg::finish(aptos_framework);
+    }
+}
+
+ + + + + + + +## Function `force_end_epoch` + +Change epoch immediately. +If RECONFIGURE_WITH_DKG is enabled and we are in the middle of a DKG, +stop waiting for DKG and enter the new epoch without randomness. + +WARNING: currently only used by tests. In most cases you should use reconfigure() instead. +TODO: migrate these tests to be aware of async reconfiguration. + + +
public fun force_end_epoch(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun force_end_epoch(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    reconfiguration_with_dkg::finish(aptos_framework);
 }
 
@@ -1500,8 +1546,8 @@ Update feature flags and also trigger reconfiguration.
public fun toggle_features(aptos_framework: &signer, enable: vector<u64>, disable: vector<u64>) {
     system_addresses::assert_aptos_framework(aptos_framework);
-    features::change_feature_flags(aptos_framework, enable, disable);
-    reconfiguration::reconfigure();
+    features::change_feature_flags_for_next_epoch(aptos_framework, enable, disable);
+    reconfigure(aptos_framework);
 }
 
@@ -2605,8 +2651,12 @@ Address @aptos_framework must exist ApprovedExecutionHashes and GovernancePropos -
pragma verify_duration_estimate = 120;
+
pragma verify_duration_estimate = 600;
 aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework));
+include reconfiguration_with_dkg::FinishRequirement {
+    account: aptos_framework
+};
+include stake::GetReconfigStartTimeRequirement;
 include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 requires chain_status::is_operating();
 requires exists<stake::ValidatorFees>(@aptos_framework);
@@ -2617,6 +2667,37 @@ Address @aptos_framework must exist ApprovedExecutionHashes and GovernancePropos
 
 
 
+
+
+### Function `force_end_epoch`
+
+
+
public fun force_end_epoch(aptos_framework: &signer)
+
+ + + + +
pragma verify_duration_estimate = 600;
+let address = signer::address_of(aptos_framework);
+include reconfiguration_with_dkg::FinishRequirement {
+    account: aptos_framework
+};
+
+ + + + + + + +
schema VotingInitializationAbortIfs {
+    aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
+}
+
+ + + ### Function `toggle_features` @@ -2630,9 +2711,13 @@ Signer address must be @aptos_framework. Address @aptos_framework must exist GovernanceConfig and GovernanceEvents. -
pragma verify_duration_estimate = 200;
+
pragma verify_duration_estimate = 600;
 let addr = signer::address_of(aptos_framework);
 aborts_if addr != @aptos_framework;
+include reconfiguration_with_dkg::FinishRequirement {
+    account: aptos_framework
+};
+include stake::GetReconfigStartTimeRequirement;
 include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 requires chain_status::is_operating();
 requires exists<stake::ValidatorFees>(@aptos_framework);
@@ -2793,17 +2878,6 @@ pool_address must exist in StakePool.
 
 
 
-
-
-
-
-
schema VotingInitializationAbortIfs {
-    aborts_if features::spec_partial_governance_voting_enabled() && !exists<VotingRecordsV2>(@aptos_framework);
-}
-
- - - ### Function `initialize_for_verification` diff --git a/aptos-move/framework/aptos-framework/doc/block.md b/aptos-move/framework/aptos-framework/doc/block.md index 7a76880b323f6..06e313f7fd76e 100644 --- a/aptos-move/framework/aptos-framework/doc/block.md +++ b/aptos-move/framework/aptos-framework/doc/block.md @@ -15,7 +15,9 @@ This module defines a struct storing the metadata of the block and new block eve - [Function `initialize_commit_history`](#0x1_block_initialize_commit_history) - [Function `update_epoch_interval_microsecs`](#0x1_block_update_epoch_interval_microsecs) - [Function `get_epoch_interval_secs`](#0x1_block_get_epoch_interval_secs) +- [Function `block_prologue_common`](#0x1_block_block_prologue_common) - [Function `block_prologue`](#0x1_block_block_prologue) +- [Function `block_prologue_ext`](#0x1_block_block_prologue_ext) - [Function `get_current_block_height`](#0x1_block_get_current_block_height) - [Function `emit_new_block_event`](#0x1_block_emit_new_block_event) - [Function `emit_genesis_block_event`](#0x1_block_emit_genesis_block_event) @@ -27,7 +29,9 @@ This module defines a struct storing the metadata of the block and new block eve - [Function `initialize`](#@Specification_1_initialize) - [Function `update_epoch_interval_microsecs`](#@Specification_1_update_epoch_interval_microsecs) - [Function `get_epoch_interval_secs`](#@Specification_1_get_epoch_interval_secs) + - [Function `block_prologue_common`](#@Specification_1_block_prologue_common) - [Function `block_prologue`](#@Specification_1_block_prologue) + - [Function `block_prologue_ext`](#@Specification_1_block_prologue_ext) - [Function `get_current_block_height`](#@Specification_1_get_current_block_height) - [Function `emit_new_block_event`](#@Specification_1_emit_new_block_event) - [Function `emit_genesis_block_event`](#@Specification_1_emit_genesis_block_event) @@ -35,11 +39,14 @@ This module defines a struct storing the metadata of the block and new block eve
use 0x1::account;
+use 0x1::dkg;
 use 0x1::error;
 use 0x1::event;
 use 0x1::features;
 use 0x1::option;
+use 0x1::randomness;
 use 0x1::reconfiguration;
+use 0x1::reconfiguration_with_dkg;
 use 0x1::stake;
 use 0x1::state_storage;
 use 0x1::system_addresses;
@@ -431,15 +438,13 @@ Return epoch interval in seconds.
 
 
- + -## Function `block_prologue` +## Function `block_prologue_common` -Set the metadata for the current block. -The runtime always runs this before executing the transactions in a block. -
fun block_prologue(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64)
+
fun block_prologue_common(vm: &signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64): u64
 
@@ -448,8 +453,8 @@ The runtime always runs this before executing the transactions in a block. Implementation -
fun block_prologue(
-    vm: signer,
+
fun block_prologue_common(
+    vm: &signer,
     hash: address,
     epoch: u64,
     round: u64,
@@ -457,9 +462,9 @@ The runtime always runs this before executing the transactions in a block.
     failed_proposer_indices: vector<u64>,
     previous_block_votes_bitvec: vector<u8>,
     timestamp: u64
-) acquires BlockResource, CommitHistory {
+): u64 acquires BlockResource, CommitHistory {
     // Operational constraint: can only be invoked by the VM.
-    system_addresses::assert_vm(&vm);
+    system_addresses::assert_vm(vm);
 
     // Blocks can only be produced by a valid proposer or by the VM itself for Nil blocks (no user txs).
     assert!(
@@ -485,7 +490,7 @@ The runtime always runs this before executing the transactions in a block.
         failed_proposer_indices,
         time_microseconds: timestamp,
     };
-    emit_new_block_event(&vm, &mut block_metadata_ref.new_block_events, new_block_event);
+    emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event);
 
     if (features::collect_and_distribute_gas_fees()) {
         // Assign the fees collected from the previous block to the previous block proposer.
@@ -501,7 +506,44 @@ The runtime always runs this before executing the transactions in a block.
     stake::update_performance_statistics(proposer_index, failed_proposer_indices);
     state_storage::on_new_block(reconfiguration::current_epoch());
 
-    if (timestamp - reconfiguration::last_reconfiguration_time() >= block_metadata_ref.epoch_interval) {
+    block_metadata_ref.epoch_interval
+}
+
+ + + + + + + +## Function `block_prologue` + +Set the metadata for the current block. +The runtime always runs this before executing the transactions in a block. + + +
fun block_prologue(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64)
+
+ + + +
+Implementation + + +
fun block_prologue(
+    vm: signer,
+    hash: address,
+    epoch: u64,
+    round: u64,
+    proposer: address,
+    failed_proposer_indices: vector<u64>,
+    previous_block_votes_bitvec: vector<u8>,
+    timestamp: u64
+) acquires BlockResource, CommitHistory {
+    let epoch_interval = block_prologue_common(&vm, hash, epoch, round, proposer, failed_proposer_indices, previous_block_votes_bitvec, timestamp);
+    randomness::on_new_block(&vm, epoch, round, option::none());
+    if (timestamp - reconfiguration::last_reconfiguration_time() >= epoch_interval) {
         reconfiguration::reconfigure();
     };
 }
@@ -509,6 +551,55 @@ The runtime always runs this before executing the transactions in a block.
 
 
 
+
+ + + +## Function `block_prologue_ext` + +block_prologue() but trigger reconfiguration with DKG after epoch timed out. + + +
fun block_prologue_ext(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64, randomness_seed: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
fun block_prologue_ext(
+    vm: signer,
+    hash: address,
+    epoch: u64,
+    round: u64,
+    proposer: address,
+    failed_proposer_indices: vector<u64>,
+    previous_block_votes_bitvec: vector<u8>,
+    timestamp: u64,
+    randomness_seed: Option<vector<u8>>,
+) acquires BlockResource, CommitHistory {
+    let epoch_interval = block_prologue_common(
+        &vm,
+        hash,
+        epoch,
+        round,
+        proposer,
+        failed_proposer_indices,
+        previous_block_votes_bitvec,
+        timestamp
+    );
+    randomness::on_new_block(&vm, epoch, round, randomness_seed);
+
+    if (!dkg::in_progress() && timestamp - reconfiguration::last_reconfiguration_time() >= epoch_interval) {
+        reconfiguration_with_dkg::try_start();
+    }
+}
+
+ + +
@@ -804,6 +895,34 @@ The number of new events created does not exceed MAX_U64. + + + +
schema BlockRequirement {
+    vm: signer;
+    hash: address;
+    epoch: u64;
+    round: u64;
+    proposer: address;
+    failed_proposer_indices: vector<u64>;
+    previous_block_votes_bitvec: vector<u8>;
+    timestamp: u64;
+    requires chain_status::is_operating();
+    requires system_addresses::is_vm(vm);
+    // This enforces high-level requirement 4:
+    requires proposer == @vm_reserved || stake::spec_is_current_epoch_validator(proposer);
+    requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp);
+    requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp);
+    requires exists<stake::ValidatorFees>(@aptos_framework);
+    requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+    include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+    include staking_config::StakingRewardsConfigRequirement;
+}
+
+ + + + @@ -893,6 +1012,24 @@ The BlockResource existed under the @aptos_framework. + + +### Function `block_prologue_common` + + +
fun block_prologue_common(vm: &signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64): u64
+
+ + + + +
pragma verify_duration_estimate = 1000;
+include BlockRequirement;
+aborts_if false;
+
+ + + ### Function `block_prologue` @@ -904,18 +1041,30 @@ The BlockResource existed under the @aptos_framework. -
pragma verify_duration_estimate = 120;
-requires chain_status::is_operating();
-requires system_addresses::is_vm(vm);
-// This enforces high-level requirement 4:
-requires proposer == @vm_reserved || stake::spec_is_current_epoch_validator(proposer);
+
pragma verify_duration_estimate = 1000;
 requires timestamp >= reconfiguration::last_reconfiguration_time();
-requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp);
-requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp);
-requires exists<stake::ValidatorFees>(@aptos_framework);
-requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
-include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
-include staking_config::StakingRewardsConfigRequirement;
+include BlockRequirement;
+aborts_if false;
+
+ + + + + +### Function `block_prologue_ext` + + +
fun block_prologue_ext(vm: signer, hash: address, epoch: u64, round: u64, proposer: address, failed_proposer_indices: vector<u64>, previous_block_votes_bitvec: vector<u8>, timestamp: u64, randomness_seed: option::Option<vector<u8>>)
+
+ + + + +
pragma verify_duration_estimate = 1000;
+requires timestamp >= reconfiguration::last_reconfiguration_time();
+include BlockRequirement;
+include stake::ResourceRequirement;
+include stake::GetReconfigStartTimeRequirement;
 aborts_if false;
 
diff --git a/aptos-move/framework/aptos-framework/doc/config_buffer.md b/aptos-move/framework/aptos-framework/doc/config_buffer.md new file mode 100644 index 0000000000000..38424bc243284 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/config_buffer.md @@ -0,0 +1,357 @@ + + + +# Module `0x1::config_buffer` + +This wrapper helps store an on-chain config for the next epoch. + +Once reconfigure with DKG is introduced, every on-chain config C should do the following. +- Support async update when DKG is enabled. This is typically done by 3 steps below. +- Implement C::set_for_next_epoch() using upsert() function in this module. +- Implement C::on_new_epoch() using extract() function in this module. +- Update 0x1::reconfiguration_with_dkg::finish() to call C::on_new_epoch(). +- Support sychronous update when DKG is disabled. +This is typically done by implementing C::set() to update the config resource directly. + +NOTE: on-chain config 0x1::state::ValidatorSet implemented its own buffer. + + +- [Resource `PendingConfigs`](#0x1_config_buffer_PendingConfigs) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_config_buffer_initialize) +- [Function `does_exist`](#0x1_config_buffer_does_exist) +- [Function `upsert`](#0x1_config_buffer_upsert) +- [Function `extract`](#0x1_config_buffer_extract) +- [Specification](#@Specification_1) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `does_exist`](#@Specification_1_does_exist) + - [Function `upsert`](#@Specification_1_upsert) + - [Function `extract`](#@Specification_1_extract) + + +
use 0x1::any;
+use 0x1::option;
+use 0x1::simple_map;
+use 0x1::string;
+use 0x1::type_info;
+
+ + + + + +## Resource `PendingConfigs` + + + +
struct PendingConfigs has key
+
+ + + +
+Fields + + +
+
+configs: simple_map::SimpleMap<string::String, any::Any> +
+
+ +
+
+ + +
+ + + +## Constants + + + + +Config buffer operations failed with permission denied. + + +
const ESTD_SIGNER_NEEDED: u64 = 1;
+
+ + + + + +## Function `initialize` + + + +
public fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(aptos_framework: &signer) {
+    move_to(aptos_framework, PendingConfigs {
+        configs: simple_map::new(),
+    })
+}
+
+ + + +
+ + + +## Function `does_exist` + +Check whether there is a pending config payload for T. + + +
public fun does_exist<T: store>(): bool
+
+ + + +
+Implementation + + +
public fun does_exist<T: store>(): bool acquires PendingConfigs {
+    if (exists<PendingConfigs>(@aptos_framework)) {
+        let config = borrow_global<PendingConfigs>(@aptos_framework);
+        simple_map::contains_key(&config.configs, &type_info::type_name<T>())
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Function `upsert` + +Upsert an on-chain config to the buffer for the next epoch. + +Typically used in X::set_for_next_epoch() where X is an on-chain config. + + +
public(friend) fun upsert<T: drop, store>(config: T)
+
+ + + +
+Implementation + + +
public(friend) fun upsert<T: drop + store>(config: T) acquires PendingConfigs {
+    let configs = borrow_global_mut<PendingConfigs>(@aptos_framework);
+    let key = type_info::type_name<T>();
+    let value = any::pack(config);
+    simple_map::upsert(&mut configs.configs, key, value);
+}
+
+ + + +
+ + + +## Function `extract` + +Take the buffered config T out (buffer cleared). Abort if the buffer is empty. +Should only be used at the end of a reconfiguration. + +Typically used in X::on_new_epoch() where X is an on-chaon config. + + +
public fun extract<T: store>(): T
+
+ + + +
+Implementation + + +
public fun extract<T: store>(): T acquires PendingConfigs {
+    let configs = borrow_global_mut<PendingConfigs>(@aptos_framework);
+    let key = type_info::type_name<T>();
+    let (_, value_packed) = simple_map::remove(&mut configs.configs, &key);
+    any::unpack(value_packed)
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+
+ + + + + +### Function `initialize` + + +
public fun initialize(aptos_framework: &signer)
+
+ + + + +
aborts_if exists<PendingConfigs>(signer::address_of(aptos_framework));
+
+ + + + + +### Function `does_exist` + + +
public fun does_exist<T: store>(): bool
+
+ + + + +
aborts_if false;
+let type_name = type_info::type_name<T>();
+ensures result == spec_fun_does_exist<T>(type_name);
+
+ + + + + + + +
fun spec_fun_does_exist<T: store>(type_name: String): bool {
+   if (exists<PendingConfigs>(@aptos_framework)) {
+       let config = global<PendingConfigs>(@aptos_framework);
+       simple_map::spec_contains_key(config.configs, type_name)
+   } else {
+       false
+   }
+}
+
+ + + + + +### Function `upsert` + + +
public(friend) fun upsert<T: drop, store>(config: T)
+
+ + + + +
aborts_if !exists<PendingConfigs>(@aptos_framework);
+
+ + + + + +### Function `extract` + + +
public fun extract<T: store>(): T
+
+ + + + +
aborts_if !exists<PendingConfigs>(@aptos_framework);
+include ExtractAbortsIf<T>;
+
+ + + + + + + +
schema ExtractAbortsIf<T> {
+    let configs = global<PendingConfigs>(@aptos_framework);
+    let key = type_info::type_name<T>();
+    aborts_if !simple_map::spec_contains_key(configs.configs, key);
+    include any::UnpackAbortsIf<T> {
+        x: simple_map::spec_get(configs.configs, key)
+    };
+}
+
+ + + + + + + +
schema SetForNextEpochAbortsIf {
+    account: &signer;
+    config: vector<u8>;
+    let account_addr = std::signer::address_of(account);
+    aborts_if account_addr != @aptos_framework;
+    aborts_if len(config) == 0;
+    aborts_if !exists<PendingConfigs>(@aptos_framework);
+}
+
+ + + + + + + +
schema OnNewEpochAbortsIf<T> {
+    let type_name = type_info::type_name<T>();
+    aborts_if spec_fun_does_exist<T>(type_name) && !exists<T>(@aptos_framework);
+    let configs = global<PendingConfigs>(@aptos_framework);
+    include spec_fun_does_exist<T>(type_name) ==> any::UnpackAbortsIf<T> {
+        x: simple_map::spec_get(configs.configs, type_name)
+    };
+}
+
+ + + + + + + +
schema OnNewEpochRequirement<T> {
+    let type_name = type_info::type_name<T>();
+    requires spec_fun_does_exist<T>(type_name) ==> exists<T>(@aptos_framework);
+    let configs = global<PendingConfigs>(@aptos_framework);
+    include spec_fun_does_exist<T>(type_name) ==> any::UnpackRequirement<T> {
+        x: simple_map::spec_get(configs.configs, type_name)
+    };
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/consensus_config.md b/aptos-move/framework/aptos-framework/doc/consensus_config.md index 3d386cca963a6..dcaaa4b01e131 100644 --- a/aptos-move/framework/aptos-framework/doc/consensus_config.md +++ b/aptos-move/framework/aptos-framework/doc/consensus_config.md @@ -11,14 +11,23 @@ Reconfiguration, and may be updated by root. - [Constants](#@Constants_0) - [Function `initialize`](#0x1_consensus_config_initialize) - [Function `set`](#0x1_consensus_config_set) +- [Function `set_for_next_epoch`](#0x1_consensus_config_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_consensus_config_on_new_epoch) +- [Function `validator_txn_enabled`](#0x1_consensus_config_validator_txn_enabled) +- [Function `validator_txn_enabled_internal`](#0x1_consensus_config_validator_txn_enabled_internal) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - [Function `initialize`](#@Specification_1_initialize) - [Function `set`](#@Specification_1_set) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `validator_txn_enabled`](#@Specification_1_validator_txn_enabled) + - [Function `validator_txn_enabled_internal`](#@Specification_1_validator_txn_enabled_internal) -
use 0x1::error;
+
use 0x1::config_buffer;
+use 0x1::error;
 use 0x1::reconfiguration;
 use 0x1::system_addresses;
 
@@ -31,7 +40,7 @@ Reconfiguration, and may be updated by root. -
struct ConsensusConfig has key
+
struct ConsensusConfig has drop, store, key
 
@@ -98,7 +107,11 @@ Publishes the ConsensusConfig config. ## Function `set` -This can be called by on-chain governance to update on-chain consensus configs. +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function.
public fun set(account: &signer, config: vector<u8>)
@@ -124,6 +137,112 @@ This can be called by on-chain governance to update on-chain consensus configs.
 
 
 
+
+
+
+
+## Function `set_for_next_epoch`
+
+This can be called by on-chain governance to update on-chain consensus configs for the next epoch.
+Example usage:
+```
+aptos_framework::consensus_config::set_for_next_epoch(&framework_signer, some_config_bytes);
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+
+
+
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>) {
+    system_addresses::assert_aptos_framework(account);
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+    std::config_buffer::upsert<ConsensusConfig>(ConsensusConfig {config});
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending ConsensusConfig, if there is any. + + +
public(friend) fun on_new_epoch()
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch() acquires ConsensusConfig {
+    if (config_buffer::does_exist<ConsensusConfig>()) {
+        *borrow_global_mut<ConsensusConfig>(@aptos_framework) = config_buffer::extract();
+    }
+}
+
+ + + +
+ + + +## Function `validator_txn_enabled` + + + +
public fun validator_txn_enabled(): bool
+
+ + + +
+Implementation + + +
public fun validator_txn_enabled(): bool acquires ConsensusConfig {
+    let config_bytes = borrow_global<ConsensusConfig>(@aptos_framework).config;
+    validator_txn_enabled_internal(config_bytes)
+}
+
+ + + +
+ + + +## Function `validator_txn_enabled_internal` + + + +
fun validator_txn_enabled_internal(config_bytes: vector<u8>): bool
+
+ + + +
+Implementation + + +
native fun validator_txn_enabled_internal(config_bytes: vector<u8>): bool;
+
+ + +
@@ -178,6 +297,7 @@ This can be called by on-chain governance to update on-chain consensus configs.
pragma verify = true;
 pragma aborts_if_is_strict;
+invariant [suspendable] chain_status::is_operating() ==> exists<ConsensusConfig>(@aptos_framework);
 
@@ -219,7 +339,7 @@ Ensure the caller is admin and transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; let addr = signer::address_of(account); @@ -236,4 +356,80 @@ When setting now time must be later than last_reconfiguration_time.
+ + + +### Function `set_for_next_epoch` + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + + +
include config_buffer::SetForNextEpochAbortsIf;
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch()
+
+ + + + +
include config_buffer::OnNewEpochAbortsIf<ConsensusConfig>;
+
+ + + + + +### Function `validator_txn_enabled` + + +
public fun validator_txn_enabled(): bool
+
+ + + + +
pragma opaque;
+aborts_if !exists<ConsensusConfig>(@aptos_framework);
+ensures [abstract] result == spec_validator_txn_enabled_internal(global<ConsensusConfig>(@aptos_framework).config);
+
+ + + + + +### Function `validator_txn_enabled_internal` + + +
fun validator_txn_enabled_internal(config_bytes: vector<u8>): bool
+
+ + + + +
pragma opaque;
+ensures [abstract] result == spec_validator_txn_enabled_internal(config_bytes);
+
+ + + + + + + +
fun spec_validator_txn_enabled_internal(config_bytes: vector<u8>): bool;
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/dkg.md b/aptos-move/framework/aptos-framework/doc/dkg.md new file mode 100644 index 0000000000000..8fe701165ca37 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/dkg.md @@ -0,0 +1,631 @@ + + + +# Module `0x1::dkg` + +DKG on-chain states and helper functions. + + +- [Resource `FailureInjectionBlockDKG`](#0x1_dkg_FailureInjectionBlockDKG) +- [Resource `FailureInjectionBlockRandomness`](#0x1_dkg_FailureInjectionBlockRandomness) +- [Struct `DKGSessionMetadata`](#0x1_dkg_DKGSessionMetadata) +- [Struct `DKGStartEvent`](#0x1_dkg_DKGStartEvent) +- [Struct `DKGSessionState`](#0x1_dkg_DKGSessionState) +- [Resource `DKGState`](#0x1_dkg_DKGState) +- [Constants](#@Constants_0) +- [Function `block_dkg`](#0x1_dkg_block_dkg) +- [Function `unblock_dkg`](#0x1_dkg_unblock_dkg) +- [Function `block_randomness`](#0x1_dkg_block_randomness) +- [Function `unblock_randomness`](#0x1_dkg_unblock_randomness) +- [Function `initialize`](#0x1_dkg_initialize) +- [Function `start`](#0x1_dkg_start) +- [Function `finish`](#0x1_dkg_finish) +- [Function `in_progress`](#0x1_dkg_in_progress) +- [Specification](#@Specification_1) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `start`](#@Specification_1_start) + - [Function `finish`](#@Specification_1_finish) + - [Function `in_progress`](#@Specification_1_in_progress) + + +
use 0x1::error;
+use 0x1::event;
+use 0x1::option;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+use 0x1::validator_consensus_info;
+
+ + + + + +## Resource `FailureInjectionBlockDKG` + +If this resource is present under 0x1, validators should not do DKG (so the epoch change gets stuck). +This is test-only. + + +
struct FailureInjectionBlockDKG has drop, key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `FailureInjectionBlockRandomness` + +If this resource is present under 0x1, validators should not provide randomness to blocks (so the execution gets stuck). +This is test-only. + + +
struct FailureInjectionBlockRandomness has drop, key
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `DKGSessionMetadata` + +This can be considered as the public input of DKG. + + +
struct DKGSessionMetadata has copy, drop, store
+
+ + + +
+Fields + + +
+
+dealer_epoch: u64 +
+
+ +
+
+dealer_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo> +
+
+ +
+
+target_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo> +
+
+ +
+
+block_dkg: bool +
+
+ +
+
+block_randomness: bool +
+
+ +
+
+ + +
+ + + +## Struct `DKGStartEvent` + + + +
#[event]
+struct DKGStartEvent has drop, store
+
+ + + +
+Fields + + +
+
+session_metadata: dkg::DKGSessionMetadata +
+
+ +
+
+start_time_us: u64 +
+
+ +
+
+ + +
+ + + +## Struct `DKGSessionState` + +The input and output of a DKG session. +The validator set of epoch x works together for a DKG output for the target validator set of epoch x+1. + + +
struct DKGSessionState has copy, drop, store
+
+ + + +
+Fields + + +
+
+metadata: dkg::DKGSessionMetadata +
+
+ +
+
+start_time_us: u64 +
+
+ +
+
+transcript: vector<u8> +
+
+ +
+
+ + +
+ + + +## Resource `DKGState` + +The completed and in-progress DKG sessions. + + +
struct DKGState has key
+
+ + + +
+Fields + + +
+
+last_completed: option::Option<dkg::DKGSessionState> +
+
+ +
+
+in_progress: option::Option<dkg::DKGSessionState> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const EDKG_IN_PROGRESS: u64 = 1;
+
+ + + + + + + +
const EDKG_NOT_IN_PROGRESS: u64 = 2;
+
+ + + + + +## Function `block_dkg` + + + +
public fun block_dkg(framework: &signer)
+
+ + + +
+Implementation + + +
public fun block_dkg(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+    if (!exists<FailureInjectionBlockDKG>(@aptos_framework)) {
+        move_to(framework, FailureInjectionBlockDKG {})
+    }
+}
+
+ + + +
+ + + +## Function `unblock_dkg` + + + +
public fun unblock_dkg(framework: &signer)
+
+ + + +
+Implementation + + +
public fun unblock_dkg(framework: &signer) acquires FailureInjectionBlockDKG {
+    system_addresses::assert_aptos_framework(framework);
+    if (exists<FailureInjectionBlockDKG>(@aptos_framework)) {
+        move_from<FailureInjectionBlockDKG>(@aptos_framework);
+    }
+}
+
+ + + +
+ + + +## Function `block_randomness` + + + +
public fun block_randomness(framework: &signer)
+
+ + + +
+Implementation + + +
public fun block_randomness(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+    if (!exists<FailureInjectionBlockRandomness>(@aptos_framework)) {
+        move_to(framework, FailureInjectionBlockRandomness {})
+    }
+}
+
+ + + +
+ + + +## Function `unblock_randomness` + + + +
public fun unblock_randomness(framework: &signer)
+
+ + + +
+Implementation + + +
public fun unblock_randomness(framework: &signer) acquires FailureInjectionBlockRandomness {
+    system_addresses::assert_aptos_framework(framework);
+    // Remove the failure-injection marker only when it is actually present;
+    // calling move_from on a missing resource would abort.
+    if (exists<FailureInjectionBlockRandomness>(@aptos_framework)) {
+        move_from<FailureInjectionBlockRandomness>(@aptos_framework);
+    }
+}
+
+ + + +
+ + + +## Function `initialize` + +Called in genesis to initialize on-chain states. + + +
public fun initialize(aptos_framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(aptos_framework: &signer) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    move_to<DKGState>(
+        aptos_framework,
+        DKGState {
+            last_completed: std::option::none(),
+            in_progress: std::option::none(),
+        }
+    );
+}
+
+ + + +
+ + + +## Function `start` + +Mark on-chain DKG state as in-progress. Notify validators to start DKG. +Abort if a DKG is already in progress. + + +
public(friend) fun start(dealer_epoch: u64, dealer_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>, target_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>)
+
+ + + +
+Implementation + + +
public(friend) fun start(
+    dealer_epoch: u64,
+    dealer_validator_set: vector<ValidatorConsensusInfo>,
+    target_validator_set: vector<ValidatorConsensusInfo>,
+) acquires DKGState {
+    let dkg_state = borrow_global_mut<DKGState>(@aptos_framework);
+    assert!(std::option::is_none(&dkg_state.in_progress), error::invalid_state(EDKG_IN_PROGRESS));
+    let new_session_metadata = DKGSessionMetadata {
+        dealer_epoch,
+        dealer_validator_set,
+        target_validator_set,
+        block_dkg: exists<FailureInjectionBlockDKG>(@aptos_framework),
+        block_randomness: exists<FailureInjectionBlockRandomness>(@aptos_framework),
+    };
+    let start_time_us = timestamp::now_microseconds();
+    dkg_state.in_progress = std::option::some(DKGSessionState {
+        metadata: new_session_metadata,
+        start_time_us,
+        transcript: vector[],
+    });
+
+    emit(DKGStartEvent {
+        start_time_us,
+        session_metadata: new_session_metadata,
+    });
+}
+
+ + + +
+ + + +## Function `finish` + +Update the current DKG state at the beginning of every block in block_prologue_ext(), +or when DKG result is available. + +The in-progress session, with its transcript filled in, is recorded as the last completed session. + +Abort if DKG is not in progress. + +
public(friend) fun finish(transcript: vector<u8>)
+
+ + + +
+Implementation + + +
public(friend) fun finish(transcript: vector<u8>) acquires DKGState {
+    let dkg_state = borrow_global_mut<DKGState>(@aptos_framework);
+    assert!(option::is_some(&dkg_state.in_progress), error::invalid_state(EDKG_NOT_IN_PROGRESS));
+    let session = option::extract(&mut dkg_state.in_progress);
+    session.transcript = transcript;
+    dkg_state.last_completed = option::some(session);
+    dkg_state.in_progress = option::none();
+}
+
+ + + +
+ + + +## Function `in_progress` + +Return whether a DKG is in progress. + + +
public(friend) fun in_progress(): bool
+
+ + + +
+Implementation + + +
public(friend) fun in_progress(): bool acquires DKGState {
+    if (exists<DKGState>(@aptos_framework)) {
+        option::is_some(&borrow_global<DKGState>(@aptos_framework).in_progress)
+    } else {
+        false
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
invariant [suspendable] chain_status::is_operating() ==> exists<DKGState>(@aptos_framework);
+
+ + + + + +### Function `initialize` + + +
public fun initialize(aptos_framework: &signer)
+
+ + + + +
let aptos_framework_addr = signer::address_of(aptos_framework);
+aborts_if aptos_framework_addr != @aptos_framework;
+aborts_if exists<DKGState>(@aptos_framework);
+
+ + + + + +### Function `start` + + +
public(friend) fun start(dealer_epoch: u64, dealer_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>, target_validator_set: vector<validator_consensus_info::ValidatorConsensusInfo>)
+
+ + + + +
aborts_if !exists<DKGState>(@aptos_framework);
+aborts_if option::is_some(global<DKGState>(@aptos_framework).in_progress);
+aborts_if !exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+
+ + + + + +### Function `finish` + + +
public(friend) fun finish(transcript: vector<u8>)
+
+ + + + +
aborts_if !exists<DKGState>(@aptos_framework);
+aborts_if option::is_none(global<DKGState>(@aptos_framework).in_progress);
+
+ + + + + +### Function `in_progress` + + +
public(friend) fun in_progress(): bool
+
+ + + + +
aborts_if false;
+ensures result == spec_in_progress();
+
+ + + + + + + +
fun spec_in_progress(): bool {
+   if (exists<DKGState>(@aptos_framework)) {
+       option::spec_is_some(global<DKGState>(@aptos_framework).in_progress)
+   } else {
+       false
+   }
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/execution_config.md b/aptos-move/framework/aptos-framework/doc/execution_config.md index dc86e0df4220b..d08efe8896935 100644 --- a/aptos-move/framework/aptos-framework/doc/execution_config.md +++ b/aptos-move/framework/aptos-framework/doc/execution_config.md @@ -10,11 +10,16 @@ Reconfiguration, and may be updated by root. - [Resource `ExecutionConfig`](#0x1_execution_config_ExecutionConfig) - [Constants](#@Constants_0) - [Function `set`](#0x1_execution_config_set) +- [Function `set_for_next_epoch`](#0x1_execution_config_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_execution_config_on_new_epoch) - [Specification](#@Specification_1) - [Function `set`](#@Specification_1_set) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) -
use 0x1::error;
+
use 0x1::config_buffer;
+use 0x1::error;
 use 0x1::reconfiguration;
 use 0x1::system_addresses;
 
@@ -27,7 +32,7 @@ Reconfiguration, and may be updated by root. -
struct ExecutionConfig has key
+
struct ExecutionConfig has drop, store, key
 
@@ -67,7 +72,11 @@ The provided on chain config bytes are empty or invalid ## Function `set` -This can be called by on-chain governance to update on-chain execution configs. +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function.
public fun set(account: &signer, config: vector<u8>)
@@ -96,6 +105,66 @@ This can be called by on-chain governance to update on-chain execution configs.
 
 
 
+
+
+
+
+## Function `set_for_next_epoch`
+
+This can be called by on-chain governance to update on-chain execution configs for the next epoch.
+Example usage:
+```
+aptos_framework::execution_config::set_for_next_epoch(&framework_signer, some_config_bytes);
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+
+
+
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>) {
+    system_addresses::assert_aptos_framework(account);
+    assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG));
+    config_buffer::upsert(ExecutionConfig { config });
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending ExecutionConfig, if there is any. + + +
public(friend) fun on_new_epoch()
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch() acquires ExecutionConfig {
+    if (config_buffer::does_exist<ExecutionConfig>()) {
+        let config = config_buffer::extract<ExecutionConfig>();
+        *borrow_global_mut<ExecutionConfig>(@aptos_framework) = config;
+    }
+}
+
+ + +
@@ -123,7 +192,7 @@ Ensure the caller is admin When setting now time must be later than last_reconfiguration_time. -
pragma verify_duration_estimate = 120;
+
pragma verify_duration_estimate = 600;
 let addr = signer::address_of(account);
 include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
 requires chain_status::is_operating();
@@ -138,4 +207,36 @@ When setting now time must be later than last_reconfiguration_time.
 
+ + + +### Function `set_for_next_epoch` + + +
public fun set_for_next_epoch(account: &signer, config: vector<u8>)
+
+ + + + +
include config_buffer::SetForNextEpochAbortsIf;
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch()
+
+ + + + +
include config_buffer::OnNewEpochAbortsIf<ExecutionConfig>;
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/gas_schedule.md b/aptos-move/framework/aptos-framework/doc/gas_schedule.md index f37e79abbb025..777205a96d2b1 100644 --- a/aptos-move/framework/aptos-framework/doc/gas_schedule.md +++ b/aptos-move/framework/aptos-framework/doc/gas_schedule.md @@ -13,16 +13,23 @@ it costs to execute Move on the network. - [Constants](#@Constants_0) - [Function `initialize`](#0x1_gas_schedule_initialize) - [Function `set_gas_schedule`](#0x1_gas_schedule_set_gas_schedule) +- [Function `set_for_next_epoch`](#0x1_gas_schedule_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_gas_schedule_on_new_epoch) - [Function `set_storage_gas_config`](#0x1_gas_schedule_set_storage_gas_config) +- [Function `set_storage_gas_config_for_next_epoch`](#0x1_gas_schedule_set_storage_gas_config_for_next_epoch) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - [Function `initialize`](#@Specification_1_initialize) - [Function `set_gas_schedule`](#@Specification_1_set_gas_schedule) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) - [Function `set_storage_gas_config`](#@Specification_1_set_storage_gas_config) + - [Function `set_storage_gas_config_for_next_epoch`](#@Specification_1_set_storage_gas_config_for_next_epoch) -
use 0x1::error;
+
use 0x1::config_buffer;
+use 0x1::error;
 use 0x1::reconfiguration;
 use 0x1::storage_gas;
 use 0x1::string;
@@ -99,7 +106,7 @@ it costs to execute Move on the network.
 
 
 
-
struct GasScheduleV2 has copy, drop, key
+
struct GasScheduleV2 has copy, drop, store, key
 
@@ -184,7 +191,11 @@ Only called during genesis. ## Function `set_gas_schedule` -This can be called by on-chain governance to update the gas schedule. +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function.
public fun set_gas_schedule(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
@@ -224,6 +235,68 @@ This can be called by on-chain governance to update the gas schedule.
 
 
 
+
+
+
+
+## Function `set_for_next_epoch`
+
+Set the gas schedule for the next epoch, typically called by on-chain governance.
+Example usage:
+```
+aptos_framework::gas_schedule::set_for_next_epoch(&framework_signer, some_gas_schedule_blob);
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+
+
+
public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector<u8>) {
+    system_addresses::assert_aptos_framework(aptos_framework);
+    assert!(!vector::is_empty(&gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE));
+    let new_gas_schedule: GasScheduleV2 = from_bytes(gas_schedule_blob);
+    config_buffer::upsert(new_gas_schedule);
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending GasScheduleV2, if there is any. + + +
public(friend) fun on_new_epoch()
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch() acquires GasScheduleV2 {
+    if (config_buffer::does_exist<GasScheduleV2>()) {
+        let new_gas_schedule: GasScheduleV2 = config_buffer::extract<GasScheduleV2>();
+        let gas_schedule = borrow_global_mut<GasScheduleV2>(@aptos_framework);
+        *gas_schedule = new_gas_schedule;
+    }
+}
+
+ + +
@@ -251,6 +324,30 @@ This can be called by on-chain governance to update the gas schedule. + + + + +## Function `set_storage_gas_config_for_next_epoch` + + + +
public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + +
+Implementation + + +
public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: StorageGasConfig) {
+    storage_gas::set_config(aptos_framework, config);
+}
+
+ + +
@@ -350,7 +447,7 @@ This can be called by on-chain governance to update the gas schedule. -
pragma verify_duration_estimate = 120;
+
pragma verify_duration_estimate = 600;
 requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
@@ -369,6 +466,41 @@ This can be called by on-chain governance to update the gas schedule.
 
 
 
+
+
+### Function `set_for_next_epoch`
+
+
+
public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector<u8>)
+
+ + + + +
include config_buffer::SetForNextEpochAbortsIf {
+    account: aptos_framework,
+    config: gas_schedule_blob
+};
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch()
+
+ + + + +
include config_buffer::OnNewEpochAbortsIf<GasScheduleV2>;
+
+ + + ### Function `set_storage_gas_config` @@ -380,7 +512,7 @@ This can be called by on-chain governance to update the gas schedule. -
pragma verify_duration_estimate = 120;
+
pragma verify_duration_estimate = 600;
 requires exists<stake::ValidatorFees>(@aptos_framework);
 requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
 include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
@@ -391,4 +523,28 @@ This can be called by on-chain governance to update the gas schedule.
 
+ + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+aborts_if !exists<storage_gas::StorageGasConfig>(@aptos_framework);
+
+ + + + + +### Function `set_storage_gas_config_for_next_epoch` + + +
public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: storage_gas::StorageGasConfig)
+
+ + + + +
include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework };
+aborts_if !exists<storage_gas::StorageGasConfig>(@aptos_framework);
+
+ + [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/genesis.md b/aptos-move/framework/aptos-framework/doc/genesis.md index fcc8afa9f61b1..0d5c3f8a5b798 100644 --- a/aptos-move/framework/aptos-framework/doc/genesis.md +++ b/aptos-move/framework/aptos-framework/doc/genesis.md @@ -49,7 +49,6 @@ use 0x1::features; use 0x1::fixed_point32; use 0x1::gas_schedule; -use 0x1::jwks; use 0x1::reconfiguration; use 0x1::simple_map; use 0x1::stake; @@ -365,7 +364,6 @@ Genesis step 1: Initialize aptos framework account and core modules on chain. block::initialize(&aptos_framework_account, epoch_interval_microsecs); state_storage::initialize(&aptos_framework_account); timestamp::set_time_has_started(&aptos_framework_account); - jwks::initialize(&aptos_framework_account); }
diff --git a/aptos-move/framework/aptos-framework/doc/jwks.md b/aptos-move/framework/aptos-framework/doc/jwks.md index c8448032df8a4..5d95f7efe0ee2 100644 --- a/aptos-move/framework/aptos-framework/doc/jwks.md +++ b/aptos-move/framework/aptos-framework/doc/jwks.md @@ -31,7 +31,10 @@ have a simple layout which is easily accessible in Rust. - [Function `get_patched_jwk`](#0x1_jwks_get_patched_jwk) - [Function `try_get_patched_jwk`](#0x1_jwks_try_get_patched_jwk) - [Function `upsert_oidc_provider`](#0x1_jwks_upsert_oidc_provider) +- [Function `upsert_oidc_provider_for_next_epoch`](#0x1_jwks_upsert_oidc_provider_for_next_epoch) - [Function `remove_oidc_provider`](#0x1_jwks_remove_oidc_provider) +- [Function `remove_oidc_provider_for_next_epoch`](#0x1_jwks_remove_oidc_provider_for_next_epoch) +- [Function `on_new_epoch`](#0x1_jwks_on_new_epoch) - [Function `set_patches`](#0x1_jwks_set_patches) - [Function `new_patch_remove_all`](#0x1_jwks_new_patch_remove_all) - [Function `new_patch_remove_issuer`](#0x1_jwks_new_patch_remove_issuer) @@ -55,6 +58,7 @@ have a simple layout which is easily accessible in Rust.
use 0x1::comparator;
+use 0x1::config_buffer;
 use 0x1::copyable_any;
 use 0x1::error;
 use 0x1::event;
@@ -74,7 +78,7 @@ have a simple layout which is easily accessible in Rust.
 An OIDC provider.
 
 
-
struct OIDCProvider has drop, store
+
struct OIDCProvider has copy, drop, store
 
@@ -109,7 +113,7 @@ An OIDC provider. A list of OIDC providers whose JWKs should be watched by validators. Maintained by governance proposals. -
struct SupportedOIDCProviders has key
+
struct SupportedOIDCProviders has copy, drop, store, key
 
@@ -760,9 +764,9 @@ More convenient to call from Move, since it does not abort. ## Function `upsert_oidc_provider` -Upsert an OIDC provider metadata into the SupportedOIDCProviders resource. -Can only be called in a governance proposal. -Returns the old config URL of the provider, if any, as an Option. +Deprecated by upsert_oidc_provider_for_next_epoch(). + +TODO: update all the tests that reference this function, then disable this function.
public fun upsert_oidc_provider(fx: &signer, name: vector<u8>, config_url: vector<u8>): option::Option<vector<u8>>
@@ -787,19 +791,60 @@ Returns the old config URL of the provider, if any, as an Option.
 
 
 
+
+
+
+
+## Function `upsert_oidc_provider_for_next_epoch`
+
+Used in on-chain governances to update the supported OIDC providers, effective starting next epoch.
+Example usage:
+```
+aptos_framework::jwks::upsert_oidc_provider_for_next_epoch(
+&framework_signer,
+b"https://accounts.google.com",
+b"https://accounts.google.com/.well-known/openid-configuration"
+);
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+
+
+
public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>, config_url: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>, config_url: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+
+    let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        config_buffer::extract<SupportedOIDCProviders>()
+    } else {
+        *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework)
+    };
+
+    let old_config_url = remove_oidc_provider_internal(&mut provider_set, name);
+    vector::push_back(&mut provider_set.providers, OIDCProvider { name, config_url });
+    config_buffer::upsert(provider_set);
+    old_config_url
+}
+
+ + +
## Function `remove_oidc_provider` -Remove an OIDC provider from the SupportedOIDCProviders resource. -Can only be called in a governance proposal. -Returns the old config URL of the provider, if any, as an Option. +Deprecated by remove_oidc_provider_for_next_epoch(). -NOTE: this only stops validators from watching the provider and generate updates to ObservedJWKs. -It does NOT touch ObservedJWKs or Patches. -If you are disabling a provider, you probably also need remove_issuer_from_observed_jwks() and possibly set_patches(). +TODO: update all the tests that reference this function, then disable this function.
public fun remove_oidc_provider(fx: &signer, name: vector<u8>): option::Option<vector<u8>>
@@ -821,6 +866,75 @@ If you are disabling a provider, you probably also need 
+
+## Function `remove_oidc_provider_for_next_epoch`
+
+Used in on-chain governance to update the supported OIDC providers, effective starting next epoch.
+Example usage:
+```
+aptos_framework::jwks::remove_oidc_provider_for_next_epoch(
+&framework_signer,
+b"https://accounts.google.com",
+);
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+
+
+
public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>): option::Option<vector<u8>>
+
+ + + +
+Implementation + + +
public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector<u8>): Option<vector<u8>> acquires SupportedOIDCProviders {
+    system_addresses::assert_aptos_framework(fx);
+
+    let provider_set = if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        config_buffer::extract<SupportedOIDCProviders>()
+    } else {
+        *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework)
+    };
+    let ret = remove_oidc_provider_internal(&mut provider_set, name);
+    config_buffer::upsert(provider_set);
+    ret
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending SupportedOIDCProviders, if there is any. + + +
public(friend) fun on_new_epoch()
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch() acquires SupportedOIDCProviders {
+    if (config_buffer::does_exist<SupportedOIDCProviders>()) {
+        *borrow_global_mut<SupportedOIDCProviders>(@aptos_framework) = config_buffer::extract();
+    }
+}
+
+ + +
diff --git a/aptos-move/framework/aptos-framework/doc/overview.md b/aptos-move/framework/aptos-framework/doc/overview.md index 10520e74c33e0..f993863da04b1 100644 --- a/aptos-move/framework/aptos-framework/doc/overview.md +++ b/aptos-move/framework/aptos-framework/doc/overview.md @@ -24,9 +24,11 @@ This is the reference documentation of the Aptos framework. - [`0x1::chain_status`](chain_status.md#0x1_chain_status) - [`0x1::code`](code.md#0x1_code) - [`0x1::coin`](coin.md#0x1_coin) +- [`0x1::config_buffer`](config_buffer.md#0x1_config_buffer) - [`0x1::consensus_config`](consensus_config.md#0x1_consensus_config) - [`0x1::create_signer`](create_signer.md#0x1_create_signer) - [`0x1::delegation_pool`](delegation_pool.md#0x1_delegation_pool) +- [`0x1::dkg`](dkg.md#0x1_dkg) - [`0x1::event`](event.md#0x1_event) - [`0x1::execution_config`](execution_config.md#0x1_execution_config) - [`0x1::fungible_asset`](fungible_asset.md#0x1_fungible_asset) @@ -42,7 +44,10 @@ This is the reference documentation of the Aptos framework. - [`0x1::openid_account`](openid_account.md#0x1_openid_account) - [`0x1::optional_aggregator`](optional_aggregator.md#0x1_optional_aggregator) - [`0x1::primary_fungible_store`](primary_fungible_store.md#0x1_primary_fungible_store) +- [`0x1::randomness`](randomness.md#0x1_randomness) - [`0x1::reconfiguration`](reconfiguration.md#0x1_reconfiguration) +- [`0x1::reconfiguration_state`](reconfiguration_state.md#0x1_reconfiguration_state) +- [`0x1::reconfiguration_with_dkg`](reconfiguration_with_dkg.md#0x1_reconfiguration_with_dkg) - [`0x1::resource_account`](resource_account.md#0x1_resource_account) - [`0x1::stake`](stake.md#0x1_stake) - [`0x1::staking_config`](staking_config.md#0x1_staking_config) @@ -56,6 +61,7 @@ This is the reference documentation of the Aptos framework. 
- [`0x1::transaction_fee`](transaction_fee.md#0x1_transaction_fee) - [`0x1::transaction_validation`](transaction_validation.md#0x1_transaction_validation) - [`0x1::util`](util.md#0x1_util) +- [`0x1::validator_consensus_info`](validator_consensus_info.md#0x1_validator_consensus_info) - [`0x1::version`](version.md#0x1_version) - [`0x1::vesting`](vesting.md#0x1_vesting) - [`0x1::voting`](voting.md#0x1_voting) diff --git a/aptos-move/framework/aptos-framework/doc/randomness.md b/aptos-move/framework/aptos-framework/doc/randomness.md new file mode 100644 index 0000000000000..36969528f8bfc --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/randomness.md @@ -0,0 +1,1352 @@ + + + +# Module `0x1::randomness` + +This module provides access to *instant* secure randomness generated by the Aptos validators, as documented in +[AIP-41](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-41.md). + +Secure randomness means (1) the randomness cannot be predicted ahead of time by validators, developers or users +and (2) the randomness cannot be biased in any way by validators, developers or users. + +Security holds under the same proof-of-stake assumption that secures the Aptos network. 
+ + +- [Resource `PerBlockRandomness`](#0x1_randomness_PerBlockRandomness) +- [Struct `RandomnessGeneratedEvent`](#0x1_randomness_RandomnessGeneratedEvent) +- [Resource `Ghost$var`](#0x1_randomness_Ghost$var) +- [Constants](#@Constants_0) +- [Function `initialize`](#0x1_randomness_initialize) +- [Function `on_new_block`](#0x1_randomness_on_new_block) +- [Function `next_32_bytes`](#0x1_randomness_next_32_bytes) +- [Function `bytes`](#0x1_randomness_bytes) +- [Function `u8_integer`](#0x1_randomness_u8_integer) +- [Function `u16_integer`](#0x1_randomness_u16_integer) +- [Function `u32_integer`](#0x1_randomness_u32_integer) +- [Function `u64_integer`](#0x1_randomness_u64_integer) +- [Function `u128_integer`](#0x1_randomness_u128_integer) +- [Function `u256_integer`](#0x1_randomness_u256_integer) +- [Function `u256_integer_internal`](#0x1_randomness_u256_integer_internal) +- [Function `u8_range`](#0x1_randomness_u8_range) +- [Function `u16_range`](#0x1_randomness_u16_range) +- [Function `u32_range`](#0x1_randomness_u32_range) +- [Function `u64_range`](#0x1_randomness_u64_range) +- [Function `u64_range_internal`](#0x1_randomness_u64_range_internal) +- [Function `u128_range`](#0x1_randomness_u128_range) +- [Function `u256_range`](#0x1_randomness_u256_range) +- [Function `permutation`](#0x1_randomness_permutation) +- [Function `safe_add_mod`](#0x1_randomness_safe_add_mod) +- [Function `safe_add_mod_for_verification`](#0x1_randomness_safe_add_mod_for_verification) +- [Function `fetch_and_increment_txn_counter`](#0x1_randomness_fetch_and_increment_txn_counter) +- [Function `is_safe_call`](#0x1_randomness_is_safe_call) +- [Specification](#@Specification_1) + - [Function `initialize`](#@Specification_1_initialize) + - [Function `on_new_block`](#@Specification_1_on_new_block) + - [Function `next_32_bytes`](#@Specification_1_next_32_bytes) + - [Function `u8_integer`](#@Specification_1_u8_integer) + - [Function `u16_integer`](#@Specification_1_u16_integer) + - [Function 
`u32_integer`](#@Specification_1_u32_integer) + - [Function `u64_integer`](#@Specification_1_u64_integer) + - [Function `u128_integer`](#@Specification_1_u128_integer) + - [Function `u256_integer`](#@Specification_1_u256_integer) + - [Function `u8_range`](#@Specification_1_u8_range) + - [Function `u64_range`](#@Specification_1_u64_range) + - [Function `u256_range`](#@Specification_1_u256_range) + - [Function `permutation`](#@Specification_1_permutation) + - [Function `safe_add_mod_for_verification`](#@Specification_1_safe_add_mod_for_verification) + - [Function `fetch_and_increment_txn_counter`](#@Specification_1_fetch_and_increment_txn_counter) + - [Function `is_safe_call`](#@Specification_1_is_safe_call) + + +
use 0x1::event;
+use 0x1::hash;
+use 0x1::option;
+use 0x1::system_addresses;
+use 0x1::transaction_context;
+use 0x1::vector;
+
+ + + + + +## Resource `PerBlockRandomness` + +32-byte randomness seed unique to every block. +This resource is updated in every block prologue. + + +
struct PerBlockRandomness has drop, key
+
+ + + +
+Fields + + +
+
+epoch: u64 +
+
+ +
+
+round: u64 +
+
+ +
+
+seed: option::Option<vector<u8>> +
+
+ +
+
+ + +
+ + + +## Struct `RandomnessGeneratedEvent` + +Event emitted every time a public randomness API in this module is called. + + +
#[event]
+struct RandomnessGeneratedEvent has drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$var` + + + +
struct Ghost$var has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: vector<u8> +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const MAX_U256: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935;
+
+ + + + + + + +
const DST: vector<u8> = [65, 80, 84, 79, 83, 95, 82, 65, 78, 68, 79, 77, 78, 69, 83, 83];
+
+ + + + + +Randomness APIs calls must originate from a private entry function. Otherwise, test-and-abort attacks are possible. + + +
const E_API_USE_SUSCEPTIBLE_TO_TEST_AND_ABORT: u64 = 1;
+
+ + + + + +## Function `initialize` + +Called in genesis.move. +Must be called in tests to initialize the PerBlockRandomness resource. + + +
public fun initialize(framework: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(framework: &signer) {
+    system_addresses::assert_aptos_framework(framework);
+    move_to(framework, PerBlockRandomness {
+        epoch: 0,
+        round: 0,
+        seed: option::none(),
+    });
+}
+
+ + + +
+ + + +## Function `on_new_block` + +Invoked in block prologues to update the block-level randomness seed. + + +
public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: option::Option<vector<u8>>)
+
+ + + +
+Implementation + + +
public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: Option<vector<u8>>) acquires PerBlockRandomness {
+    system_addresses::assert_vm(vm);
+    if (exists<PerBlockRandomness>(@aptos_framework)) {
+        let randomness = borrow_global_mut<PerBlockRandomness>(@aptos_framework);
+        randomness.epoch = epoch;
+        randomness.round = round;
+        randomness.seed = seed_for_new_block;
+    }
+}
+
+ + + +
+ + + +## Function `next_32_bytes` + +Generate the next 32 random bytes. Repeated calls will yield different results (assuming the collision-resistance +of the hash function). + + +
fun next_32_bytes(): vector<u8>
+
+ + + +
+Implementation + + +
fun next_32_bytes(): vector<u8> acquires PerBlockRandomness {
+    assert!(is_safe_call(), E_API_USE_SUSCEPTIBLE_TO_TEST_AND_ABORT);
+
+    let input = DST;
+    let randomness = borrow_global<PerBlockRandomness>(@aptos_framework);
+    let seed = *option::borrow(&randomness.seed);
+
+    vector::append(&mut input, seed);
+    vector::append(&mut input, transaction_context::get_transaction_hash());
+    vector::append(&mut input, fetch_and_increment_txn_counter());
+    hash::sha3_256(input)
+}
+
+ + + +
+ + + +## Function `bytes` + +Generates a sequence of bytes uniformly at random + + +
public fun bytes(n: u64): vector<u8>
+
+ + + +
+Implementation + + +
public fun bytes(n: u64): vector<u8> acquires PerBlockRandomness {
+    let v = vector[];
+    let c = 0;
+    while (c < n) {
+        let blob = next_32_bytes();
+        vector::append(&mut v, blob);
+
+        c = c + 32;
+    };
+
+    if (c > n) {
+        vector::trim(&mut v, n);
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    v
+}
+
+ + + +
+ + + +## Function `u8_integer` + +Generates an u8 uniformly at random. + + +
public fun u8_integer(): u8
+
+ + + +
+Implementation + + +
public fun u8_integer(): u8 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let ret: u8 = vector::pop_back(&mut raw);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u16_integer` + +Generates an u16 uniformly at random. + + +
public fun u16_integer(): u16
+
+ + + +
+Implementation + + +
public fun u16_integer(): u16 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u16 = 0;
+    while (i < 2) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u16);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u32_integer` + +Generates an u32 uniformly at random. + + +
public fun u32_integer(): u32
+
+ + + +
+Implementation + + +
public fun u32_integer(): u32 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u32 = 0;
+    while (i < 4) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u32);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u64_integer` + +Generates an u64 uniformly at random. + + +
public fun u64_integer(): u64
+
+ + + +
+Implementation + + +
public fun u64_integer(): u64 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u64 = 0;
+    while (i < 8) {
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u64);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u128_integer` + +Generates an u128 uniformly at random. + + +
public fun u128_integer(): u128
+
+ + + +
+Implementation + + +
public fun u128_integer(): u128 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u128 = 0;
+    while (i < 16) {
+        spec {
+            // TODO: Prove these with proper loop invariants.
+            assume ret * 256 + 255 <= MAX_U256;
+            assume len(raw) > 0;
+        };
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u128);
+        i = i + 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    ret
+}
+
+ + + +
+ + + +## Function `u256_integer` + +Generates a u256 uniformly at random. + + +
public fun u256_integer(): u256
+
+ + + +
+Implementation + + +
public fun u256_integer(): u256 acquires PerBlockRandomness {
+    event::emit(RandomnessGeneratedEvent {});
+    u256_integer_internal()
+}
+
+ + + +
+ + + +## Function `u256_integer_internal` + +Generates a u256 uniformly at random. + + +
fun u256_integer_internal(): u256
+
+ + + +
+Implementation + + +
fun u256_integer_internal(): u256 acquires PerBlockRandomness {
+    let raw = next_32_bytes();
+    let i = 0;
+    let ret: u256 = 0;
+    while (i < 32) {
+        spec {
+            // TODO: Prove these with proper loop invariants.
+            assume ret * 256 + 255 <= MAX_U256;
+            assume len(raw) > 0;
+        };
+        ret = ret * 256 + (vector::pop_back(&mut raw) as u256);
+        i = i + 1;
+    };
+    ret
+}
+
+ + + +
+ + + +## Function `u8_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implementing your own via rejection sampling. + + +
public fun u8_range(min_incl: u8, max_excl: u8): u8
+
+ + + +
+Implementation + + +
public fun u8_range(min_incl: u8, max_excl: u8): u8 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u8);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u16_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implementing your own via rejection sampling. + + +
public fun u16_range(min_incl: u16, max_excl: u16): u16
+
+ + + +
+Implementation + + +
public fun u16_range(min_incl: u16, max_excl: u16): u16 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u16);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u32_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implementing your own via rejection sampling. + + +
public fun u32_range(min_incl: u32, max_excl: u32): u32
+
+ + + +
+Implementation + + +
public fun u32_range(min_incl: u32, max_excl: u32): u32 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u32);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u64_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implementing your own via rejection sampling. + + +
public fun u64_range(min_incl: u64, max_excl: u64): u64
+
+ + + +
+Implementation + + +
public fun u64_range(min_incl: u64, max_excl: u64): u64 acquires PerBlockRandomness {
+    event::emit(RandomnessGeneratedEvent {});
+
+    u64_range_internal(min_incl, max_excl)
+}
+
+ + + +
+ + + +## Function `u64_range_internal` + + + +
public fun u64_range_internal(min_incl: u64, max_excl: u64): u64
+
+ + + +
+Implementation + + +
public fun u64_range_internal(min_incl: u64, max_excl: u64): u64 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u64);
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u128_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implementing your own via rejection sampling. + + +
public fun u128_range(min_incl: u128, max_excl: u128): u128
+
+ + + +
+Implementation + + +
public fun u128_range(min_incl: u128, max_excl: u128): u128 acquires PerBlockRandomness {
+    let range = ((max_excl - min_incl) as u256);
+    let sample = ((u256_integer_internal() % range) as u128);
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `u256_range` + +Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + +NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. +If you need perfect uniformity, consider implementing your own with u256_integer() + rejection sampling. + + +
public fun u256_range(min_incl: u256, max_excl: u256): u256
+
+ + + +
+Implementation + + +
public fun u256_range(min_incl: u256, max_excl: u256): u256 acquires PerBlockRandomness {
+    let range = max_excl - min_incl;
+    let r0 = u256_integer_internal();
+    let r1 = u256_integer_internal();
+
+    // Will compute sample := (r0 + r1*2^256) % range.
+
+    let sample = r1 % range;
+    let i = 0;
+    while ({
+        spec {
+            invariant sample >= 0 && sample < max_excl - min_incl;
+        };
+        i < 256
+    }) {
+        sample = safe_add_mod(sample, sample, range);
+        i = i + 1;
+    };
+
+    let sample = safe_add_mod(sample, r0 % range, range);
+    spec {
+        assert sample >= 0 && sample < max_excl - min_incl;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    min_incl + sample
+}
+
+ + + +
+ + + +## Function `permutation` + +Generate a permutation of [0, 1, ..., n-1] uniformly at random. +If n is 0, returns the empty vector. + + +
public fun permutation(n: u64): vector<u64>
+
+ + + +
+Implementation + + +
public fun permutation(n: u64): vector<u64> acquires PerBlockRandomness {
+    let values = vector[];
+
+    if(n == 0) {
+        return vector[]
+    };
+
+    // Initialize into [0, 1, ..., n-1].
+    let i = 0;
+    while ({
+        spec {
+            invariant i <= n;
+            invariant len(values) == i;
+        };
+        i < n
+    }) {
+        std::vector::push_back(&mut values, i);
+        i = i + 1;
+    };
+    spec {
+        assert len(values) == n;
+    };
+
+    // Shuffle.
+    let tail = n - 1;
+    while ({
+        spec {
+            invariant tail >= 0 && tail < len(values);
+        };
+        tail > 0
+    }) {
+        let pop_position = u64_range_internal(0, tail + 1);
+        spec {
+            assert pop_position < len(values);
+        };
+        std::vector::swap(&mut values, pop_position, tail);
+        tail = tail - 1;
+    };
+
+    event::emit(RandomnessGeneratedEvent {});
+
+    values
+}
+
+ + + +
+ + + +## Function `safe_add_mod` + +Compute (a + b) % m, assuming m >= 1, 0 <= a < m, 0<= b < m. + + +
fun safe_add_mod(a: u256, b: u256, m: u256): u256
+
+ + + +
+Implementation + + +
inline fun safe_add_mod(a: u256, b: u256, m: u256): u256 {
+    let neg_b = m - b;
+    if (a < neg_b) {
+        a + b
+    } else {
+        a - neg_b
+    }
+}
+
+ + + +
+ + + +## Function `safe_add_mod_for_verification` + + + +
#[verify_only]
+fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256
+
+ + + +
+Implementation + + +
fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256 {
+    let neg_b = m - b;
+    if (a < neg_b) {
+        a + b
+    } else {
+        a - neg_b
+    }
+}
+
+ + + +
+ + + +## Function `fetch_and_increment_txn_counter` + +Fetches and increments a transaction-specific 32-byte randomness-related counter. + + +
fun fetch_and_increment_txn_counter(): vector<u8>
+
+ + + +
+Implementation + + +
native fun fetch_and_increment_txn_counter(): vector<u8>;
+
+ + + +
+ + + +## Function `is_safe_call` + +Called in each randomness generation function to ensure certain safety invariants. +1. Ensure that the TXN that led to the call of this function had a private (or friend) entry function as its TXN payload. +2. TBA + + +
fun is_safe_call(): bool
+
+ + + +
+Implementation + + +
native fun is_safe_call(): bool;
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+invariant [suspendable] chain_status::is_operating() ==> exists<PerBlockRandomness>(@aptos_framework);
+
+global var: vector<u8>;
+
+ + + + + +### Function `initialize` + + +
public fun initialize(framework: &signer)
+
+ + + + +
let framework_addr = signer::address_of(framework);
+aborts_if framework_addr != @aptos_framework;
+aborts_if exists<PerBlockRandomness>(framework_addr);
+ensures global<PerBlockRandomness>(framework_addr).seed == option::spec_none<vector<u8>>();
+
+ + + + + +### Function `on_new_block` + + +
public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: option::Option<vector<u8>>)
+
+ + + + +
aborts_if signer::address_of(vm) != @vm;
+ensures exists<PerBlockRandomness>(@aptos_framework) ==> global<PerBlockRandomness>(@aptos_framework).seed == seed_for_new_block;
+ensures exists<PerBlockRandomness>(@aptos_framework) ==> global<PerBlockRandomness>(@aptos_framework).epoch == epoch;
+ensures exists<PerBlockRandomness>(@aptos_framework) ==> global<PerBlockRandomness>(@aptos_framework).round == round;
+
+ + + + + +### Function `next_32_bytes` + + +
fun next_32_bytes(): vector<u8>
+
+ + + + +
include NextBlobAbortsIf;
+let input = b"APTOS_RANDOMNESS";
+let randomness = global<PerBlockRandomness>(@aptos_framework);
+let seed = option::spec_borrow(randomness.seed);
+let txn_hash = transaction_context::spec_get_txn_hash();
+let txn_counter = spec_fetch_and_increment_txn_counter();
+ensures len(result) == 32;
+ensures result == hash::sha3_256(concat(concat(concat(input, seed), txn_hash), txn_counter));
+
+ + + + + + + +
schema NextBlobAbortsIf {
+    let randomness = global<PerBlockRandomness>(@aptos_framework);
+    aborts_if option::spec_is_none(randomness.seed);
+    aborts_if !spec_is_safe_call();
+    aborts_if !exists<PerBlockRandomness>(@aptos_framework);
+}
+
+ + + + + +### Function `u8_integer` + + +
public fun u8_integer(): u8
+
+ + + + +
include NextBlobAbortsIf;
+
+ + + + + +### Function `u16_integer` + + +
public fun u16_integer(): u16
+
+ + + + +
pragma unroll = 2;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u32_integer` + + +
public fun u32_integer(): u32
+
+ + + + +
pragma unroll = 4;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u64_integer` + + +
public fun u64_integer(): u64
+
+ + + + +
pragma unroll = 8;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u128_integer` + + +
public fun u128_integer(): u128
+
+ + + + +
pragma unroll = 16;
+include NextBlobAbortsIf;
+
+ + + + + +### Function `u256_integer` + + +
public fun u256_integer(): u256
+
+ + + + +
pragma unroll = 32;
+include NextBlobAbortsIf;
+ensures [abstract] result == spec_u256_integer();
+
+ + + + + + + +
fun spec_u256_integer(): u256;
+
+ + + + + +### Function `u8_range` + + +
public fun u8_range(min_incl: u8, max_excl: u8): u8
+
+ + + + +
pragma verify_duration_estimate = 120;
+pragma opaque;
+include NextBlobAbortsIf;
+aborts_if min_incl >= max_excl;
+ensures result >= min_incl && result < max_excl;
+
+ + + + + +### Function `u64_range` + + +
public fun u64_range(min_incl: u64, max_excl: u64): u64
+
+ + + + +
include NextBlobAbortsIf;
+aborts_if min_incl >= max_excl;
+ensures result >= min_incl && result < max_excl;
+
+ + + + + +### Function `u256_range` + + +
public fun u256_range(min_incl: u256, max_excl: u256): u256
+
+ + + + +
pragma verify_duration_estimate = 120;
+include NextBlobAbortsIf;
+aborts_if min_incl >= max_excl;
+ensures result >= min_incl && result < max_excl;
+
+ + + + + +### Function `permutation` + + +
public fun permutation(n: u64): vector<u64>
+
+ + + + +
pragma aborts_if_is_partial;
+
+ + + + + +### Function `safe_add_mod_for_verification` + + +
#[verify_only]
+fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256
+
+ + + + +
aborts_if m < b;
+aborts_if a < m - b && a + b > MAX_U256;
+ensures result == spec_safe_add_mod(a, b, m);
+
+ + + + + + + +
fun spec_safe_add_mod(a: u256, b: u256, m: u256): u256 {
+   if (a < m - b) {
+       a + b
+   } else {
+       a - (m - b)
+   }
+}
+
+ + + + + +### Function `fetch_and_increment_txn_counter` + + +
fun fetch_and_increment_txn_counter(): vector<u8>
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_fetch_and_increment_txn_counter();
+
+ + + + + + + +
fun spec_fetch_and_increment_txn_counter(): vector<u8>;
+
+ + + + + +### Function `is_safe_call` + + +
fun is_safe_call(): bool
+
+ + + + +
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == spec_is_safe_call();
+
+ + + + + + + +
fun spec_is_safe_call(): bool;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/reconfiguration.md b/aptos-move/framework/aptos-framework/doc/reconfiguration.md index edf30014fd4cd..8bc6fe963b08b 100644 --- a/aptos-move/framework/aptos-framework/doc/reconfiguration.md +++ b/aptos-move/framework/aptos-framework/doc/reconfiguration.md @@ -37,6 +37,7 @@ to synchronize configuration changes for the validators. use 0x1::error; use 0x1::event; use 0x1::features; +use 0x1::reconfiguration_state; use 0x1::signer; use 0x1::stake; use 0x1::storage_gas; @@ -360,6 +361,8 @@ Signal validators to start using new configuration. Must be called from friend c return }; + reconfiguration_state::on_reconfig_start(); + // Reconfiguration "forces the block" to end, as mentioned above. Therefore, we must process the collected fees // explicitly so that staking can distribute them. // @@ -391,6 +394,8 @@ Signal validators to start using new configuration. Must be called from friend c epoch: config_ref.epoch, }, ); + + reconfiguration_state::on_reconfig_finish(); }
@@ -672,7 +677,8 @@ Make sure the caller is admin and check the resource DisableReconfiguration. -
pragma verify_duration_estimate = 120;
+
pragma verify = true;
+pragma verify_duration_estimate = 600;
 requires exists<stake::ValidatorFees>(@aptos_framework);
 let success = !(chain_status::is_genesis() || timestamp::spec_now_microseconds() == 0 || !reconfiguration_enabled())
     && timestamp::spec_now_microseconds() != global<Configuration>(@aptos_framework).last_reconfiguration_time;
diff --git a/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md b/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md
new file mode 100644
index 0000000000000..433cf335b9499
--- /dev/null
+++ b/aptos-move/framework/aptos-framework/doc/reconfiguration_state.md
@@ -0,0 +1,573 @@
+
+
+
+# Module `0x1::reconfiguration_state`
+
+Reconfiguration meta-state resources and util functions.
+
+WARNING: reconfiguration_state::initialize() is required before RECONFIGURE_WITH_DKG can be enabled.
+
+
+-  [Resource `State`](#0x1_reconfiguration_state_State)
+-  [Struct `StateInactive`](#0x1_reconfiguration_state_StateInactive)
+-  [Struct `StateActive`](#0x1_reconfiguration_state_StateActive)
+-  [Constants](#@Constants_0)
+-  [Function `is_initialized`](#0x1_reconfiguration_state_is_initialized)
+-  [Function `initialize`](#0x1_reconfiguration_state_initialize)
+-  [Function `initialize_for_testing`](#0x1_reconfiguration_state_initialize_for_testing)
+-  [Function `is_in_progress`](#0x1_reconfiguration_state_is_in_progress)
+-  [Function `on_reconfig_start`](#0x1_reconfiguration_state_on_reconfig_start)
+-  [Function `start_time_secs`](#0x1_reconfiguration_state_start_time_secs)
+-  [Function `on_reconfig_finish`](#0x1_reconfiguration_state_on_reconfig_finish)
+-  [Specification](#@Specification_1)
+    -  [Resource `State`](#@Specification_1_State)
+    -  [Function `initialize`](#@Specification_1_initialize)
+    -  [Function `initialize_for_testing`](#@Specification_1_initialize_for_testing)
+    -  [Function `is_in_progress`](#@Specification_1_is_in_progress)
+    -  [Function `on_reconfig_start`](#@Specification_1_on_reconfig_start)
+    -  [Function `start_time_secs`](#@Specification_1_start_time_secs)
+
+
+
use 0x1::copyable_any;
+use 0x1::error;
+use 0x1::string;
+use 0x1::system_addresses;
+use 0x1::timestamp;
+
+ + + + + +## Resource `State` + +Reconfiguration drivers update this resources to notify other modules of some reconfiguration state. + + +
struct State has key
+
+ + + +
+Fields + + +
+
+variant: copyable_any::Any +
+
+ The state variant packed as an Any. + Currently the variant type is one of the following. + - ReconfigStateInactive + - ReconfigStateActive +
+
+ + +
+ + + +## Struct `StateInactive` + +A state variant indicating no reconfiguration is in progress. + + +
struct StateInactive has copy, drop, store
+
+ + + +
+Fields + + +
+
+dummy_field: bool +
+
+ +
+
+ + +
+ + + +## Struct `StateActive` + +A state variant indicating a reconfiguration is in progress. + + +
struct StateActive has copy, drop, store
+
+ + + +
+Fields + + +
+
+start_time_secs: u64 +
+
+ +
+
+ + +
+ + + +## Constants + + + + + + +
const ERECONFIG_NOT_IN_PROGRESS: u64 = 1;
+
+ + + + + +## Function `is_initialized` + + + +
public fun is_initialized(): bool
+
+ + + +
+Implementation + + +
public fun is_initialized(): bool {
+    exists<State>(@aptos_framework)
+}
+
+ + + +
+ + + +## Function `initialize` + + + +
public fun initialize(fx: &signer)
+
+ + + +
+Implementation + + +
public fun initialize(fx: &signer) {
+    system_addresses::assert_aptos_framework(fx);
+    if (!exists<State>(@aptos_framework)) {
+        move_to(fx, State {
+            variant: copyable_any::pack(StateInactive {})
+        })
+    }
+}
+
+ + + +
+ + + +## Function `initialize_for_testing` + + + +
public fun initialize_for_testing(fx: &signer)
+
+ + + +
+Implementation + + +
public fun initialize_for_testing(fx: &signer) {
+    initialize(fx)
+}
+
+ + + +
+ + + +## Function `is_in_progress` + +Return whether the reconfiguration state is marked "in progress". + + +
public(friend) fun is_in_progress(): bool
+
+ + + +
+Implementation + + +
public(friend) fun is_in_progress(): bool acquires State {
+    if (!exists<State>(@aptos_framework)) {
+        return false
+    };
+
+    let state = borrow_global<State>(@aptos_framework);
+    let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+    variant_type_name == b"0x1::reconfiguration_state::StateActive"
+}
+
+ + + +
+ + + +## Function `on_reconfig_start` + +Called at the beginning of a reconfiguration (either immediate or async) +to mark the reconfiguration state "in progress" if it is currently "stopped". + +Also record the current time as the reconfiguration start time. (Some module, e.g., stake.move, needs this info). + + +
public(friend) fun on_reconfig_start()
+
+ + + +
+Implementation + + +
public(friend) fun on_reconfig_start() acquires State {
+    if (exists<State>(@aptos_framework)) {
+        let state = borrow_global_mut<State>(@aptos_framework);
+        let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+        if (variant_type_name == b"0x1::reconfiguration_state::StateInactive") {
+            state.variant = copyable_any::pack(StateActive {
+                start_time_secs: timestamp::now_seconds()
+            });
+        }
+    };
+}
+
+ + + +
+ + + +## Function `start_time_secs` + +Get the unix time when the currently in-progress reconfiguration started. +Abort if the reconfiguration state is not "in progress". + + +
public(friend) fun start_time_secs(): u64
+
+ + + +
+Implementation + + +
public(friend) fun start_time_secs(): u64 acquires State {
+    let state = borrow_global<State>(@aptos_framework);
+    let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+    if (variant_type_name == b"0x1::reconfiguration_state::StateActive") {
+        let active = copyable_any::unpack<StateActive>(state.variant);
+        active.start_time_secs
+    } else {
+        abort(error::invalid_state(ERECONFIG_NOT_IN_PROGRESS))
+    }
+}
+
+ + + +
+ + + +## Function `on_reconfig_finish` + +Called at the end of every reconfiguration to mark the state as "stopped". +Abort if the current state is not "in progress". + + +
public(friend) fun on_reconfig_finish()
+
+ + + +
+Implementation + + +
public(friend) fun on_reconfig_finish() acquires State {
+    if (exists<State>(@aptos_framework)) {
+        let state = borrow_global_mut<State>(@aptos_framework);
+        let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant));
+        if (variant_type_name == b"0x1::reconfiguration_state::StateActive") {
+            state.variant = copyable_any::pack(StateInactive {});
+        } else {
+            abort(error::invalid_state(ERECONFIG_NOT_IN_PROGRESS))
+        }
+    }
+}
+
+ + + +
+ + + +## Specification + + + +
invariant [suspendable] chain_status::is_operating() ==> exists<State>(@aptos_framework);
+
+ + + + + +### Resource `State` + + +
struct State has key
+
+ + + +
+
+variant: copyable_any::Any +
+
+ The state variant packed as an Any. + Currently the variant type is one of the following. + - ReconfigStateInactive + - ReconfigStateActive +
+
+ + + +
invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" ||
+    copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive";
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive"
+    ==> from_bcs::deserializable<StateActive>(variant.data);
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive"
+    ==> from_bcs::deserializable<StateInactive>(variant.data);
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" ==>
+    type_info::type_name<StateActive>() == variant.type_name;
+invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive" ==>
+    type_info::type_name<StateInactive>() == variant.type_name;
+
+ + + + + +### Function `initialize` + + +
public fun initialize(fx: &signer)
+
+ + + + +
aborts_if signer::address_of(fx) != @aptos_framework;
+let post post_state = global<State>(@aptos_framework);
+ensures exists<State>(@aptos_framework);
+ensures !exists<State>(@aptos_framework) ==> from_bcs::deserializable<StateInactive>(post_state.variant.data);
+
+ + + + + +### Function `initialize_for_testing` + + +
public fun initialize_for_testing(fx: &signer)
+
+ + + + +
aborts_if signer::address_of(fx) != @aptos_framework;
+
+ + + + + +### Function `is_in_progress` + + +
public(friend) fun is_in_progress(): bool
+
+ + + + +
aborts_if false;
+
+ + + + + + + +
fun spec_is_in_progress(): bool {
+   if (!exists<State>(@aptos_framework)) {
+       false
+   } else {
+       copyable_any::type_name(global<State>(@aptos_framework).variant).bytes == b"0x1::reconfiguration_state::StateActive"
+   }
+}
+
+ + + + + +### Function `on_reconfig_start` + + +
public(friend) fun on_reconfig_start()
+
+ + + + +
aborts_if false;
+requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+let state = Any {
+    type_name: type_info::type_name<StateActive>(),
+    data: bcs::serialize(StateActive {
+        start_time_secs: timestamp::spec_now_seconds()
+    })
+};
+let pre_state = global<State>(@aptos_framework);
+let post post_state = global<State>(@aptos_framework);
+ensures (exists<State>(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateInactive") ==> copyable_any::type_name(post_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateActive";
+ensures (exists<State>(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateInactive") ==> post_state.variant == state;
+ensures (exists<State>(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes
+    == b"0x1::reconfiguration_state::StateInactive") ==> from_bcs::deserializable<StateActive>(post_state.variant.data);
+
+ + + + + +### Function `start_time_secs` + + +
public(friend) fun start_time_secs(): u64
+
+ + + + +
include StartTimeSecsAbortsIf;
+
+ + + + + + + +
fun spec_start_time_secs(): u64 {
+   use aptos_std::from_bcs;
+   let state = global<State>(@aptos_framework);
+   from_bcs::deserialize<StateActive>(state.variant.data).start_time_secs
+}
+
+ + + + + + + +
schema StartTimeSecsRequirement {
+    requires exists<State>(@aptos_framework);
+    requires copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
+        == b"0x1::reconfiguration_state::StateActive";
+    include UnpackRequiresStateActive {
+        x:  global<State>(@aptos_framework).variant
+    };
+}
+
+ + + + + + + +
schema UnpackRequiresStateActive {
+    x: Any;
+    requires type_info::type_name<StateActive>() == x.type_name && from_bcs::deserializable<StateActive>(x.data);
+}
+
+ + + + + + + +
schema StartTimeSecsAbortsIf {
+    aborts_if !exists<State>(@aptos_framework);
+    include  copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
+        == b"0x1::reconfiguration_state::StateActive" ==>
+    copyable_any::UnpackAbortsIf<StateActive> {
+        x:  global<State>(@aptos_framework).variant
+    };
+    aborts_if copyable_any::type_name(global<State>(@aptos_framework).variant).bytes
+        != b"0x1::reconfiguration_state::StateActive";
+}
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md b/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md new file mode 100644 index 0000000000000..c159883030fff --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/reconfiguration_with_dkg.md @@ -0,0 +1,216 @@ + + + +# Module `0x1::reconfiguration_with_dkg` + +Reconfiguration with DKG helper functions. + + +- [Function `try_start`](#0x1_reconfiguration_with_dkg_try_start) +- [Function `finish`](#0x1_reconfiguration_with_dkg_finish) +- [Function `finish_with_dkg_result`](#0x1_reconfiguration_with_dkg_finish_with_dkg_result) +- [Specification](#@Specification_0) + - [Function `try_start`](#@Specification_0_try_start) + - [Function `finish`](#@Specification_0_finish) + - [Function `finish_with_dkg_result`](#@Specification_0_finish_with_dkg_result) + + +
use 0x1::consensus_config;
+use 0x1::dkg;
+use 0x1::execution_config;
+use 0x1::features;
+use 0x1::gas_schedule;
+use 0x1::jwks;
+use 0x1::reconfiguration;
+use 0x1::reconfiguration_state;
+use 0x1::stake;
+use 0x1::validator_consensus_info;
+use 0x1::version;
+
+ + + + + +## Function `try_start` + +Trigger a reconfiguration with DKG. +Do nothing if one is already in progress. + + +
public(friend) fun try_start()
+
+ + + +
+Implementation + + +
public(friend) fun try_start() {
+    if (dkg::in_progress()) { return };
+    reconfiguration_state::on_reconfig_start();
+    let cur_epoch = reconfiguration::current_epoch();
+    dkg::start(
+        cur_epoch,
+        stake::cur_validator_consensus_infos(),
+        stake::next_validator_consensus_infos(),
+    );
+}
+
+ + + +
+ + + +## Function `finish` + +Apply buffered on-chain configs (except for ValidatorSet, which is done inside reconfiguration::reconfigure()). +Re-enable validator set changes. +Run the default reconfiguration to enter the new epoch. + + +
public(friend) fun finish(account: &signer)
+
+ + + +
+Implementation + + +
public(friend) fun finish(account: &signer) {
+    consensus_config::on_new_epoch();
+    execution_config::on_new_epoch();
+    gas_schedule::on_new_epoch();
+    std::version::on_new_epoch();
+    jwks::on_new_epoch();
+    features::on_new_epoch(account);
+    reconfiguration::reconfigure();
+}
+
+ + + +
+ + + +## Function `finish_with_dkg_result` + +Complete the current reconfiguration with DKG. +Abort if no DKG is in progress. + + +
fun finish_with_dkg_result(account: &signer, dkg_result: vector<u8>)
+
+ + + +
+Implementation + + +
fun finish_with_dkg_result(account: &signer, dkg_result: vector<u8>) {
+    dkg::finish(dkg_result);
+    finish(account);
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+
+ + + + + +### Function `try_start` + + +
public(friend) fun try_start()
+
+ + + + +
requires chain_status::is_operating();
+include stake::ResourceRequirement;
+include stake::GetReconfigStartTimeRequirement;
+include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
+aborts_if false;
+
+ + + + + +### Function `finish` + + +
public(friend) fun finish(account: &signer)
+
+ + + + +
pragma verify_duration_estimate = 600;
+include FinishRequirement;
+
+ + + + + + + +
schema FinishRequirement {
+    account: signer;
+    requires signer::address_of(account) == @aptos_framework;
+    requires chain_status::is_operating();
+    requires exists<CoinInfo<AptosCoin>>(@aptos_framework);
+    include staking_config::StakingRewardsConfigRequirement;
+    requires exists<stake::ValidatorFees>(@aptos_framework);
+    include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply;
+    requires exists<features::Features>(@std);
+    include config_buffer::OnNewEpochRequirement<version::Version>;
+    include config_buffer::OnNewEpochRequirement<gas_schedule::GasScheduleV2>;
+    include config_buffer::OnNewEpochRequirement<execution_config::ExecutionConfig>;
+    include config_buffer::OnNewEpochRequirement<consensus_config::ConsensusConfig>;
+    aborts_if false;
+}
+
+ + + + + +### Function `finish_with_dkg_result` + + +
fun finish_with_dkg_result(account: &signer, dkg_result: vector<u8>)
+
+ + + + +
pragma verify = true;
+pragma verify_duration_estimate = 600;
+include FinishRequirement;
+requires dkg::spec_in_progress();
+aborts_if false;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/stake.md b/aptos-move/framework/aptos-framework/doc/stake.md index 43f203a97b549..b6193d4b3eb22 100644 --- a/aptos-move/framework/aptos-framework/doc/stake.md +++ b/aptos-move/framework/aptos-framework/doc/stake.md @@ -48,6 +48,8 @@ or if their stake drops below the min required, they would get removed at the en - [Resource `AllowedValidators`](#0x1_stake_AllowedValidators) - [Resource `Ghost$ghost_valid_perf`](#0x1_stake_Ghost$ghost_valid_perf) - [Resource `Ghost$ghost_proposer_idx`](#0x1_stake_Ghost$ghost_proposer_idx) +- [Resource `Ghost$ghost_active_num`](#0x1_stake_Ghost$ghost_active_num) +- [Resource `Ghost$ghost_pending_inactive_num`](#0x1_stake_Ghost$ghost_pending_inactive_num) - [Constants](#@Constants_0) - [Function `initialize_validator_fees`](#0x1_stake_initialize_validator_fees) - [Function `add_transaction_fee`](#0x1_stake_add_transaction_fee) @@ -94,7 +96,12 @@ or if their stake drops below the min required, they would get removed at the en - [Function `is_current_epoch_validator`](#0x1_stake_is_current_epoch_validator) - [Function `update_performance_statistics`](#0x1_stake_update_performance_statistics) - [Function `on_new_epoch`](#0x1_stake_on_new_epoch) +- [Function `cur_validator_consensus_infos`](#0x1_stake_cur_validator_consensus_infos) +- [Function `next_validator_consensus_infos`](#0x1_stake_next_validator_consensus_infos) +- [Function `validator_consensus_infos_from_validator_set`](#0x1_stake_validator_consensus_infos_from_validator_set) +- [Function `addresses_from_validator_infos`](#0x1_stake_addresses_from_validator_infos) - [Function `update_stake_pool`](#0x1_stake_update_stake_pool) +- [Function `get_reconfig_start_time_secs`](#0x1_stake_get_reconfig_start_time_secs) - [Function `calculate_rewards_amount`](#0x1_stake_calculate_rewards_amount) - [Function `distribute_rewards`](#0x1_stake_distribute_rewards) - [Function 
`append`](#0x1_stake_append) @@ -106,6 +113,7 @@ or if their stake drops below the min required, they would get removed at the en - [Function `configure_allowed_validators`](#0x1_stake_configure_allowed_validators) - [Function `is_allowed`](#0x1_stake_is_allowed) - [Function `assert_owner_cap_exists`](#0x1_stake_assert_owner_cap_exists) +- [Function `assert_reconfig_not_in_progress`](#0x1_stake_assert_reconfig_not_in_progress) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) @@ -134,7 +142,10 @@ or if their stake drops below the min required, they would get removed at the en - [Function `is_current_epoch_validator`](#@Specification_1_is_current_epoch_validator) - [Function `update_performance_statistics`](#@Specification_1_update_performance_statistics) - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) + - [Function `next_validator_consensus_infos`](#@Specification_1_next_validator_consensus_infos) + - [Function `validator_consensus_infos_from_validator_set`](#@Specification_1_validator_consensus_infos_from_validator_set) - [Function `update_stake_pool`](#@Specification_1_update_stake_pool) + - [Function `get_reconfig_start_time_secs`](#@Specification_1_get_reconfig_start_time_secs) - [Function `calculate_rewards_amount`](#@Specification_1_calculate_rewards_amount) - [Function `distribute_rewards`](#@Specification_1_distribute_rewards) - [Function `append`](#@Specification_1_append) @@ -148,6 +159,7 @@ or if their stake drops below the min required, they would get removed at the en
use 0x1::account;
 use 0x1::aptos_coin;
 use 0x1::bls12381;
+use 0x1::chain_status;
 use 0x1::coin;
 use 0x1::error;
 use 0x1::event;
@@ -155,11 +167,13 @@ or if their stake drops below the min required, they would get removed at the en
 use 0x1::fixed_point64;
 use 0x1::math64;
 use 0x1::option;
+use 0x1::reconfiguration_state;
 use 0x1::signer;
 use 0x1::staking_config;
 use 0x1::system_addresses;
 use 0x1::table;
 use 0x1::timestamp;
+use 0x1::validator_consensus_info;
 use 0x1::vector;
 
@@ -437,7 +451,7 @@ Full ValidatorSet, stored in @aptos_framework. 3. on_new_epoch processes two pending queues and refresh ValidatorInfo from the owner's address. -
struct ValidatorSet has key
+
struct ValidatorSet has copy, drop, store, key
 
@@ -1102,6 +1116,60 @@ security of the testnet. This will NOT be enabled in Mainnet. + + + + +## Resource `Ghost$ghost_active_num` + + + +
struct Ghost$ghost_active_num has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: u64 +
+
+ +
+
+ + +
+ + + +## Resource `Ghost$ghost_pending_inactive_num` + + + +
struct Ghost$ghost_pending_inactive_num has copy, drop, store, key
+
+ + + +
+Fields + + +
+
+v: u64 +
+
+ +
+
+ +
@@ -1248,6 +1316,16 @@ Owner capability does not exist at the provided account. + + +Validator set change temporarily disabled because of in-progress reconfiguration. + + +
const ERECONFIGURATION_IN_PROGRESS: u64 = 20;
+
+ + + Total stake exceeds maximum allowed. @@ -1866,29 +1944,38 @@ Allow on chain governance to remove validators from the validator set. aptos_framework: &signer, validators: &vector<address>, ) acquires ValidatorSet { + assert_reconfig_not_in_progress(); system_addresses::assert_aptos_framework(aptos_framework); - let validator_set = borrow_global_mut<ValidatorSet>(@aptos_framework); let active_validators = &mut validator_set.active_validators; let pending_inactive = &mut validator_set.pending_inactive; - let len = vector::length(validators); + spec { + update ghost_active_num = len(active_validators); + update ghost_pending_inactive_num = len(pending_inactive); + }; + let len_validators = vector::length(validators); let i = 0; // Remove each validator from the validator set. while ({ spec { - invariant i <= len; + invariant i <= len_validators; invariant spec_validators_are_initialized(active_validators); invariant spec_validator_indices_are_valid(active_validators); invariant spec_validators_are_initialized(pending_inactive); invariant spec_validator_indices_are_valid(pending_inactive); + invariant ghost_active_num + ghost_pending_inactive_num == len(active_validators) + len(pending_inactive); }; - i < len + i < len_validators }) { let validator = *vector::borrow(validators, i); let validator_index = find_validator(active_validators, validator); if (option::is_some(&validator_index)) { let validator_info = vector::swap_remove(active_validators, *option::borrow(&validator_index)); vector::push_back(pending_inactive, validator_info); + spec { + update ghost_active_num = ghost_active_num - 1; + update ghost_pending_inactive_num = ghost_pending_inactive_num + 1; + }; }; i = i + 1; }; @@ -2291,6 +2378,7 @@ Add coins into pool_address. this requires the corresp
public fun add_stake_with_cap(owner_cap: &OwnerCapability, coins: Coin<AptosCoin>) acquires StakePool, ValidatorSet {
+    assert_reconfig_not_in_progress();
     let pool_address = owner_cap.pool_address;
     assert_stake_pool_exists(pool_address);
 
@@ -2354,6 +2442,7 @@ Move amount of coins from pending_inactive to active.
 
 
 
public entry fun reactivate_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool {
+    assert_reconfig_not_in_progress();
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     let ownership_cap = borrow_global<OwnerCapability>(owner_address);
@@ -2381,6 +2470,7 @@ Move amount of coins from pending_inactive to active.
 
 
 
public fun reactivate_stake_with_cap(owner_cap: &OwnerCapability, amount: u64) acquires StakePool {
+    assert_reconfig_not_in_progress();
     let pool_address = owner_cap.pool_address;
     assert_stake_pool_exists(pool_address);
 
@@ -2431,7 +2521,9 @@ Rotate the consensus key of the validator, it'll take effect in next epoch.
     new_consensus_pubkey: vector<u8>,
     proof_of_possession: vector<u8>,
 ) acquires StakePool, ValidatorConfig {
+    assert_reconfig_not_in_progress();
     assert_stake_pool_exists(pool_address);
+
     let stake_pool = borrow_global_mut<StakePool>(pool_address);
     assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
 
@@ -2483,10 +2575,10 @@ Update the network and full node addresses of the validator. This only takes eff
     new_network_addresses: vector<u8>,
     new_fullnode_addresses: vector<u8>,
 ) acquires StakePool, ValidatorConfig {
+    assert_reconfig_not_in_progress();
     assert_stake_pool_exists(pool_address);
     let stake_pool = borrow_global_mut<StakePool>(pool_address);
     assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
-
     assert!(exists<ValidatorConfig>(pool_address), error::not_found(EVALIDATOR_CONFIG));
     let validator_info = borrow_global_mut<ValidatorConfig>(pool_address);
     let old_network_addresses = validator_info.network_addresses;
@@ -2640,6 +2732,7 @@ This internal version can only be called by the Genesis module during Genesis.
     operator: &signer,
     pool_address: address
 ) acquires StakePool, ValidatorConfig, ValidatorSet {
+    assert_reconfig_not_in_progress();
     assert_stake_pool_exists(pool_address);
     let stake_pool = borrow_global_mut<StakePool>(pool_address);
     assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR));
@@ -2695,6 +2788,7 @@ Similar to unlock_with_cap but will use ownership capability from the signing ac
 
 
 
public entry fun unlock(owner: &signer, amount: u64) acquires OwnerCapability, StakePool {
+    assert_reconfig_not_in_progress();
     let owner_address = signer::address_of(owner);
     assert_owner_cap_exists(owner_address);
     let ownership_cap = borrow_global<OwnerCapability>(owner_address);
@@ -2723,6 +2817,7 @@ Unlock amount from the active stake. Only possible if the lockup ha
 
 
 
public fun unlock_with_cap(amount: u64, owner_cap: &OwnerCapability) acquires StakePool {
+    assert_reconfig_not_in_progress();
     // Short-circuit if amount to unlock is 0 so we don't emit events.
     if (amount == 0) {
         return
@@ -2804,6 +2899,7 @@ Withdraw from pool_address's inactive stake with the corresponding
     owner_cap: &OwnerCapability,
     withdraw_amount: u64
 ): Coin<AptosCoin> acquires StakePool, ValidatorSet {
+    assert_reconfig_not_in_progress();
     let pool_address = owner_cap.pool_address;
     assert_stake_pool_exists(pool_address);
     let stake_pool = borrow_global_mut<StakePool>(pool_address);
@@ -2861,6 +2957,7 @@ Can only be called by the operator of the validator/staking pool.
     operator: &signer,
     pool_address: address
 ) acquires StakePool, ValidatorSet {
+    assert_reconfig_not_in_progress();
     let config = staking_config::get();
     assert!(
         staking_config::get_allow_validator_set_change(&config),
@@ -3016,7 +3113,7 @@ This function cannot abort.
 
 ## Function `on_new_epoch`
 
-Triggers at epoch boundary. This function shouldn't abort.
+Triggered during a reconfiguration. This function shouldn't abort.
 
 1. Distribute transaction fees and rewards to stake pools of active and pending inactive validators (requested
 to leave but not yet removed).
@@ -3072,6 +3169,7 @@ power.
     while ({
         spec {
             invariant spec_validators_are_initialized(next_epoch_validators);
+            invariant i <= vlen;
         };
         i < vlen
     }) {
@@ -3110,6 +3208,8 @@ power.
             invariant vlen == len(validator_set.active_validators);
             invariant forall i in 0..validator_index:
                 global<ValidatorConfig>(validator_set.active_validators[i].addr).validator_index < validator_index;
+            invariant forall i in 0..validator_index:
+                validator_set.active_validators[i].config.validator_index < validator_index;
             invariant len(validator_perf.validators) == validator_index;
         };
         validator_index < vlen
@@ -3127,12 +3227,17 @@ power.
         // Automatically renew a validator's lockup for validators that will still be in the validator set in the
         // next epoch.
         let stake_pool = borrow_global_mut<StakePool>(validator_info.addr);
-        if (stake_pool.locked_until_secs <= timestamp::now_seconds()) {
+        let now_secs = timestamp::now_seconds();
+        let reconfig_start_secs = if (chain_status::is_operating()) {
+            get_reconfig_start_time_secs()
+        } else {
+            now_secs
+        };
+        if (stake_pool.locked_until_secs <= reconfig_start_secs) {
             spec {
-                assume timestamp::spec_now_seconds() + recurring_lockup_duration_secs <= MAX_U64;
+                assume now_secs + recurring_lockup_duration_secs <= MAX_U64;
             };
-            stake_pool.locked_until_secs =
-                timestamp::now_seconds() + recurring_lockup_duration_secs;
+            stake_pool.locked_until_secs = now_secs + recurring_lockup_duration_secs;
         };
 
         validator_index = validator_index + 1;
@@ -3147,13 +3252,282 @@ power.
 
 
 
+
+
+
+
+## Function `cur_validator_consensus_infos`
+
+Return the ValidatorConsensusInfo of each current validator, sorted by current validator index.
+
+
+
public fun cur_validator_consensus_infos(): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + +
+Implementation + + +
public fun cur_validator_consensus_infos(): vector<ValidatorConsensusInfo> acquires ValidatorSet {
+    let validator_set = borrow_global<ValidatorSet>(@aptos_framework);
+    validator_consensus_infos_from_validator_set(validator_set)
+}
+
+ + + +
+ + + +## Function `next_validator_consensus_infos` + + + +
public fun next_validator_consensus_infos(): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + +
+Implementation + + +
public fun next_validator_consensus_infos(): vector<ValidatorConsensusInfo> acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorFees, ValidatorConfig {
+    // Init.
+    let cur_validator_set = borrow_global<ValidatorSet>(@aptos_framework);
+    let staking_config = staking_config::get();
+    let validator_perf = borrow_global<ValidatorPerformance>(@aptos_framework);
+    let (minimum_stake, _) = staking_config::get_required_stake(&staking_config);
+    let (rewards_rate, rewards_rate_denominator) = staking_config::get_reward_rate(&staking_config);
+
+    // Compute new validator set.
+    let new_active_validators = vector[];
+    let num_new_actives = 0;
+    let candidate_idx = 0;
+    let new_total_power = 0;
+    let num_cur_actives = vector::length(&cur_validator_set.active_validators);
+    let num_cur_pending_actives = vector::length(&cur_validator_set.pending_active);
+    spec {
+        assume num_cur_actives + num_cur_pending_actives <= MAX_U64;
+    };
+    let num_candidates = num_cur_actives + num_cur_pending_actives;
+    while ({
+        spec {
+            invariant candidate_idx <= num_candidates;
+            invariant spec_validators_are_initialized(new_active_validators);
+            invariant len(new_active_validators) == num_new_actives;
+            invariant forall i in 0..len(new_active_validators):
+                new_active_validators[i].config.validator_index == i;
+            invariant num_new_actives <= candidate_idx;
+            invariant spec_validators_are_initialized(new_active_validators);
+        };
+        candidate_idx < num_candidates
+    }) {
+        let candidate_in_current_validator_set = candidate_idx < num_cur_actives;
+        let candidate = if (candidate_idx < num_cur_actives) {
+            vector::borrow(&cur_validator_set.active_validators, candidate_idx)
+        } else {
+            vector::borrow(&cur_validator_set.pending_active, candidate_idx - num_cur_actives)
+        };
+        let stake_pool = borrow_global<StakePool>(candidate.addr);
+        let cur_active = coin::value(&stake_pool.active);
+        let cur_pending_active = coin::value(&stake_pool.pending_active);
+        let cur_pending_inactive = coin::value(&stake_pool.pending_inactive);
+
+        let cur_reward = if (candidate_in_current_validator_set && cur_active > 0) {
+            spec {
+                assert candidate.config.validator_index < len(validator_perf.validators);
+            };
+            let cur_perf = vector::borrow(&validator_perf.validators, candidate.config.validator_index);
+            spec {
+                assume cur_perf.successful_proposals + cur_perf.failed_proposals <= MAX_U64;
+            };
+            calculate_rewards_amount(cur_active, cur_perf.successful_proposals, cur_perf.successful_proposals + cur_perf.failed_proposals, rewards_rate, rewards_rate_denominator)
+        } else {
+            0
+        };
+
+        let cur_fee = 0;
+        if (features::collect_and_distribute_gas_fees()) {
+            let fees_table = &borrow_global<ValidatorFees>(@aptos_framework).fees_table;
+            if (table::contains(fees_table, candidate.addr)) {
+                let fee_coin = table::borrow(fees_table, candidate.addr);
+                cur_fee = coin::value(fee_coin);
+            }
+        };
+
+        let lockup_expired = get_reconfig_start_time_secs() >= stake_pool.locked_until_secs;
+        spec {
+            assume cur_active + cur_pending_active + cur_reward + cur_fee <= MAX_U64;
+            assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward + cur_fee <= MAX_U64;
+        };
+        let new_voting_power =
+            cur_active
+            + if (lockup_expired) { 0 } else { cur_pending_inactive }
+            + cur_pending_active
+            + cur_reward + cur_fee;
+
+        if (new_voting_power >= minimum_stake) {
+            let config = *borrow_global<ValidatorConfig>(candidate.addr);
+            config.validator_index = num_new_actives;
+            let new_validator_info = ValidatorInfo {
+                addr: candidate.addr,
+                voting_power: new_voting_power,
+                config,
+            };
+
+            // Update ValidatorSet.
+            spec {
+                assume new_total_power + new_voting_power <= MAX_U128;
+            };
+            new_total_power = new_total_power + (new_voting_power as u128);
+            vector::push_back(&mut new_active_validators, new_validator_info);
+            num_new_actives = num_new_actives + 1;
+
+        };
+        candidate_idx = candidate_idx + 1;
+    };
+
+    let new_validator_set = ValidatorSet {
+        consensus_scheme: cur_validator_set.consensus_scheme,
+        active_validators: new_active_validators,
+        pending_inactive: vector[],
+        pending_active: vector[],
+        total_voting_power: new_total_power,
+        total_joining_power: 0,
+    };
+
+    validator_consensus_infos_from_validator_set(&new_validator_set)
+}
+
+ + + +
+ + + +## Function `validator_consensus_infos_from_validator_set` + + + +
fun validator_consensus_infos_from_validator_set(validator_set: &stake::ValidatorSet): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + +
+Implementation + + +
fun validator_consensus_infos_from_validator_set(validator_set: &ValidatorSet): vector<ValidatorConsensusInfo> {
+    let validator_consensus_infos = vector[];
+
+    let num_active = vector::length(&validator_set.active_validators);
+    let num_pending_inactive = vector::length(&validator_set.pending_inactive);
+    spec {
+        assume num_active + num_pending_inactive <= MAX_U64;
+    };
+    let total = num_active + num_pending_inactive;
+
+    // Pre-fill the return value with dummy values.
+    let idx = 0;
+    while ({
+        spec {
+            invariant idx <= len(validator_set.active_validators) + len(validator_set.pending_inactive);
+            invariant len(validator_consensus_infos) == idx;
+            invariant len(validator_consensus_infos) <= len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        };
+        idx < total
+    }) {
+        vector::push_back(&mut validator_consensus_infos, validator_consensus_info::default());
+        idx = idx + 1;
+    };
+    spec {
+        assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        assert spec_validator_indices_are_valid_config(validator_set.active_validators,
+            len(validator_set.active_validators) + len(validator_set.pending_inactive));
+    };
+
+    vector::for_each_ref(&validator_set.active_validators, |obj| {
+        let vi: &ValidatorInfo = obj;
+        spec {
+            assume len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+            assert vi.config.validator_index < len(validator_consensus_infos);
+        };
+        let vci = vector::borrow_mut(&mut validator_consensus_infos, vi.config.validator_index);
+        *vci = validator_consensus_info::new(
+            vi.addr,
+            vi.config.consensus_pubkey,
+            vi.voting_power
+        );
+        spec {
+            assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        };
+    });
+
+    vector::for_each_ref(&validator_set.pending_inactive, |obj| {
+        let vi: &ValidatorInfo = obj;
+        spec {
+            assume len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+            assert vi.config.validator_index < len(validator_consensus_infos);
+        };
+        let vci = vector::borrow_mut(&mut validator_consensus_infos, vi.config.validator_index);
+        *vci = validator_consensus_info::new(
+            vi.addr,
+            vi.config.consensus_pubkey,
+            vi.voting_power
+        );
+        spec {
+            assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive);
+        };
+    });
+
+    validator_consensus_infos
+}
+
+ + + +
+ + + +## Function `addresses_from_validator_infos` + + + +
fun addresses_from_validator_infos(infos: &vector<stake::ValidatorInfo>): vector<address>
+
+ + + +
+Implementation + + +
fun addresses_from_validator_infos(infos: &vector<ValidatorInfo>): vector<address> {
+    vector::map_ref(infos, |obj| {
+        let info: &ValidatorInfo = obj;
+        info.addr
+    })
+}
+
+ + +
## Function `update_stake_pool` -Update individual validator's stake pool +Calculate the stake amount of a stake pool for the next epoch. +Update individual validator's stake pool if commit == true. + 1. distribute transaction fees to active/pending_inactive delegations 2. distribute rewards to active/pending_inactive delegations 3. process pending_active, pending_inactive correspondingly @@ -3217,7 +3591,7 @@ This function shouldn't abort. // Pending inactive stake is only fully unlocked and moved into inactive if the current lockup cycle has expired let current_lockup_expiration = stake_pool.locked_until_secs; - if (timestamp::now_seconds() >= current_lockup_expiration) { + if (get_reconfig_start_time_secs() >= current_lockup_expiration) { coin::merge( &mut stake_pool.inactive, coin::extract_all(&mut stake_pool.pending_inactive), @@ -3236,6 +3610,35 @@ This function shouldn't abort. + + + + +## Function `get_reconfig_start_time_secs` + +Assuming we are in a middle of a reconfiguration (no matter it is immediate or async), get its start time. + + +
fun get_reconfig_start_time_secs(): u64
+
+ + + +
+Implementation + + +
fun get_reconfig_start_time_secs(): u64 {
+    if (reconfiguration_state::is_initialized()) {
+        reconfiguration_state::start_time_secs()
+    } else {
+        timestamp::now_seconds()
+    }
+}
+
+ + +
@@ -3590,6 +3993,30 @@ Returns validator's next epoch voting power, including pending_active, active, a + + + + +## Function `assert_reconfig_not_in_progress` + + + +
fun assert_reconfig_not_in_progress()
+
+ + + +
+Implementation + + +
fun assert_reconfig_not_in_progress() {
+    assert!(!reconfiguration_state::is_in_progress(), error::invalid_state(ERECONFIGURATION_IN_PROGRESS));
+}
+
+ + +
@@ -3650,7 +4077,8 @@ Returns validator's next epoch voting power, including pending_active, active, a ### Module-level Specification -
invariant [suspendable] exists<ValidatorSet>(@aptos_framework) ==> validator_set_is_valid();
+
pragma verify = true;
+invariant [suspendable] exists<ValidatorSet>(@aptos_framework) ==> validator_set_is_valid();
 invariant [suspendable] chain_status::is_operating() ==> exists<AptosCoinCapabilities>(@aptos_framework);
 invariant [suspendable] chain_status::is_operating() ==> exists<ValidatorPerformance>(@aptos_framework);
 invariant [suspendable] chain_status::is_operating() ==> exists<ValidatorSet>(@aptos_framework);
@@ -3659,6 +4087,10 @@ Returns validator's next epoch voting power, including pending_active, active, a
 global ghost_valid_perf: ValidatorPerformance;
 
 global ghost_proposer_idx: Option<u64>;
+
+global ghost_active_num: u64;
+
+global ghost_pending_inactive_num: u64;
 
@@ -3668,7 +4100,7 @@ Returns validator's next epoch voting power, including pending_active, active, a ### Resource `ValidatorSet` -
struct ValidatorSet has key
+
struct ValidatorSet has copy, drop, store, key
 
@@ -3755,11 +4187,23 @@ Returns validator's next epoch voting power, including pending_active, active, a
fun validator_set_is_valid(): bool {
    let validator_set = global<ValidatorSet>(@aptos_framework);
+   validator_set_is_valid_impl(validator_set)
+}
+
+ + + + + + + +
fun validator_set_is_valid_impl(validator_set: ValidatorSet): bool {
    spec_validators_are_initialized(validator_set.active_validators) &&
        spec_validators_are_initialized(validator_set.pending_inactive) &&
        spec_validators_are_initialized(validator_set.pending_active) &&
        spec_validator_indices_are_valid(validator_set.active_validators) &&
        spec_validator_indices_are_valid(validator_set.pending_inactive)
+       && spec_validator_indices_active_pending_inactive(validator_set)
 }
 
@@ -3847,7 +4291,8 @@ Returns validator's next epoch voting power, including pending_active, active, a -
let aptos_addr = signer::address_of(aptos_framework);
+
pragma disable_invariants_in_body;
+let aptos_addr = signer::address_of(aptos_framework);
 aborts_if !system_addresses::is_aptos_framework_address(aptos_addr);
 aborts_if exists<ValidatorSet>(aptos_addr);
 aborts_if exists<ValidatorPerformance>(aptos_addr);
@@ -3871,8 +4316,14 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
requires chain_status::is_operating();
 let validator_set = global<ValidatorSet>(@aptos_framework);
+let post post_validator_set = global<ValidatorSet>(@aptos_framework);
 let active_validators = validator_set.active_validators;
+let post post_active_validators = post_validator_set.active_validators;
+let pending_inactive_validators = validator_set.pending_inactive;
+let post post_pending_inactive_validators = post_validator_set.pending_inactive;
 invariant len(active_validators) > 0;
+ensures len(active_validators) + len(pending_inactive_validators) == len(post_active_validators)
+    + len(post_pending_inactive_validators);
 
@@ -4035,7 +4486,8 @@ Returns validator's next epoch voting power, including pending_active, active, a -
include ResourceRequirement;
+
aborts_if reconfiguration_state::spec_is_in_progress();
+include ResourceRequirement;
 include AddStakeAbortsIfAndEnsures;
 
@@ -4054,6 +4506,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
include ResourceRequirement;
 let amount = coins.value;
+aborts_if reconfiguration_state::spec_is_in_progress();
 include AddStakeWithCapAbortsIfAndEnsures { amount };
 
@@ -4072,6 +4525,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
let pool_address = owner_cap.pool_address;
 include StakedValueNochange;
+aborts_if reconfiguration_state::spec_is_in_progress();
 aborts_if !stake_pool_exists(pool_address);
 let pre_stake_pool = global<StakePool>(pool_address);
 let post stake_pool = global<StakePool>(pool_address);
@@ -4096,6 +4550,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
let pre_stake_pool = global<StakePool>(pool_address);
 let post validator_info = global<ValidatorConfig>(pool_address);
+aborts_if reconfiguration_state::spec_is_in_progress();
 aborts_if !exists<StakePool>(pool_address);
 aborts_if signer::address_of(operator) != pre_stake_pool.operator_address;
 aborts_if !exists<ValidatorConfig>(pool_address);
@@ -4126,6 +4581,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 let post validator_info = global<ValidatorConfig>(pool_address);
 modifies global<ValidatorConfig>(pool_address);
 include StakedValueNochange;
+aborts_if reconfiguration_state::spec_is_in_progress();
 aborts_if !exists<StakePool>(pool_address);
 aborts_if !exists<ValidatorConfig>(pool_address);
 aborts_if signer::address_of(operator) != pre_stake_pool.operator_address;
@@ -4180,6 +4636,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 aborts_if !exists<ValidatorConfig>(pool_address);
 aborts_if !exists<StakingConfig>(@aptos_framework);
 aborts_if !exists<ValidatorSet>(@aptos_framework);
+aborts_if reconfiguration_state::spec_is_in_progress();
 let stake_pool = global<StakePool>(pool_address);
 let validator_set = global<ValidatorSet>(@aptos_framework);
 let post p_validator_set = global<ValidatorSet>(@aptos_framework);
@@ -4227,6 +4684,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
let pool_address = owner_cap.pool_address;
 let pre_stake_pool = global<StakePool>(pool_address);
 let post stake_pool = global<StakePool>(pool_address);
+aborts_if reconfiguration_state::spec_is_in_progress();
 aborts_if amount != 0 && !exists<StakePool>(pool_address);
 modifies global<StakePool>(pool_address);
 include StakedValueNochange;
@@ -4248,7 +4706,8 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
let addr = signer::address_of(owner);
+
aborts_if reconfiguration_state::spec_is_in_progress();
+let addr = signer::address_of(owner);
 let ownership_cap = global<OwnerCapability>(addr);
 let pool_address = ownership_cap.pool_address;
 let stake_pool = global<StakePool>(pool_address);
@@ -4291,7 +4750,9 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
requires chain_status::is_operating();
+
pragma disable_invariants_in_body;
+requires chain_status::is_operating();
+aborts_if reconfiguration_state::spec_is_in_progress();
 let config = staking_config::get();
 aborts_if !staking_config::get_allow_validator_set_change(config);
 aborts_if !exists<StakePool>(pool_address);
@@ -4303,6 +4764,12 @@ Returns validator's next epoch voting power, including pending_active, active, a
 let validator_find_bool = option::spec_is_some(spec_find_validator(validator_set.pending_active, pool_address));
 let active_validators = validator_set.active_validators;
 let pending_active = validator_set.pending_active;
+let post post_validator_set = global<ValidatorSet>(@aptos_framework);
+let post post_active_validators = post_validator_set.active_validators;
+let pending_inactive_validators = validator_set.pending_inactive;
+let post post_pending_inactive_validators = post_validator_set.pending_inactive;
+ensures len(active_validators) + len(pending_inactive_validators) == len(post_active_validators)
+    + len(post_pending_inactive_validators);
 aborts_if !validator_find_bool && !option::spec_is_some(spec_find_validator(active_validators, pool_address));
 aborts_if !validator_find_bool && vector::length(validator_set.active_validators) <= option::spec_borrow(spec_find_validator(active_validators, pool_address));
 aborts_if !validator_find_bool && vector::length(validator_set.active_validators) < 2;
@@ -4371,6 +4838,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
pragma verify_duration_estimate = 120;
 pragma disable_invariants_in_body;
 include ResourceRequirement;
+include GetReconfigStartTimeRequirement;
 include staking_config::StakingRewardsConfigRequirement;
 include aptos_framework::aptos_coin::ExistsAptosCoin;
 // This enforces high-level requirement 4:
@@ -4379,19 +4847,290 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
-
+
 
-### Function `update_stake_pool`
+### Function `next_validator_consensus_infos`
 
 
-
fun update_stake_pool(validator_perf: &stake::ValidatorPerformance, pool_address: address, staking_config: &staking_config::StakingConfig)
+
public fun next_validator_consensus_infos(): vector<validator_consensus_info::ValidatorConsensusInfo>
 
-
pragma verify_duration_estimate = 120;
+
aborts_if false;
 include ResourceRequirement;
+include GetReconfigStartTimeRequirement;
+include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement;
+
+ + + + + +### Function `validator_consensus_infos_from_validator_set` + + +
fun validator_consensus_infos_from_validator_set(validator_set: &stake::ValidatorSet): vector<validator_consensus_info::ValidatorConsensusInfo>
+
+ + + + +
aborts_if false;
+invariant spec_validator_indices_are_valid_config(validator_set.active_validators,
+    len(validator_set.active_validators) + len(validator_set.pending_inactive));
+invariant len(validator_set.pending_inactive) == 0 ||
+    spec_validator_indices_are_valid_config(validator_set.pending_inactive,
+        len(validator_set.active_validators) + len(validator_set.pending_inactive));
+
+ + + + + + + +
schema AddStakeWithCapAbortsIfAndEnsures {
+    owner_cap: OwnerCapability;
+    amount: u64;
+    let pool_address = owner_cap.pool_address;
+    aborts_if !exists<StakePool>(pool_address);
+    let config = global<staking_config::StakingConfig>(@aptos_framework);
+    let validator_set = global<ValidatorSet>(@aptos_framework);
+    let voting_power_increase_limit = config.voting_power_increase_limit;
+    let post post_validator_set = global<ValidatorSet>(@aptos_framework);
+    let update_voting_power_increase = amount != 0 && (spec_contains(validator_set.active_validators, pool_address)
+                                                       || spec_contains(validator_set.pending_active, pool_address));
+    aborts_if update_voting_power_increase && validator_set.total_joining_power + amount > MAX_U128;
+    ensures update_voting_power_increase ==> post_validator_set.total_joining_power == validator_set.total_joining_power + amount;
+    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
+            && validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
+    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
+            && validator_set.total_joining_power + amount > validator_set.total_voting_power * voting_power_increase_limit / 100;
+    let stake_pool = global<StakePool>(pool_address);
+    let post post_stake_pool = global<StakePool>(pool_address);
+    let value_pending_active = stake_pool.pending_active.value;
+    let value_active = stake_pool.active.value;
+    ensures amount != 0 && spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.pending_active.value == value_pending_active + amount;
+    ensures amount != 0 && !spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.active.value == value_active + amount;
+    let maximum_stake = config.maximum_stake;
+    let value_pending_inactive = stake_pool.pending_inactive.value;
+    let next_epoch_voting_power = value_pending_active + value_active + value_pending_inactive;
+    let voting_power = next_epoch_voting_power + amount;
+    aborts_if amount != 0 && voting_power > MAX_U64;
+    aborts_if amount != 0 && voting_power > maximum_stake;
+}
+
+ + + + + + + +
schema AddStakeAbortsIfAndEnsures {
+    owner: signer;
+    amount: u64;
+    let owner_address = signer::address_of(owner);
+    aborts_if !exists<OwnerCapability>(owner_address);
+    include coin::WithdrawAbortsIf<AptosCoin>{ account: owner };
+    let coin_store = global<coin::CoinStore<AptosCoin>>(owner_address);
+    let balance = coin_store.coin.value;
+    let post coin_post = global<coin::CoinStore<AptosCoin>>(owner_address).coin.value;
+    ensures coin_post == balance - amount;
+    let owner_cap = global<OwnerCapability>(owner_address);
+    include AddStakeWithCapAbortsIfAndEnsures { owner_cap };
+}
+
+ + + + + + + +
fun spec_is_allowed(account: address): bool {
+   if (!exists<AllowedValidators>(@aptos_framework)) {
+       true
+   } else {
+       let allowed = global<AllowedValidators>(@aptos_framework);
+       contains(allowed.accounts, account)
+   }
+}
+
+ + + + + + + +
fun spec_find_validator(v: vector<ValidatorInfo>, addr: address): Option<u64>;
+
+ + + + + + + +
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
+   forall i in 0..len(validators):
+       spec_has_stake_pool(validators[i].addr) &&
+           spec_has_validator_config(validators[i].addr)
+}
+
+ + + + + + + +
fun spec_validators_are_initialized_addrs(addrs: vector<address>): bool {
+   forall i in 0..len(addrs):
+       spec_has_stake_pool(addrs[i]) &&
+           spec_has_validator_config(addrs[i])
+}
+
+ + + + + + + +
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
+   spec_validator_indices_are_valid_addr(validators, spec_validator_index_upper_bound()) &&
+       spec_validator_indices_are_valid_config(validators, spec_validator_index_upper_bound())
+}
+
+ + + + + + + +
fun spec_validator_indices_are_valid_addr(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
+   forall i in 0..len(validators):
+       global<ValidatorConfig>(validators[i].addr).validator_index < upper_bound
+}
+
+ + + + + + + +
fun spec_validator_indices_are_valid_config(validators: vector<ValidatorInfo>, upper_bound: u64): bool {
+   forall i in 0..len(validators):
+       validators[i].config.validator_index < upper_bound
+}
+
+ + + + + + + +
fun spec_validator_indices_active_pending_inactive(validator_set: ValidatorSet): bool {
+   len(validator_set.pending_inactive) + len(validator_set.active_validators) == spec_validator_index_upper_bound()
+}
+
+ + + + + + + +
fun spec_validator_index_upper_bound(): u64 {
+   len(global<ValidatorPerformance>(@aptos_framework).validators)
+}
+
+ + + + + + + +
fun spec_has_stake_pool(a: address): bool {
+   exists<StakePool>(a)
+}
+
+ + + + + + + +
fun spec_has_validator_config(a: address): bool {
+   exists<ValidatorConfig>(a)
+}
+
+ + + + + + + +
fun spec_rewards_amount(
+   stake_amount: u64,
+   num_successful_proposals: u64,
+   num_total_proposals: u64,
+   rewards_rate: u64,
+   rewards_rate_denominator: u64,
+): u64;
+
+ + + + + + + +
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
+   exists i in 0..len(validators): validators[i].addr == addr
+}
+
+ + + + + + + +
fun spec_is_current_epoch_validator(pool_address: address): bool {
+   let validator_set = global<ValidatorSet>(@aptos_framework);
+   !spec_contains(validator_set.pending_active, pool_address)
+       && (spec_contains(validator_set.active_validators, pool_address)
+       || spec_contains(validator_set.pending_inactive, pool_address))
+}
+
+ + + + + +### Function `update_stake_pool` + + +
fun update_stake_pool(validator_perf: &stake::ValidatorPerformance, pool_address: address, staking_config: &staking_config::StakingConfig)
+
+ + + + +
pragma verify_duration_estimate = 120;
+include ResourceRequirement;
+include GetReconfigStartTimeRequirement;
 include staking_config::StakingRewardsConfigRequirement;
 include UpdateStakePoolAbortsIf;
 let stake_pool = global<StakePool>(pool_address);
@@ -4424,7 +5163,7 @@ Returns validator's next epoch voting power, including pending_active, active, a
 } else {
     post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value
 };
-ensures if (timestamp::spec_now_seconds() >= stake_pool.locked_until_secs) {
+ensures if (spec_get_reconfig_start_time_secs() >= stake_pool.locked_until_secs) {
     post_pending_inactive_value == 0 &&
     post_inactive_value == stake_pool.inactive.value + stake_pool.pending_inactive.value + rewards_amount_2
 } else {
@@ -4454,6 +5193,49 @@ Returns validator's next epoch voting power, including pending_active, active, a
 
 
 
+
+
+### Function `get_reconfig_start_time_secs`
+
+
+
fun get_reconfig_start_time_secs(): u64
+
+ + + + +
include GetReconfigStartTimeRequirement;
+
+ + + + + + + +
schema GetReconfigStartTimeRequirement {
+    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+    include reconfiguration_state::StartTimeSecsRequirement;
+}
+
+ + + + + + + +
fun spec_get_reconfig_start_time_secs(): u64 {
+   if (exists<reconfiguration_state::State>(@aptos_framework)) {
+       reconfiguration_state::spec_start_time_secs()
+   } else {
+       timestamp::spec_now_seconds()
+   }
+}
+
+ + + ### Function `calculate_rewards_amount` @@ -4524,32 +5306,6 @@ Returns validator's next epoch voting power, including pending_active, active, a - - - - -
schema DistributeRewardsAbortsIf {
-    stake: Coin<AptosCoin>;
-    num_successful_proposals: num;
-    num_total_proposals: num;
-    rewards_rate: num;
-    rewards_rate_denominator: num;
-    let stake_amount = coin::value(stake);
-    let rewards_amount = if (stake_amount > 0) {
-        spec_rewards_amount(stake_amount, num_successful_proposals, num_total_proposals, rewards_rate, rewards_rate_denominator)
-    } else {
-        0
-    };
-    let amount = rewards_amount;
-    let addr = type_info::type_of<AptosCoin>().account_address;
-    aborts_if (rewards_amount > 0) && !exists<coin::CoinInfo<AptosCoin>>(addr);
-    modifies global<coin::CoinInfo<AptosCoin>>(addr);
-    include (rewards_amount > 0) ==> coin::CoinAddAbortsIf<AptosCoin> { amount: amount };
-}
-
- - - ### Function `append` @@ -4671,256 +5427,4 @@ Returns validator's next epoch voting power, including pending_active, active, a
- - - - - -
schema AddStakeWithCapAbortsIfAndEnsures {
-    owner_cap: OwnerCapability;
-    amount: u64;
-    let pool_address = owner_cap.pool_address;
-    aborts_if !exists<StakePool>(pool_address);
-    let config = global<staking_config::StakingConfig>(@aptos_framework);
-    let validator_set = global<ValidatorSet>(@aptos_framework);
-    let voting_power_increase_limit = config.voting_power_increase_limit;
-    let post post_validator_set = global<ValidatorSet>(@aptos_framework);
-    let update_voting_power_increase = amount != 0 && (spec_contains(validator_set.active_validators, pool_address)
-                                                       || spec_contains(validator_set.pending_active, pool_address));
-    aborts_if update_voting_power_increase && validator_set.total_joining_power + amount > MAX_U128;
-    ensures update_voting_power_increase ==> post_validator_set.total_joining_power == validator_set.total_joining_power + amount;
-    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
-            && validator_set.total_voting_power * voting_power_increase_limit > MAX_U128;
-    aborts_if update_voting_power_increase && validator_set.total_voting_power > 0
-            && validator_set.total_joining_power + amount > validator_set.total_voting_power * voting_power_increase_limit / 100;
-    let stake_pool = global<StakePool>(pool_address);
-    let post post_stake_pool = global<StakePool>(pool_address);
-    let value_pending_active = stake_pool.pending_active.value;
-    let value_active = stake_pool.active.value;
-    ensures amount != 0 && spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.pending_active.value == value_pending_active + amount;
-    ensures amount != 0 && !spec_is_current_epoch_validator(pool_address) ==> post_stake_pool.active.value == value_active + amount;
-    let maximum_stake = config.maximum_stake;
-    let value_pending_inactive = stake_pool.pending_inactive.value;
-    let next_epoch_voting_power = value_pending_active + value_active + value_pending_inactive;
-    let voting_power = next_epoch_voting_power + amount;
-    aborts_if amount != 0 && voting_power > MAX_U64;
-    aborts_if amount != 0 && voting_power > maximum_stake;
-}
-
- - - - - - - -
schema AddStakeAbortsIfAndEnsures {
-    owner: signer;
-    amount: u64;
-    let owner_address = signer::address_of(owner);
-    aborts_if !exists<OwnerCapability>(owner_address);
-    include coin::WithdrawAbortsIf<AptosCoin>{ account: owner };
-    let coin_store = global<coin::CoinStore<AptosCoin>>(owner_address);
-    let balance = coin_store.coin.value;
-    let post coin_post = global<coin::CoinStore<AptosCoin>>(owner_address).coin.value;
-    ensures coin_post == balance - amount;
-    let owner_cap = global<OwnerCapability>(owner_address);
-    include AddStakeWithCapAbortsIfAndEnsures { owner_cap };
-}
-
- - - - - - - -
fun spec_is_allowed(account: address): bool {
-   if (!exists<AllowedValidators>(@aptos_framework)) {
-       true
-   } else {
-       let allowed = global<AllowedValidators>(@aptos_framework);
-       contains(allowed.accounts, account)
-   }
-}
-
- - - - - - - -
fun spec_find_validator(v: vector<ValidatorInfo>, addr: address): Option<u64>;
-
- - - - - - - -
fun spec_validators_are_initialized(validators: vector<ValidatorInfo>): bool {
-   forall i in 0..len(validators):
-       spec_has_stake_pool(validators[i].addr) &&
-           spec_has_validator_config(validators[i].addr)
-}
-
- - - - - - - -
fun spec_validator_indices_are_valid(validators: vector<ValidatorInfo>): bool {
-   forall i in 0..len(validators):
-       global<ValidatorConfig>(validators[i].addr).validator_index < spec_validator_index_upper_bound()
-}
-
- - - - - - - -
fun spec_validator_index_upper_bound(): u64 {
-   len(global<ValidatorPerformance>(@aptos_framework).validators)
-}
-
- - - - - - - -
fun spec_has_stake_pool(a: address): bool {
-   exists<StakePool>(a)
-}
-
- - - - - - - -
fun spec_has_validator_config(a: address): bool {
-   exists<ValidatorConfig>(a)
-}
-
- - - - - - - -
fun spec_rewards_amount(
-   stake_amount: u64,
-   num_successful_proposals: u64,
-   num_total_proposals: u64,
-   rewards_rate: u64,
-   rewards_rate_denominator: u64,
-): u64;
-
- - - - - - - -
fun spec_contains(validators: vector<ValidatorInfo>, addr: address): bool {
-   exists i in 0..len(validators): validators[i].addr == addr
-}
-
- - - - - - - -
fun spec_is_current_epoch_validator(pool_address: address): bool {
-   let validator_set = global<ValidatorSet>(@aptos_framework);
-   !spec_contains(validator_set.pending_active, pool_address)
-       && (spec_contains(validator_set.active_validators, pool_address)
-       || spec_contains(validator_set.pending_inactive, pool_address))
-}
-
- - - - - - - -
schema ResourceRequirement {
-    requires exists<AptosCoinCapabilities>(@aptos_framework);
-    requires exists<ValidatorPerformance>(@aptos_framework);
-    requires exists<ValidatorSet>(@aptos_framework);
-    requires exists<StakingConfig>(@aptos_framework);
-    requires exists<StakingRewardsConfig>(@aptos_framework) || !features::spec_periodical_reward_rate_decrease_enabled();
-    requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
-    requires exists<ValidatorFees>(@aptos_framework);
-}
-
- - - - - - - -
fun spec_get_reward_rate_1(config: StakingConfig): num {
-   if (features::spec_periodical_reward_rate_decrease_enabled()) {
-       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
-       if (epoch_rewards_rate.value == 0) {
-           0
-       } else {
-           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
-           let denominator = if (denominator_0 > MAX_U64) {
-               MAX_U64
-           } else {
-               denominator_0
-           };
-           let nominator = aptos_std::fixed_point64::spec_multiply_u128(denominator, epoch_rewards_rate);
-           nominator
-       }
-   } else {
-           config.rewards_rate
-   }
-}
-
- - - - - - - -
fun spec_get_reward_rate_2(config: StakingConfig): num {
-   if (features::spec_periodical_reward_rate_decrease_enabled()) {
-       let epoch_rewards_rate = global<staking_config::StakingRewardsConfig>(@aptos_framework).rewards_rate;
-       if (epoch_rewards_rate.value == 0) {
-           1
-       } else {
-           let denominator_0 = aptos_std::fixed_point64::spec_divide_u128(staking_config::MAX_REWARDS_RATE, epoch_rewards_rate);
-           let denominator = if (denominator_0 > MAX_U64) {
-               MAX_U64
-           } else {
-               denominator_0
-           };
-           denominator
-       }
-   } else {
-           config.rewards_rate_denominator
-   }
-}
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/staking_config.md b/aptos-move/framework/aptos-framework/doc/staking_config.md index 219365bee5213..0f89c9afc8de1 100644 --- a/aptos-move/framework/aptos-framework/doc/staking_config.md +++ b/aptos-move/framework/aptos-framework/doc/staking_config.md @@ -1226,7 +1226,8 @@ Abort at any condition in StakingRewardsConfigValidationAborts. StakingRewardsConfig does not exist under the aptos_framework before creating it. -
requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
+
pragma verify_duration_estimate = 120;
+requires exists<timestamp::CurrentTimeMicroseconds>(@aptos_framework);
 let addr = signer::address_of(aptos_framework);
 // This enforces high-level requirement 1:
 aborts_if addr != @aptos_framework;
diff --git a/aptos-move/framework/aptos-framework/doc/staking_contract.md b/aptos-move/framework/aptos-framework/doc/staking_contract.md
index a00c1023f7a69..a9a2f52f1182c 100644
--- a/aptos-move/framework/aptos-framework/doc/staking_contract.md
+++ b/aptos-move/framework/aptos-framework/doc/staking_contract.md
@@ -2485,6 +2485,7 @@ Staking contract exists for the staker/operator pair.
 
 
pragma verify_duration_estimate = 120;
 include stake::ResourceRequirement;
+aborts_if reconfiguration_state::spec_is_in_progress();
 let staker_address = signer::address_of(staker);
 include ContractExistsAbortsIf { staker: staker_address };
 let store = global<Store>(staker_address);
diff --git a/aptos-move/framework/aptos-framework/doc/storage_gas.md b/aptos-move/framework/aptos-framework/doc/storage_gas.md
index 6e6f3e7f560e7..e9097dcf52518 100644
--- a/aptos-move/framework/aptos-framework/doc/storage_gas.md
+++ b/aptos-move/framework/aptos-framework/doc/storage_gas.md
@@ -1172,6 +1172,47 @@ target utilization.
 ## Specification
 
 
+
+
+
+
+
fun spec_calculate_gas(max_usage: u64, current_usage: u64, curve: GasCurve): u64;
+
+ + + + + + + +
schema NewGasCurveAbortsIf {
+    min_gas: u64;
+    max_gas: u64;
+    aborts_if max_gas < min_gas;
+    aborts_if max_gas > MAX_U64 / BASIS_POINT_DENOMINATION;
+}
+
+ + +A non decreasing curve must ensure that next is greater than cur. + + + + + +
schema ValidatePointsAbortsIf {
+    points: vector<Point>;
+    // This enforces high-level requirement 2:
+    aborts_if exists i in 0..len(points) - 1: (
+        points[i].x >= points[i + 1].x || points[i].y > points[i + 1].y
+    );
+    aborts_if len(points) > 0 && points[0].x == 0;
+    aborts_if len(points) > 0 && points[len(points) - 1].x == BASIS_POINT_DENOMINATION;
+}
+
+ + + ### Struct `Point` @@ -1416,7 +1457,8 @@ that is, the gas-curve is a monotonically increasing function. A non decreasing curve must ensure that next is greater than cur. -
include NewGasCurveAbortsIf;
+
pragma verify_duration_estimate = 120;
+include NewGasCurveAbortsIf;
 include ValidatePointsAbortsIf;
 // This enforces high-level requirement 3:
 ensures result == GasCurve {
@@ -1525,7 +1567,7 @@ A non decreasing curve must ensure that next is greater than cur.
 
 
 
pragma aborts_if_is_strict = false;
-pragma verify = false;
+pragma verify_duration_estimate = 120;
 pragma opaque;
 include ValidatePointsAbortsIf;
 
@@ -1590,45 +1632,4 @@ Address @aptos_framework must exist StorageGasConfig and StorageGas and StateSto
- - - - - -
fun spec_calculate_gas(max_usage: u64, current_usage: u64, curve: GasCurve): u64;
-
- - - - - - - -
schema NewGasCurveAbortsIf {
-    min_gas: u64;
-    max_gas: u64;
-    aborts_if max_gas < min_gas;
-    aborts_if max_gas > MAX_U64 / BASIS_POINT_DENOMINATION;
-}
-
- - -A non decreasing curve must ensure that next is greater than cur. - - - - - -
schema ValidatePointsAbortsIf {
-    points: vector<Point>;
-    // This enforces high-level requirement 2:
-    aborts_if exists i in 0..len(points) - 1: (
-        points[i].x >= points[i + 1].x || points[i].y > points[i + 1].y
-    );
-    aborts_if len(points) > 0 && points[0].x == 0;
-    aborts_if len(points) > 0 && points[len(points) - 1].x == BASIS_POINT_DENOMINATION;
-}
-
- - [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/transaction_context.md b/aptos-move/framework/aptos-framework/doc/transaction_context.md index 4e755c20ae0d3..6c7b65cb802c9 100644 --- a/aptos-move/framework/aptos-framework/doc/transaction_context.md +++ b/aptos-move/framework/aptos-framework/doc/transaction_context.md @@ -282,6 +282,8 @@ the generated unique address wrapped in the AUID class.
pragma opaque;
+aborts_if [abstract] false;
+ensures result == spec_get_txn_hash();
 // This enforces high-level requirement 1:
 ensures [abstract] len(result) == 32;
 
diff --git a/aptos-move/framework/aptos-framework/doc/validator_consensus_info.md b/aptos-move/framework/aptos-framework/doc/validator_consensus_info.md new file mode 100644 index 0000000000000..4bdf814325893 --- /dev/null +++ b/aptos-move/framework/aptos-framework/doc/validator_consensus_info.md @@ -0,0 +1,205 @@ + + + +# Module `0x1::validator_consensus_info` + +Common type: ValidatorConsensusInfo. + + +- [Struct `ValidatorConsensusInfo`](#0x1_validator_consensus_info_ValidatorConsensusInfo) +- [Function `default`](#0x1_validator_consensus_info_default) +- [Function `new`](#0x1_validator_consensus_info_new) +- [Function `get_addr`](#0x1_validator_consensus_info_get_addr) +- [Function `get_pk_bytes`](#0x1_validator_consensus_info_get_pk_bytes) +- [Function `get_voting_power`](#0x1_validator_consensus_info_get_voting_power) +- [Specification](#@Specification_0) + + +
+ + + + + +## Struct `ValidatorConsensusInfo` + +Information about a validator that participates consensus. + + +
struct ValidatorConsensusInfo has copy, drop, store
+
+ + + +
+Fields + + +
+
+addr: address +
+
+ +
+
+pk_bytes: vector<u8> +
+
+ +
+
+voting_power: u64 +
+
+ +
+
+ + +
+ + + +## Function `default` + +Create a default ValidatorConsensusInfo object. Value may be invalid. Only for placeholder purposes. + + +
public fun default(): validator_consensus_info::ValidatorConsensusInfo
+
+ + + +
+Implementation + + +
public fun default(): ValidatorConsensusInfo {
+    ValidatorConsensusInfo {
+        addr: @vm,
+        pk_bytes: vector[],
+        voting_power: 0,
+    }
+}
+
+ + + +
+ + + +## Function `new` + +Create a ValidatorConsensusInfo object. + + +
public fun new(addr: address, pk_bytes: vector<u8>, voting_power: u64): validator_consensus_info::ValidatorConsensusInfo
+
+ + + +
+Implementation + + +
public fun new(addr: address, pk_bytes: vector<u8>, voting_power: u64): ValidatorConsensusInfo {
+    ValidatorConsensusInfo {
+        addr,
+        pk_bytes,
+        voting_power,
+    }
+}
+
+ + + +
+ + + +## Function `get_addr` + +Get ValidatorConsensusInfo.addr. + + +
public fun get_addr(vci: &validator_consensus_info::ValidatorConsensusInfo): address
+
+ + + +
+Implementation + + +
public fun get_addr(vci: &ValidatorConsensusInfo): address {
+    vci.addr
+}
+
+ + + +
+ + + +## Function `get_pk_bytes` + +Get ValidatorConsensusInfo.pk_bytes. + + +
public fun get_pk_bytes(vci: &validator_consensus_info::ValidatorConsensusInfo): vector<u8>
+
+ + + +
+Implementation + + +
public fun get_pk_bytes(vci: &ValidatorConsensusInfo): vector<u8> {
+    vci.pk_bytes
+}
+
+ + + +
+ + + +## Function `get_voting_power` + +Get ValidatorConsensusInfo.voting_power. + + +
public fun get_voting_power(vci: &validator_consensus_info::ValidatorConsensusInfo): u64
+
+ + + +
+Implementation + + +
public fun get_voting_power(vci: &ValidatorConsensusInfo): u64 {
+    vci.voting_power
+}
+
+ + + +
+ + + +## Specification + + + +
pragma verify = true;
+
+ + +[move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-framework/doc/version.md b/aptos-move/framework/aptos-framework/doc/version.md index ddb964461d9ab..d155216e24a5d 100644 --- a/aptos-move/framework/aptos-framework/doc/version.md +++ b/aptos-move/framework/aptos-framework/doc/version.md @@ -11,16 +11,21 @@ Maintains the version number for the blockchain. - [Constants](#@Constants_0) - [Function `initialize`](#0x1_version_initialize) - [Function `set_version`](#0x1_version_set_version) +- [Function `set_for_next_epoch`](#0x1_version_set_for_next_epoch) +- [Function `on_new_epoch`](#0x1_version_on_new_epoch) - [Function `initialize_for_test`](#0x1_version_initialize_for_test) - [Specification](#@Specification_1) - [High-level Requirements](#high-level-req) - [Module-level Specification](#module-level-spec) - [Function `initialize`](#@Specification_1_initialize) - [Function `set_version`](#@Specification_1_set_version) + - [Function `set_for_next_epoch`](#@Specification_1_set_for_next_epoch) + - [Function `on_new_epoch`](#@Specification_1_on_new_epoch) - [Function `initialize_for_test`](#@Specification_1_initialize_for_test) -
use 0x1::error;
+
use 0x1::config_buffer;
+use 0x1::error;
 use 0x1::reconfiguration;
 use 0x1::signer;
 use 0x1::system_addresses;
@@ -34,7 +39,7 @@ Maintains the version number for the blockchain.
 
 
 
-
struct Version has key
+
struct Version has drop, store, key
 
@@ -142,8 +147,11 @@ Publishes the Version config. ## Function `set_version` -Updates the major version to a larger version. -This can be called by on chain governance. +Deprecated by set_for_next_epoch(). + +WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + +TODO: update all the tests that reference this function, then disable this function.
public entry fun set_version(account: &signer, major: u64)
@@ -171,6 +179,66 @@ This can be called by on chain governance.
 
 
 
+
+
+
+
+## Function `set_for_next_epoch`
+
+Used in on-chain governance to update the major version for the next epoch.
+Example usage:
+```
+aptos_framework::version::set_for_next_epoch(&framework_signer, new_version);
+aptos_framework::aptos_governance::reconfigure(&framework_signer);
+```
+
+
+
public fun set_for_next_epoch(account: &signer, major: u64)
+
+ + + +
+Implementation + + +
public fun set_for_next_epoch(account: &signer, major: u64) acquires Version {
+    assert!(exists<SetVersionCapability>(signer::address_of(account)), error::permission_denied(ENOT_AUTHORIZED));
+    let old_major = borrow_global<Version>(@aptos_framework).major;
+    assert!(old_major < major, error::invalid_argument(EINVALID_MAJOR_VERSION_NUMBER));
+    config_buffer::upsert(Version {major});
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Only used in reconfigurations to apply the pending Version, if there is any. + + +
public(friend) fun on_new_epoch()
+
+ + + +
+Implementation + + +
public(friend) fun on_new_epoch() acquires Version {
+    if (config_buffer::does_exist<Version>()) {
+        *borrow_global_mut<Version>(@aptos_framework) = config_buffer::extract<Version>();
+    }
+}
+
+ + +
@@ -300,6 +368,41 @@ Abort if resource already exists in @aptos_framwork when initializi + + +### Function `set_for_next_epoch` + + +
public fun set_for_next_epoch(account: &signer, major: u64)
+
+ + + + +
aborts_if !exists<SetVersionCapability>(signer::address_of(account));
+aborts_if !exists<Version>(@aptos_framework);
+aborts_if global<Version>(@aptos_framework).major >= major;
+aborts_if !exists<config_buffer::PendingConfigs>(@aptos_framework);
+
+ + + + + +### Function `on_new_epoch` + + +
public(friend) fun on_new_epoch()
+
+ + + + +
include config_buffer::OnNewEpochAbortsIf<Version>;
+
+ + + ### Function `initialize_for_test` diff --git a/aptos-move/framework/aptos-framework/sources/aptos_governance.move b/aptos-move/framework/aptos-framework/sources/aptos_governance.move index e2a649a7c4adb..54475964388cb 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_governance.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_governance.move @@ -26,11 +26,12 @@ module aptos_framework::aptos_governance { use aptos_framework::coin; use aptos_framework::event::{Self, EventHandle}; use aptos_framework::governance_proposal::{Self, GovernanceProposal}; - use aptos_framework::reconfiguration; use aptos_framework::stake; use aptos_framework::staking_config; use aptos_framework::system_addresses; use aptos_framework::aptos_coin::{Self, AptosCoin}; + use aptos_framework::consensus_config; + use aptos_framework::reconfiguration_with_dkg; use aptos_framework::timestamp; use aptos_framework::voting; @@ -536,17 +537,40 @@ module aptos_framework::aptos_governance { }; } - /// Force reconfigure. To be called at the end of a proposal that alters on-chain configs. + /// Manually reconfigure. Called at the end of a governance txn that alters on-chain configs. + /// + /// WARNING: this function always ensures a reconfiguration starts, but when the reconfiguration finishes depends. + /// - If feature `RECONFIGURE_WITH_DKG` is disabled, it finishes immediately. + /// - At the end of the calling transaction, we will be in a new epoch. + /// - If feature `RECONFIGURE_WITH_DKG` is enabled, it starts DKG, and the new epoch will start in a block prologue after DKG finishes. + /// + /// This behavior affects when an update of an on-chain config (e.g. `ConsensusConfig`, `Features`) takes effect, + /// since such updates are applied whenever we enter an new epoch. 
public fun reconfigure(aptos_framework: &signer) { system_addresses::assert_aptos_framework(aptos_framework); - reconfiguration::reconfigure(); + if (consensus_config::validator_txn_enabled() && features::reconfigure_with_dkg_enabled()) { + reconfiguration_with_dkg::try_start(); + } else { + reconfiguration_with_dkg::finish(aptos_framework); + } + } + + /// Change epoch immediately. + /// If `RECONFIGURE_WITH_DKG` is enabled and we are in the middle of a DKG, + /// stop waiting for DKG and enter the new epoch without randomness. + /// + /// WARNING: currently only used by tests. In most cases you should use `reconfigure()` instead. + /// TODO: migrate these tests to be aware of async reconfiguration. + public fun force_end_epoch(aptos_framework: &signer) { + system_addresses::assert_aptos_framework(aptos_framework); + reconfiguration_with_dkg::finish(aptos_framework); } /// Update feature flags and also trigger reconfiguration. public fun toggle_features(aptos_framework: &signer, enable: vector, disable: vector) { system_addresses::assert_aptos_framework(aptos_framework); - features::change_feature_flags(aptos_framework, enable, disable); - reconfiguration::reconfigure(); + features::change_feature_flags_for_next_epoch(aptos_framework, enable, disable); + reconfigure(aptos_framework); } /// Only called in testnet where the core resources account exists and has been granted power to mint Aptos coins. 
diff --git a/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move b/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move index 749601bee40d6..a89bf5ed807b4 100644 --- a/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aptos_governance.spec.move @@ -139,11 +139,13 @@ spec aptos_framework::aptos_governance { use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; use aptos_framework::transaction_fee; - - pragma verify_duration_estimate = 200; + pragma verify_duration_estimate = 600; let addr = signer::address_of(aptos_framework); aborts_if addr != @aptos_framework; - + include reconfiguration_with_dkg::FinishRequirement { + account: aptos_framework + }; + include stake::GetReconfigStartTimeRequirement; include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires chain_status::is_operating(); requires exists(@aptos_framework); @@ -577,9 +579,12 @@ spec aptos_framework::aptos_governance { use aptos_framework::coin::CoinInfo; use aptos_framework::aptos_coin::AptosCoin; use aptos_framework::transaction_fee; - - pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved) + pragma verify_duration_estimate = 600; // TODO: set because of timeout (property proved) aborts_if !system_addresses::is_aptos_framework_address(signer::address_of(aptos_framework)); + include reconfiguration_with_dkg::FinishRequirement { + account: aptos_framework + }; + include stake::GetReconfigStartTimeRequirement; include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires chain_status::is_operating(); @@ -829,6 +834,16 @@ spec aptos_framework::aptos_governance { include VotingInitializationAbortIfs; } + spec force_end_epoch(aptos_framework: &signer) { + use aptos_framework::reconfiguration_with_dkg; + use std::signer; + pragma verify_duration_estimate = 600; + let address = 
signer::address_of(aptos_framework); + include reconfiguration_with_dkg::FinishRequirement { + account: aptos_framework + }; + } + spec schema VotingInitializationAbortIfs { aborts_if features::spec_partial_governance_voting_enabled() && !exists(@aptos_framework); } diff --git a/aptos-move/framework/aptos-framework/sources/block.move b/aptos-move/framework/aptos-framework/sources/block.move index 325b5cbd10c29..0c63dc249e56c 100644 --- a/aptos-move/framework/aptos-framework/sources/block.move +++ b/aptos-move/framework/aptos-framework/sources/block.move @@ -5,10 +5,14 @@ module aptos_framework::block { use std::vector; use std::option; use aptos_std::table_with_length::{Self, TableWithLength}; + use std::option::Option; + use aptos_framework::randomness; use aptos_framework::account; + use aptos_framework::dkg; use aptos_framework::event::{Self, EventHandle}; use aptos_framework::reconfiguration; + use aptos_framework::reconfiguration_with_dkg; use aptos_framework::stake; use aptos_framework::state_storage; use aptos_framework::system_addresses; @@ -122,10 +126,9 @@ module aptos_framework::block { borrow_global(@aptos_framework).epoch_interval / 1000000 } - /// Set the metadata for the current block. - /// The runtime always runs this before executing the transactions in a block. - fun block_prologue( - vm: signer, + + fun block_prologue_common( + vm: &signer, hash: address, epoch: u64, round: u64, @@ -133,9 +136,9 @@ module aptos_framework::block { failed_proposer_indices: vector, previous_block_votes_bitvec: vector, timestamp: u64 - ) acquires BlockResource, CommitHistory { + ): u64 acquires BlockResource, CommitHistory { // Operational constraint: can only be invoked by the VM. - system_addresses::assert_vm(&vm); + system_addresses::assert_vm(vm); // Blocks can only be produced by a valid proposer or by the VM itself for Nil blocks (no user txs). 
assert!( @@ -161,7 +164,7 @@ module aptos_framework::block { failed_proposer_indices, time_microseconds: timestamp, }; - emit_new_block_event(&vm, &mut block_metadata_ref.new_block_events, new_block_event); + emit_new_block_event(vm, &mut block_metadata_ref.new_block_events, new_block_event); if (features::collect_and_distribute_gas_fees()) { // Assign the fees collected from the previous block to the previous block proposer. @@ -177,11 +180,57 @@ module aptos_framework::block { stake::update_performance_statistics(proposer_index, failed_proposer_indices); state_storage::on_new_block(reconfiguration::current_epoch()); - if (timestamp - reconfiguration::last_reconfiguration_time() >= block_metadata_ref.epoch_interval) { + block_metadata_ref.epoch_interval + } + + /// Set the metadata for the current block. + /// The runtime always runs this before executing the transactions in a block. + fun block_prologue( + vm: signer, + hash: address, + epoch: u64, + round: u64, + proposer: address, + failed_proposer_indices: vector, + previous_block_votes_bitvec: vector, + timestamp: u64 + ) acquires BlockResource, CommitHistory { + let epoch_interval = block_prologue_common(&vm, hash, epoch, round, proposer, failed_proposer_indices, previous_block_votes_bitvec, timestamp); + randomness::on_new_block(&vm, epoch, round, option::none()); + if (timestamp - reconfiguration::last_reconfiguration_time() >= epoch_interval) { reconfiguration::reconfigure(); }; } + /// `block_prologue()` but trigger reconfiguration with DKG after epoch timed out. 
+ fun block_prologue_ext( + vm: signer, + hash: address, + epoch: u64, + round: u64, + proposer: address, + failed_proposer_indices: vector, + previous_block_votes_bitvec: vector, + timestamp: u64, + randomness_seed: Option>, + ) acquires BlockResource, CommitHistory { + let epoch_interval = block_prologue_common( + &vm, + hash, + epoch, + round, + proposer, + failed_proposer_indices, + previous_block_votes_bitvec, + timestamp + ); + randomness::on_new_block(&vm, epoch, round, randomness_seed); + + if (!dkg::in_progress() && timestamp - reconfiguration::last_reconfiguration_time() >= epoch_interval) { + reconfiguration_with_dkg::try_start(); + } + } + #[view] /// Get the current block height public fun get_current_block_height(): u64 acquires BlockResource { diff --git a/aptos-move/framework/aptos-framework/sources/block.spec.move b/aptos-move/framework/aptos-framework/sources/block.spec.move index ce991fce86efd..ffd4d60139a21 100644 --- a/aptos-move/framework/aptos-framework/sources/block.spec.move +++ b/aptos-move/framework/aptos-framework/sources/block.spec.move @@ -51,27 +51,26 @@ spec aptos_framework::block { invariant epoch_interval > 0; } - spec block_prologue { - use aptos_framework::chain_status; - use aptos_framework::coin::CoinInfo; - use aptos_framework::aptos_coin::AptosCoin; - use aptos_framework::transaction_fee; - use aptos_framework::staking_config; + spec block_prologue_common { + pragma verify_duration_estimate = 1000; // TODO: set because of timeout (property proved) + include BlockRequirement; + aborts_if false; + } - pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved) + spec block_prologue { - requires chain_status::is_operating(); - requires system_addresses::is_vm(vm); - /// [high-level-req-4] - requires proposer == @vm_reserved || stake::spec_is_current_epoch_validator(proposer); + pragma verify_duration_estimate = 1000; // TODO: set because of timeout (property proved) requires timestamp >= 
reconfiguration::last_reconfiguration_time(); - requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp); - requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp); - requires exists(@aptos_framework); - requires exists>(@aptos_framework); - include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; - include staking_config::StakingRewardsConfigRequirement; + include BlockRequirement; + aborts_if false; + } + spec block_prologue_ext { + pragma verify_duration_estimate = 1000; // TODO: set because of timeout (property proved) + requires timestamp >= reconfiguration::last_reconfiguration_time(); + include BlockRequirement; + include stake::ResourceRequirement; + include stake::GetReconfigStartTimeRequirement; aborts_if false; } @@ -118,6 +117,34 @@ spec aptos_framework::block { aborts_if account.guid_creation_num + 2 >= account::MAX_GUID_CREATION_NUM; } + spec schema BlockRequirement { + use aptos_framework::chain_status; + use aptos_framework::coin::CoinInfo; + use aptos_framework::aptos_coin::AptosCoin; + use aptos_framework::transaction_fee; + use aptos_framework::staking_config; + + vm: signer; + hash: address; + epoch: u64; + round: u64; + proposer: address; + failed_proposer_indices: vector; + previous_block_votes_bitvec: vector; + timestamp: u64; + + requires chain_status::is_operating(); + requires system_addresses::is_vm(vm); + /// [high-level-req-4] + requires proposer == @vm_reserved || stake::spec_is_current_epoch_validator(proposer); + requires (proposer == @vm_reserved) ==> (timestamp::spec_now_microseconds() == timestamp); + requires (proposer != @vm_reserved) ==> (timestamp::spec_now_microseconds() < timestamp); + requires exists(@aptos_framework); + requires exists>(@aptos_framework); + include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; + include staking_config::StakingRewardsConfigRequirement; + } + spec schema Initialize { use std::signer; 
aptos_framework: signer; diff --git a/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move new file mode 100644 index 0000000000000..bea8715618311 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.move @@ -0,0 +1,92 @@ +/// This wrapper helps store an on-chain config for the next epoch. +/// +/// Once reconfigure with DKG is introduced, every on-chain config `C` should do the following. +/// - Support async update when DKG is enabled. This is typically done by 3 steps below. +/// - Implement `C::set_for_next_epoch()` using `upsert()` function in this module. +/// - Implement `C::on_new_epoch()` using `extract()` function in this module. +/// - Update `0x1::reconfiguration_with_dkg::finish()` to call `C::on_new_epoch()`. +/// - Support sychronous update when DKG is disabled. +/// This is typically done by implementing `C::set()` to update the config resource directly. +/// +/// NOTE: on-chain config `0x1::state::ValidatorSet` implemented its own buffer. +module aptos_framework::config_buffer { + use std::string::String; + use aptos_std::any; + use aptos_std::any::Any; + use aptos_std::simple_map; + use aptos_std::simple_map::SimpleMap; + use aptos_std::type_info; + + friend aptos_framework::consensus_config; + friend aptos_framework::execution_config; + friend aptos_framework::gas_schedule; + friend aptos_framework::jwks; + friend aptos_framework::version; + + /// Config buffer operations failed with permission denied. + const ESTD_SIGNER_NEEDED: u64 = 1; + + struct PendingConfigs has key { + configs: SimpleMap, + } + + public fun initialize(aptos_framework: &signer) { + move_to(aptos_framework, PendingConfigs { + configs: simple_map::new(), + }) + } + + /// Check whether there is a pending config payload for `T`. 
+ public fun does_exist(): bool acquires PendingConfigs { + if (exists(@aptos_framework)) { + let config = borrow_global(@aptos_framework); + simple_map::contains_key(&config.configs, &type_info::type_name()) + } else { + false + } + } + + /// Upsert an on-chain config to the buffer for the next epoch. + /// + /// Typically used in `X::set_for_next_epoch()` where X is an on-chain config. + public(friend) fun upsert(config: T) acquires PendingConfigs { + let configs = borrow_global_mut(@aptos_framework); + let key = type_info::type_name(); + let value = any::pack(config); + simple_map::upsert(&mut configs.configs, key, value); + } + + /// Take the buffered config `T` out (buffer cleared). Abort if the buffer is empty. + /// Should only be used at the end of a reconfiguration. + /// + /// Typically used in `X::on_new_epoch()` where X is an on-chaon config. + public fun extract(): T acquires PendingConfigs { + let configs = borrow_global_mut(@aptos_framework); + let key = type_info::type_name(); + let (_, value_packed) = simple_map::remove(&mut configs.configs, &key); + any::unpack(value_packed) + } + + #[test_only] + struct DummyConfig has drop, store { + data: u64, + } + + #[test(fx = @std)] + fun test_config_buffer_basic(fx: &signer) acquires PendingConfigs { + initialize(fx); + // Initially nothing in the buffer. + assert!(!does_exist(), 1); + + // Insert should work. + upsert(DummyConfig { data: 888 }); + assert!(does_exist(), 1); + + // Update and extract should work. 
+ upsert(DummyConfig { data: 999 }); + assert!(does_exist(), 1); + let config = extract(); + assert!(config == DummyConfig { data: 999 }, 1); + assert!(!does_exist(), 1); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move new file mode 100644 index 0000000000000..907e1c89db123 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/configs/config_buffer.spec.move @@ -0,0 +1,75 @@ +spec aptos_framework::config_buffer { + spec module { + pragma verify = true; + } + + spec initialize(aptos_framework: &signer) { + use std::signer; + aborts_if exists(signer::address_of(aptos_framework)); + } + + spec does_exist(): bool { + aborts_if false; + let type_name = type_info::type_name(); + ensures result == spec_fun_does_exist(type_name); + } + + spec fun spec_fun_does_exist(type_name: String): bool { + if (exists(@aptos_framework)) { + let config = global(@aptos_framework); + simple_map::spec_contains_key(config.configs, type_name) + } else { + false + } + } + + spec upsert(config: T) { + aborts_if !exists(@aptos_framework); + } + + spec extract(): T { + aborts_if !exists(@aptos_framework); + include ExtractAbortsIf; + } + + spec schema ExtractAbortsIf { + let configs = global(@aptos_framework); + let key = type_info::type_name(); + aborts_if !simple_map::spec_contains_key(configs.configs, key); + include any::UnpackAbortsIf { + x: simple_map::spec_get(configs.configs, key) + }; + } + + spec schema SetForNextEpochAbortsIf { + account: &signer; + config: vector; + let account_addr = std::signer::address_of(account); + aborts_if account_addr != @aptos_framework; + aborts_if len(config) == 0; + aborts_if !exists(@aptos_framework); + } + + spec schema OnNewEpochAbortsIf { + use aptos_std::type_info; + let type_name = type_info::type_name(); + aborts_if spec_fun_does_exist(type_name) && !exists(@aptos_framework); + let configs = 
global(@aptos_framework); + // TODO(#12015) + include spec_fun_does_exist(type_name) ==> any::UnpackAbortsIf { + x: simple_map::spec_get(configs.configs, type_name) + }; + } + + spec schema OnNewEpochRequirement { + use aptos_std::type_info; + let type_name = type_info::type_name(); + requires spec_fun_does_exist(type_name) ==> exists(@aptos_framework); + let configs = global(@aptos_framework); + // TODO(#12015) + include spec_fun_does_exist(type_name) ==> any::UnpackRequirement { + x: simple_map::spec_get(configs.configs, type_name) + }; + } + +} diff --git a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move index f75410f6ab85a..5d13ee46a88db 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.move @@ -3,13 +3,15 @@ module aptos_framework::consensus_config { use std::error; use std::vector; + use aptos_framework::config_buffer; use aptos_framework::reconfiguration; use aptos_framework::system_addresses; friend aptos_framework::genesis; + friend aptos_framework::reconfiguration_with_dkg; - struct ConsensusConfig has key { + struct ConsensusConfig has drop, key, store { config: vector, } @@ -23,7 +25,11 @@ module aptos_framework::consensus_config { move_to(aptos_framework, ConsensusConfig { config }); } - /// This can be called by on-chain governance to update on-chain consensus configs. + /// Deprecated by `set_for_next_epoch()`. + /// + /// WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + /// + /// TODO: update all the tests that reference this function, then disable this function. 
public fun set(account: &signer, config: vector) acquires ConsensusConfig { system_addresses::assert_aptos_framework(account); assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG)); @@ -34,4 +40,30 @@ module aptos_framework::consensus_config { // Need to trigger reconfiguration so validator nodes can sync on the updated configs. reconfiguration::reconfigure(); } + + /// This can be called by on-chain governance to update on-chain consensus configs for the next epoch. + /// Example usage: + /// ``` + /// aptos_framework::consensus_config::set_for_next_epoch(&framework_signer, some_config_bytes); + /// aptos_framework::aptos_governance::reconfigure(&framework_signer); + /// ``` + public fun set_for_next_epoch(account: &signer, config: vector) { + system_addresses::assert_aptos_framework(account); + assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG)); + std::config_buffer::upsert(ConsensusConfig {config}); + } + + /// Only used in reconfigurations to apply the pending `ConsensusConfig`, if there is any. 
+ public(friend) fun on_new_epoch() acquires ConsensusConfig { + if (config_buffer::does_exist()) { + *borrow_global_mut(@aptos_framework) = config_buffer::extract(); + } + } + + public fun validator_txn_enabled(): bool acquires ConsensusConfig { + let config_bytes = borrow_global(@aptos_framework).config; + validator_txn_enabled_internal(config_bytes) + } + + native fun validator_txn_enabled_internal(config_bytes: vector): bool; } diff --git a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move index 48fe7894c4a2f..13ba15e2d95b1 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/consensus_config.spec.move @@ -22,8 +22,10 @@ spec aptos_framework::consensus_config { /// /// spec module { + use aptos_framework::chain_status; pragma verify = true; pragma aborts_if_is_strict; + invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); } /// Ensure caller is admin. 
@@ -52,7 +54,7 @@ spec aptos_framework::consensus_config { use aptos_framework::staking_config; // TODO: set because of timeout (property proved) - pragma verify_duration_estimate = 120; + pragma verify_duration_estimate = 600; include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; include staking_config::StakingRewardsConfigRequirement; let addr = signer::address_of(account); @@ -68,4 +70,26 @@ spec aptos_framework::consensus_config { requires exists>(@aptos_framework); ensures global(@aptos_framework).config == config; } + + spec set_for_next_epoch(account: &signer, config: vector) { + include config_buffer::SetForNextEpochAbortsIf; + } + + spec on_new_epoch() { + include config_buffer::OnNewEpochAbortsIf; + } + + spec validator_txn_enabled(): bool { + pragma opaque; + aborts_if !exists(@aptos_framework); + ensures [abstract] result == spec_validator_txn_enabled_internal(global(@aptos_framework).config); + } + + spec validator_txn_enabled_internal(config_bytes: vector): bool { + pragma opaque; + ensures [abstract] result == spec_validator_txn_enabled_internal(config_bytes); + } + + spec fun spec_validator_txn_enabled_internal(config_bytes: vector): bool; + } diff --git a/aptos-move/framework/aptos-framework/sources/configs/execution_config.move b/aptos-move/framework/aptos-framework/sources/configs/execution_config.move index 54103c7f8b32b..c1ce2fc554b98 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/execution_config.move +++ b/aptos-move/framework/aptos-framework/sources/configs/execution_config.move @@ -1,22 +1,27 @@ /// Maintains the execution config for the blockchain. The config is stored in a /// Reconfiguration, and may be updated by root. 
module aptos_framework::execution_config { + use aptos_framework::config_buffer; use std::error; use std::vector; use aptos_framework::reconfiguration; use aptos_framework::system_addresses; - friend aptos_framework::genesis; + friend aptos_framework::reconfiguration_with_dkg; - struct ExecutionConfig has key { + struct ExecutionConfig has drop, key, store { config: vector, } /// The provided on chain config bytes are empty or invalid const EINVALID_CONFIG: u64 = 1; - /// This can be called by on-chain governance to update on-chain execution configs. + /// Deprecated by `set_for_next_epoch()`. + /// + /// WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + /// + /// TODO: update all the tests that reference this function, then disable this function. public fun set(account: &signer, config: vector) acquires ExecutionConfig { system_addresses::assert_aptos_framework(account); assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG)); @@ -30,4 +35,24 @@ module aptos_framework::execution_config { // Need to trigger reconfiguration so validator nodes can sync on the updated configs. reconfiguration::reconfigure(); } + + /// This can be called by on-chain governance to update on-chain execution configs for the next epoch. + /// Example usage: + /// ``` + /// aptos_framework::execution_config::set_for_next_epoch(&framework_signer, some_config_bytes); + /// aptos_framework::aptos_governance::reconfigure(&framework_signer); + /// ``` + public fun set_for_next_epoch(account: &signer, config: vector) { + system_addresses::assert_aptos_framework(account); + assert!(vector::length(&config) > 0, error::invalid_argument(EINVALID_CONFIG)); + config_buffer::upsert(ExecutionConfig { config }); + } + + /// Only used in reconfigurations to apply the pending `ExecutionConfig`, if there is any. 
+ public(friend) fun on_new_epoch() acquires ExecutionConfig { + if (config_buffer::does_exist()) { + let config = config_buffer::extract(); + *borrow_global_mut(@aptos_framework) = config; + } + } } diff --git a/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move b/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move index a2212190cb28c..75c3244974951 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/execution_config.spec.move @@ -17,7 +17,7 @@ spec aptos_framework::execution_config { use aptos_framework::aptos_coin; // TODO: set because of timeout (property proved) - pragma verify_duration_estimate = 120; + pragma verify_duration_estimate = 600; let addr = signer::address_of(account); include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; requires chain_status::is_operating(); @@ -31,4 +31,12 @@ spec aptos_framework::execution_config { ensures exists(@aptos_framework); } + + spec set_for_next_epoch(account: &signer, config: vector) { + include config_buffer::SetForNextEpochAbortsIf; + } + + spec on_new_epoch() { + include config_buffer::OnNewEpochAbortsIf; + } } diff --git a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move index 36be0caf9c311..d8072b910ce84 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move +++ b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.move @@ -4,6 +4,7 @@ module aptos_framework::gas_schedule { use std::error; use std::string::String; use std::vector; + use aptos_framework::config_buffer; use aptos_framework::reconfiguration; use aptos_framework::system_addresses; @@ -12,6 +13,7 @@ module aptos_framework::gas_schedule { use aptos_framework::storage_gas; friend aptos_framework::genesis; + friend 
aptos_framework::reconfiguration_with_dkg; /// The provided gas schedule bytes are empty or invalid const EINVALID_GAS_SCHEDULE: u64 = 1; @@ -26,7 +28,7 @@ module aptos_framework::gas_schedule { entries: vector } - struct GasScheduleV2 has key, copy, drop { + struct GasScheduleV2 has key, copy, drop, store { feature_version: u64, entries: vector, } @@ -41,7 +43,11 @@ module aptos_framework::gas_schedule { move_to(aptos_framework, gas_schedule); } - /// This can be called by on-chain governance to update the gas schedule. + /// Deprecated by `set_for_next_epoch()`. + /// + /// WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + /// + /// TODO: update all the tests that reference this function, then disable this function. public fun set_gas_schedule(aptos_framework: &signer, gas_schedule_blob: vector) acquires GasSchedule, GasScheduleV2 { system_addresses::assert_aptos_framework(aptos_framework); assert!(!vector::is_empty(&gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE)); @@ -67,10 +73,36 @@ module aptos_framework::gas_schedule { reconfiguration::reconfigure(); } + /// Set the gas schedule for the next epoch, typically called by on-chain governance. + /// Example usage: + /// ``` + /// aptos_framework::gas_schedule::set_for_next_epoch(&framework_signer, some_gas_schedule_blob); + /// aptos_framework::aptos_governance::reconfigure(&framework_signer); + /// ``` + public fun set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector) { + system_addresses::assert_aptos_framework(aptos_framework); + assert!(!vector::is_empty(&gas_schedule_blob), error::invalid_argument(EINVALID_GAS_SCHEDULE)); + let new_gas_schedule: GasScheduleV2 = from_bytes(gas_schedule_blob); + config_buffer::upsert(new_gas_schedule); + } + + /// Only used in reconfigurations to apply the pending `GasScheduleV2`, if there is any. 
+ public(friend) fun on_new_epoch() acquires GasScheduleV2 { + if (config_buffer::does_exist()) { + let new_gas_schedule: GasScheduleV2 = config_buffer::extract(); + let gas_schedule = borrow_global_mut(@aptos_framework); + *gas_schedule = new_gas_schedule; + } + } + public fun set_storage_gas_config(aptos_framework: &signer, config: StorageGasConfig) { storage_gas::set_config(aptos_framework, config); // Need to trigger reconfiguration so the VM is guaranteed to load the new gas fee starting from the next // transaction. reconfiguration::reconfigure(); } + + public fun set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: StorageGasConfig) { + storage_gas::set_config(aptos_framework, config); + } } diff --git a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move index bf71bb6c8e8a6..a25acf4c16864 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/gas_schedule.spec.move @@ -56,7 +56,7 @@ spec aptos_framework::gas_schedule { use aptos_framework::staking_config; // TODO: set because of timeout (property proved) - pragma verify_duration_estimate = 120; + pragma verify_duration_estimate = 600; requires exists(@aptos_framework); requires exists>(@aptos_framework); include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; @@ -82,7 +82,7 @@ spec aptos_framework::gas_schedule { use aptos_framework::staking_config; // TODO: set because of timeout (property proved). 
- pragma verify_duration_estimate = 120; + pragma verify_duration_estimate = 600; requires exists(@aptos_framework); requires exists>(@aptos_framework); include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework }; @@ -91,4 +91,25 @@ spec aptos_framework::gas_schedule { aborts_if !exists(@aptos_framework); ensures global(@aptos_framework) == config; } + + spec set_for_next_epoch(aptos_framework: &signer, gas_schedule_blob: vector) { + include config_buffer::SetForNextEpochAbortsIf { + account: aptos_framework, + config: gas_schedule_blob + }; + } + + spec on_new_epoch() { + include config_buffer::OnNewEpochAbortsIf; + } + + spec set_storage_gas_config(aptos_framework: &signer, config: storage_gas::StorageGasConfig) { + include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework }; + aborts_if !exists(@aptos_framework); + } + + spec set_storage_gas_config_for_next_epoch(aptos_framework: &signer, config: storage_gas::StorageGasConfig) { + include system_addresses::AbortsIfNotAptosFramework{ account: aptos_framework }; + aborts_if !exists(@aptos_framework); + } } diff --git a/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move b/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move index e80ac7279f8cc..878da8fa9daa2 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/staking_config.spec.move @@ -133,6 +133,7 @@ spec aptos_framework::staking_config { rewards_rate_decrease_rate: FixedPoint64, ) { use std::signer; + pragma verify_duration_estimate = 120; requires exists(@aptos_framework); let addr = signer::address_of(aptos_framework); /// [high-level-req-1.2] diff --git a/aptos-move/framework/aptos-framework/sources/configs/version.move b/aptos-move/framework/aptos-framework/sources/configs/version.move index 47b16921d9556..d3d04c5beb1df 100644 --- 
a/aptos-move/framework/aptos-framework/sources/configs/version.move +++ b/aptos-move/framework/aptos-framework/sources/configs/version.move @@ -2,13 +2,15 @@ module aptos_framework::version { use std::error; use std::signer; + use aptos_framework::config_buffer; use aptos_framework::reconfiguration; use aptos_framework::system_addresses; friend aptos_framework::genesis; + friend aptos_framework::reconfiguration_with_dkg; - struct Version has key { + struct Version has drop, key, store { major: u64, } @@ -30,8 +32,11 @@ module aptos_framework::version { move_to(aptos_framework, SetVersionCapability {}); } - /// Updates the major version to a larger version. - /// This can be called by on chain governance. + /// Deprecated by `set_for_next_epoch()`. + /// + /// WARNING: calling this while randomness is enabled will trigger a new epoch without randomness! + /// + /// TODO: update all the tests that reference this function, then disable this function. public entry fun set_version(account: &signer, major: u64) acquires Version { assert!(exists(signer::address_of(account)), error::permission_denied(ENOT_AUTHORIZED)); @@ -45,6 +50,26 @@ module aptos_framework::version { reconfiguration::reconfigure(); } + /// Used in on-chain governances to update the major version for the next epoch. + /// Example usage: + /// ``` + /// aptos_framework::version::set_for_next_epoch(&framework_signer, new_version); + /// aptos_framework::aptos_governance::reconfigure(&framework_signer); + /// ``` + public fun set_for_next_epoch(account: &signer, major: u64) acquires Version { + assert!(exists(signer::address_of(account)), error::permission_denied(ENOT_AUTHORIZED)); + let old_major = borrow_global(@aptos_framework).major; + assert!(old_major < major, error::invalid_argument(EINVALID_MAJOR_VERSION_NUMBER)); + config_buffer::upsert(Version {major}); + } + + /// Only used in reconfigurations to apply the pending `Version`, if there is any. 
+ public(friend) fun on_new_epoch() acquires Version { + if (config_buffer::does_exist()) { + *borrow_global_mut(@aptos_framework) = config_buffer::extract(); + } + } + /// Only called in tests and testnets. This allows the core resources account, which only exists in tests/testnets, /// to update the version. fun initialize_for_test(core_resources: &signer) { diff --git a/aptos-move/framework/aptos-framework/sources/configs/version.spec.move b/aptos-move/framework/aptos-framework/sources/configs/version.spec.move index 636b8fc653b47..bc90e0e0ba6a1 100644 --- a/aptos-move/framework/aptos-framework/sources/configs/version.spec.move +++ b/aptos-move/framework/aptos-framework/sources/configs/version.spec.move @@ -66,6 +66,17 @@ spec aptos_framework::version { ensures global(@aptos_framework) == SetVersionCapability {}; } + spec set_for_next_epoch(account: &signer, major: u64) { + aborts_if !exists(signer::address_of(account)); + aborts_if !exists(@aptos_framework); + aborts_if global(@aptos_framework).major >= major; + aborts_if !exists(@aptos_framework); + } + + spec on_new_epoch() { + include config_buffer::OnNewEpochAbortsIf; + } + /// This module turns on `aborts_if_is_strict`, so need to add spec for test function `initialize_for_test`. spec initialize_for_test { // Don't verify test functions. diff --git a/aptos-move/framework/aptos-framework/sources/dkg.move b/aptos-move/framework/aptos-framework/sources/dkg.move new file mode 100644 index 0000000000000..1ba379911f364 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/dkg.move @@ -0,0 +1,145 @@ +/// DKG on-chain states and helper functions. 
+module aptos_framework::dkg { + use std::error; + use std::option; + use std::option::Option; + use aptos_framework::event::emit; + use aptos_framework::system_addresses; + use aptos_framework::timestamp; + use aptos_framework::validator_consensus_info::ValidatorConsensusInfo; + friend aptos_framework::block; + friend aptos_framework::reconfiguration_with_dkg; + + const EDKG_IN_PROGRESS: u64 = 1; + const EDKG_NOT_IN_PROGRESS: u64 = 2; + + /// If this resource is present under 0x1, validators should not do DKG (so the epoch change get stuck). + /// This is test-only. + struct FailureInjectionBlockDKG has drop, key {} + + /// If this resource is present under 0x1, validators should not provider randomness to block (so the execution get stuck). + /// This is test-only. + struct FailureInjectionBlockRandomness has drop, key {} + + /// This can be considered as the public input of DKG. + struct DKGSessionMetadata has copy, drop, store { + dealer_epoch: u64, + dealer_validator_set: vector, + target_validator_set: vector, + block_dkg: bool, + block_randomness: bool, + } + + #[event] + struct DKGStartEvent has drop, store { + session_metadata: DKGSessionMetadata, + start_time_us: u64, + } + + /// The input and output of a DKG session. + /// The validator set of epoch `x` works together for an DKG output for the target validator set of epoch `x+1`. + struct DKGSessionState has copy, store, drop { + metadata: DKGSessionMetadata, + start_time_us: u64, + transcript: vector, + } + + /// The completed and in-progress DKG sessions. 
+ struct DKGState has key { + last_completed: Option, + in_progress: Option, + } + + public fun block_dkg(framework: &signer) { + system_addresses::assert_aptos_framework(framework); + if (!exists(@aptos_framework)) { + move_to(framework, FailureInjectionBlockDKG {}) + } + } + + public fun unblock_dkg(framework: &signer) acquires FailureInjectionBlockDKG { + system_addresses::assert_aptos_framework(framework); + if (exists(@aptos_framework)) { + move_from(@aptos_framework); + } + } + + public fun block_randomness(framework: &signer) { + system_addresses::assert_aptos_framework(framework); + if (!exists(@aptos_framework)) { + move_to(framework, FailureInjectionBlockRandomness {}) + } + } + + public fun unblock_randomness(framework: &signer) acquires FailureInjectionBlockRandomness { + system_addresses::assert_aptos_framework(framework); + if (!exists(@aptos_framework)) { + move_from(@aptos_framework); + } + } + + /// Called in genesis to initialize on-chain states. + public fun initialize(aptos_framework: &signer) { + system_addresses::assert_aptos_framework(aptos_framework); + move_to( + aptos_framework, + DKGState { + last_completed: std::option::none(), + in_progress: std::option::none(), + } + ); + } + + /// Mark on-chain DKG state as in-progress. Notify validators to start DKG. + /// Abort if a DKG is already in progress. 
+ public(friend) fun start( + dealer_epoch: u64, + dealer_validator_set: vector, + target_validator_set: vector, + ) acquires DKGState { + let dkg_state = borrow_global_mut(@aptos_framework); + assert!(std::option::is_none(&dkg_state.in_progress), error::invalid_state(EDKG_IN_PROGRESS)); + let new_session_metadata = DKGSessionMetadata { + dealer_epoch, + dealer_validator_set, + target_validator_set, + block_dkg: exists(@aptos_framework), + block_randomness: exists(@aptos_framework), + }; + let start_time_us = timestamp::now_microseconds(); + dkg_state.in_progress = std::option::some(DKGSessionState { + metadata: new_session_metadata, + start_time_us, + transcript: vector[], + }); + + emit(DKGStartEvent { + start_time_us, + session_metadata: new_session_metadata, + }); + } + + /// Update the current DKG state at the beginning of every block in `block_prologue_ext()`, + /// or when DKG result is available. + /// + /// Return true if and only if this update completes/aborts the DKG and we should proceed to the next epoch. + /// + /// Abort if DKG is not in progress. + public(friend) fun finish(transcript: vector) acquires DKGState { + let dkg_state = borrow_global_mut(@aptos_framework); + assert!(option::is_some(&dkg_state.in_progress), error::invalid_state(EDKG_NOT_IN_PROGRESS)); + let session = option::extract(&mut dkg_state.in_progress); + session.transcript = transcript; + dkg_state.last_completed = option::some(session); + dkg_state.in_progress = option::none(); + } + + /// Return whether a DKG is in progress. 
+ public(friend) fun in_progress(): bool acquires DKGState { + if (exists(@aptos_framework)) { + option::is_some(&borrow_global(@aptos_framework).in_progress) + } else { + false + } + } +} diff --git a/aptos-move/framework/aptos-framework/sources/dkg.spec.move b/aptos-move/framework/aptos-framework/sources/dkg.spec.move new file mode 100644 index 0000000000000..cef9de50cf79b --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/dkg.spec.move @@ -0,0 +1,45 @@ +spec aptos_framework::dkg { + + spec module { + use aptos_framework::chain_status; + invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); + } + + spec initialize(aptos_framework: &signer) { + use std::signer; + let aptos_framework_addr = signer::address_of(aptos_framework); + aborts_if aptos_framework_addr != @aptos_framework; + aborts_if exists(@aptos_framework); + } + + spec start( + dealer_epoch: u64, + dealer_validator_set: vector, + target_validator_set: vector, + ) { + use std::option; + aborts_if !exists(@aptos_framework); + aborts_if option::is_some(global(@aptos_framework).in_progress); + aborts_if !exists(@aptos_framework); + } + + spec finish(transcript: vector) { + use std::option; + aborts_if !exists(@aptos_framework); + aborts_if option::is_none(global(@aptos_framework).in_progress); + } + + spec in_progress(): bool { + aborts_if false; + ensures result == spec_in_progress(); + } + + spec fun spec_in_progress(): bool { + if (exists(@aptos_framework)) { + option::spec_is_some(global(@aptos_framework).in_progress) + } else { + false + } + } + +} diff --git a/aptos-move/framework/aptos-framework/sources/genesis.move b/aptos-move/framework/aptos-framework/sources/genesis.move index 4a01ea1532eb6..e30268df92b4a 100644 --- a/aptos-move/framework/aptos-framework/sources/genesis.move +++ b/aptos-move/framework/aptos-framework/sources/genesis.move @@ -17,7 +17,6 @@ module aptos_framework::genesis { use aptos_framework::execution_config; use 
aptos_framework::create_signer::create_signer; use aptos_framework::gas_schedule; - use aptos_framework::jwks; use aptos_framework::reconfiguration; use aptos_framework::stake; use aptos_framework::staking_contract; @@ -131,7 +130,6 @@ module aptos_framework::genesis { block::initialize(&aptos_framework_account, epoch_interval_microsecs); state_storage::initialize(&aptos_framework_account); timestamp::set_time_has_started(&aptos_framework_account); - jwks::initialize(&aptos_framework_account); } /// Genesis step 2: Initialize Aptos coin. diff --git a/aptos-move/framework/aptos-framework/sources/jwks.move b/aptos-move/framework/aptos-framework/sources/jwks.move index 745fec4c6f25c..f8f6fcd07ef14 100644 --- a/aptos-move/framework/aptos-framework/sources/jwks.move +++ b/aptos-move/framework/aptos-framework/sources/jwks.move @@ -14,6 +14,7 @@ module aptos_framework::jwks { use aptos_std::comparator::{compare_u8_vector, is_greater_than, is_equal}; use aptos_std::copyable_any; use aptos_std::copyable_any::Any; + use aptos_framework::config_buffer; use aptos_framework::event::emit; use aptos_framework::reconfiguration; use aptos_framework::system_addresses; @@ -21,6 +22,7 @@ module aptos_framework::jwks { use aptos_framework::account::create_account_for_test; friend aptos_framework::genesis; + friend aptos_framework::reconfiguration_with_dkg; const EUNEXPECTED_EPOCH: u64 = 1; const EUNEXPECTED_VERSION: u64 = 2; @@ -36,7 +38,7 @@ module aptos_framework::jwks { const ENATIVE_NOT_ENOUGH_VOTING_POWER: u64 = 0x0105; /// An OIDC provider. - struct OIDCProvider has drop, store { + struct OIDCProvider has copy, drop, store { /// The utf-8 encoded issuer string. E.g., b"https://www.facebook.com". name: vector, @@ -46,7 +48,7 @@ module aptos_framework::jwks { } /// A list of OIDC providers whose JWKs should be watched by validators. Maintained by governance proposals. 
- struct SupportedOIDCProviders has key { + struct SupportedOIDCProviders has copy, drop, key, store { providers: vector, } @@ -171,9 +173,9 @@ module aptos_framework::jwks { try_get_jwk_by_issuer(jwks, issuer, jwk_id) } - /// Upsert an OIDC provider metadata into the `SupportedOIDCProviders` resource. - /// Can only be called in a governance proposal. - /// Returns the old config URL of the provider, if any, as an `Option`. + /// Deprecated by `upsert_oidc_provider_for_next_epoch()`. + /// + /// TODO: update all the tests that reference this function, then disable this function. public fun upsert_oidc_provider(fx: &signer, name: vector, config_url: vector): Option> acquires SupportedOIDCProviders { system_addresses::assert_aptos_framework(fx); @@ -184,13 +186,34 @@ module aptos_framework::jwks { old_config_url } - /// Remove an OIDC provider from the `SupportedOIDCProviders` resource. - /// Can only be called in a governance proposal. - /// Returns the old config URL of the provider, if any, as an `Option`. + /// Used in on-chain governances to update the supported OIDC providers, effective starting next epoch. 
+ /// Example usage: + /// ``` + /// aptos_framework::jwks::upsert_oidc_provider_for_next_epoch( + /// &framework_signer, + /// b"https://accounts.google.com", + /// b"https://accounts.google.com/.well-known/openid-configuration" + /// ); + /// aptos_framework::aptos_governance::reconfigure(&framework_signer); + /// ``` + public fun upsert_oidc_provider_for_next_epoch(fx: &signer, name: vector, config_url: vector): Option> acquires SupportedOIDCProviders { + system_addresses::assert_aptos_framework(fx); + + let provider_set = if (config_buffer::does_exist()) { + config_buffer::extract() + } else { + *borrow_global_mut(@aptos_framework) + }; + + let old_config_url = remove_oidc_provider_internal(&mut provider_set, name); + vector::push_back(&mut provider_set.providers, OIDCProvider { name, config_url }); + config_buffer::upsert(provider_set); + old_config_url + } + + /// Deprecated by `remove_oidc_provider_for_next_epoch()`. /// - /// NOTE: this only stops validators from watching the provider and generate updates to `ObservedJWKs`. - /// It does NOT touch `ObservedJWKs` or `Patches`. - /// If you are disabling a provider, you probably also need `remove_issuer_from_observed_jwks()` and possibly `set_patches()`. + /// TODO: update all the tests that reference this function, then disable this function. public fun remove_oidc_provider(fx: &signer, name: vector): Option> acquires SupportedOIDCProviders { system_addresses::assert_aptos_framework(fx); @@ -198,6 +221,35 @@ module aptos_framework::jwks { remove_oidc_provider_internal(provider_set, name) } + /// Used in on-chain governances to update the supported OIDC providers, effective starting next epoch. 
+ /// Example usage: + /// ``` + /// aptos_framework::jwks::remove_oidc_provider_for_next_epoch( + /// &framework_signer, + /// b"https://accounts.google.com", + /// ); + /// aptos_framework::aptos_governance::reconfigure(&framework_signer); + /// ``` + public fun remove_oidc_provider_for_next_epoch(fx: &signer, name: vector): Option> acquires SupportedOIDCProviders { + system_addresses::assert_aptos_framework(fx); + + let provider_set = if (config_buffer::does_exist()) { + config_buffer::extract() + } else { + *borrow_global_mut(@aptos_framework) + }; + let ret = remove_oidc_provider_internal(&mut provider_set, name); + config_buffer::upsert(provider_set); + ret + } + + /// Only used in reconfigurations to apply the pending `SupportedOIDCProviders`, if there is any. + public(friend) fun on_new_epoch() acquires SupportedOIDCProviders { + if (config_buffer::does_exist()) { + *borrow_global_mut(@aptos_framework) = config_buffer::extract(); + } + } + /// Set the `Patches`. Only called in governance proposals. public fun set_patches(fx: &signer, patches: vector) acquires Patches, PatchedJWKs, ObservedJWKs { system_addresses::assert_aptos_framework(fx); diff --git a/aptos-move/framework/aptos-framework/sources/randomness.move b/aptos-move/framework/aptos-framework/sources/randomness.move new file mode 100644 index 0000000000000..4679146cd2713 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/randomness.move @@ -0,0 +1,579 @@ +/// This module provides access to *instant* secure randomness generated by the Aptos validators, as documented in +/// [AIP-41](https://github.com/aptos-foundation/AIPs/blob/main/aips/aip-41.md). +/// +/// Secure randomness means (1) the randomness cannot be predicted ahead of time by validators, developers or users +/// and (2) the randomness cannot be biased in any way by validators, developers or users. +/// +/// Security holds under the same proof-of-stake assumption that secures the Aptos network. 
+module aptos_framework::randomness { + use std::hash; + use std::option; + use std::option::Option; + use std::vector; + use aptos_framework::event; + use aptos_framework::system_addresses; + use aptos_framework::transaction_context; + #[test_only] + use aptos_std::debug; + #[test_only] + use aptos_std::table_with_length; + + friend aptos_framework::block; + + const DST: vector = b"APTOS_RANDOMNESS"; + + /// Randomness APIs calls must originate from a private entry function. Otherwise, test-and-abort attacks are possible. + const E_API_USE_SUSCEPTIBLE_TO_TEST_AND_ABORT: u64 = 1; + + const MAX_U256: u256 = 115792089237316195423570985008687907853269984665640564039457584007913129639935; + + /// 32-byte randomness seed unique to every block. + /// This resource is updated in every block prologue. + struct PerBlockRandomness has drop, key { + epoch: u64, + round: u64, + seed: Option>, + } + + #[event] + /// Event emitted every time a public randomness API in this module is called. + struct RandomnessGeneratedEvent has store, drop { + } + + /// Called in genesis.move. + /// Must be called in tests to initialize the `PerBlockRandomness` resource. + public fun initialize(framework: &signer) { + system_addresses::assert_aptos_framework(framework); + move_to(framework, PerBlockRandomness { + epoch: 0, + round: 0, + seed: option::none(), + }); + } + + #[test_only] + public fun initialize_for_testing(framework: &signer) acquires PerBlockRandomness { + initialize(framework); + set_seed(x"0000000000000000000000000000000000000000000000000000000000000000"); + } + + /// Invoked in block prologues to update the block-level randomness seed. 
+ public(friend) fun on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: Option>) acquires PerBlockRandomness { + system_addresses::assert_vm(vm); + if (exists(@aptos_framework)) { + let randomness = borrow_global_mut(@aptos_framework); + randomness.epoch = epoch; + randomness.round = round; + randomness.seed = seed_for_new_block; + } + } + + /// Generate the next 32 random bytes. Repeated calls will yield different results (assuming the collision-resistance + /// of the hash function). + fun next_32_bytes(): vector acquires PerBlockRandomness { + assert!(is_safe_call(), E_API_USE_SUSCEPTIBLE_TO_TEST_AND_ABORT); + + let input = DST; + let randomness = borrow_global(@aptos_framework); + let seed = *option::borrow(&randomness.seed); + + vector::append(&mut input, seed); + vector::append(&mut input, transaction_context::get_transaction_hash()); + vector::append(&mut input, fetch_and_increment_txn_counter()); + hash::sha3_256(input) + } + + /// Generates a sequence of bytes uniformly at random + public fun bytes(n: u64): vector acquires PerBlockRandomness { + let v = vector[]; + let c = 0; + while (c < n) { + let blob = next_32_bytes(); + vector::append(&mut v, blob); + + c = c + 32; + }; + + if (c > n) { + vector::trim(&mut v, n); + }; + + event::emit(RandomnessGeneratedEvent {}); + + v + } + + /// Generates an u8 uniformly at random. + public fun u8_integer(): u8 acquires PerBlockRandomness { + let raw = next_32_bytes(); + let ret: u8 = vector::pop_back(&mut raw); + + event::emit(RandomnessGeneratedEvent {}); + + ret + } + + /// Generates an u16 uniformly at random. + public fun u16_integer(): u16 acquires PerBlockRandomness { + let raw = next_32_bytes(); + let i = 0; + let ret: u16 = 0; + while (i < 2) { + ret = ret * 256 + (vector::pop_back(&mut raw) as u16); + i = i + 1; + }; + + event::emit(RandomnessGeneratedEvent {}); + + ret + } + + /// Generates an u32 uniformly at random. 
+ public fun u32_integer(): u32 acquires PerBlockRandomness { + let raw = next_32_bytes(); + let i = 0; + let ret: u32 = 0; + while (i < 4) { + ret = ret * 256 + (vector::pop_back(&mut raw) as u32); + i = i + 1; + }; + + event::emit(RandomnessGeneratedEvent {}); + + ret + } + + /// Generates an u64 uniformly at random. + public fun u64_integer(): u64 acquires PerBlockRandomness { + let raw = next_32_bytes(); + let i = 0; + let ret: u64 = 0; + while (i < 8) { + ret = ret * 256 + (vector::pop_back(&mut raw) as u64); + i = i + 1; + }; + + event::emit(RandomnessGeneratedEvent {}); + + ret + } + + /// Generates an u128 uniformly at random. + public fun u128_integer(): u128 acquires PerBlockRandomness { + let raw = next_32_bytes(); + let i = 0; + let ret: u128 = 0; + while (i < 16) { + spec { + // TODO: Prove these with proper loop invaraints. + assume ret * 256 + 255 <= MAX_U256; + assume len(raw) > 0; + }; + ret = ret * 256 + (vector::pop_back(&mut raw) as u128); + i = i + 1; + }; + + event::emit(RandomnessGeneratedEvent {}); + + ret + } + + /// Generates a u256 uniformly at random. + public fun u256_integer(): u256 acquires PerBlockRandomness { + event::emit(RandomnessGeneratedEvent {}); + u256_integer_internal() + } + + /// Generates a u256 uniformly at random. + fun u256_integer_internal(): u256 acquires PerBlockRandomness { + let raw = next_32_bytes(); + let i = 0; + let ret: u256 = 0; + while (i < 32) { + spec { + // TODO: Prove these with proper loop invaraints. + assume ret * 256 + 255 <= MAX_U256; + assume len(raw) > 0; + }; + ret = ret * 256 + (vector::pop_back(&mut raw) as u256); + i = i + 1; + }; + ret + } + + /// Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + /// + /// NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. + /// If you need perfect uniformity, consider implement your own via rejection sampling. 
+ public fun u8_range(min_incl: u8, max_excl: u8): u8 acquires PerBlockRandomness { + let range = ((max_excl - min_incl) as u256); + let sample = ((u256_integer_internal() % range) as u8); + + event::emit(RandomnessGeneratedEvent {}); + + min_incl + sample + } + + /// Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + /// + /// NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. + /// If you need perfect uniformity, consider implement your own via rejection sampling. + public fun u16_range(min_incl: u16, max_excl: u16): u16 acquires PerBlockRandomness { + let range = ((max_excl - min_incl) as u256); + let sample = ((u256_integer_internal() % range) as u16); + + event::emit(RandomnessGeneratedEvent {}); + + min_incl + sample + } + + /// Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + /// + /// NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. + /// If you need perfect uniformity, consider implement your own via rejection sampling. + public fun u32_range(min_incl: u32, max_excl: u32): u32 acquires PerBlockRandomness { + let range = ((max_excl - min_incl) as u256); + let sample = ((u256_integer_internal() % range) as u32); + + event::emit(RandomnessGeneratedEvent {}); + + min_incl + sample + } + + /// Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + /// + /// NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. + /// If you need perfect uniformity, consider implement your own via rejection sampling. 
+ public fun u64_range(min_incl: u64, max_excl: u64): u64 acquires PerBlockRandomness { + event::emit(RandomnessGeneratedEvent {}); + + u64_range_internal(min_incl, max_excl) + } + + public fun u64_range_internal(min_incl: u64, max_excl: u64): u64 acquires PerBlockRandomness { + let range = ((max_excl - min_incl) as u256); + let sample = ((u256_integer_internal() % range) as u64); + + min_incl + sample + } + + /// Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + /// + /// NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. + /// If you need perfect uniformity, consider implement your own via rejection sampling. + public fun u128_range(min_incl: u128, max_excl: u128): u128 acquires PerBlockRandomness { + let range = ((max_excl - min_incl) as u256); + let sample = ((u256_integer_internal() % range) as u128); + + event::emit(RandomnessGeneratedEvent {}); + + min_incl + sample + } + + /// Generates a number $n \in [min_incl, max_excl)$ uniformly at random. + /// + /// NOTE: The uniformity is not perfect, but it can be proved that the bias is negligible. + /// If you need perfect uniformity, consider implement your own with `u256_integer()` + rejection sampling. + public fun u256_range(min_incl: u256, max_excl: u256): u256 acquires PerBlockRandomness { + let range = max_excl - min_incl; + let r0 = u256_integer_internal(); + let r1 = u256_integer_internal(); + + // Will compute sample := (r0 + r1*2^256) % range. + + let sample = r1 % range; + let i = 0; + while ({ + spec { + invariant sample >= 0 && sample < max_excl - min_incl; + }; + i < 256 + }) { + sample = safe_add_mod(sample, sample, range); + i = i + 1; + }; + + let sample = safe_add_mod(sample, r0 % range, range); + spec { + assert sample >= 0 && sample < max_excl - min_incl; + }; + + event::emit(RandomnessGeneratedEvent {}); + + min_incl + sample + } + + /// Generate a permutation of `[0, 1, ..., n-1]` uniformly at random. 
+ /// If n is 0, returns the empty vector. + public fun permutation(n: u64): vector acquires PerBlockRandomness { + let values = vector[]; + + if(n == 0) { + return vector[] + }; + + // Initialize into [0, 1, ..., n-1]. + let i = 0; + while ({ + spec { + invariant i <= n; + invariant len(values) == i; + }; + i < n + }) { + std::vector::push_back(&mut values, i); + i = i + 1; + }; + spec { + assert len(values) == n; + }; + + // Shuffle. + let tail = n - 1; + while ({ + spec { + invariant tail >= 0 && tail < len(values); + }; + tail > 0 + }) { + let pop_position = u64_range_internal(0, tail + 1); + spec { + assert pop_position < len(values); + }; + std::vector::swap(&mut values, pop_position, tail); + tail = tail - 1; + }; + + event::emit(RandomnessGeneratedEvent {}); + + values + } + + #[test_only] + public fun set_seed(seed: vector) acquires PerBlockRandomness { + assert!(vector::length(&seed) == 32, 0); + let randomness = borrow_global_mut(@aptos_framework); + randomness.seed = option::some(seed); + } + + /// Compute `(a + b) % m`, assuming `m >= 1, 0 <= a < m, 0<= b < m`. + inline fun safe_add_mod(a: u256, b: u256, m: u256): u256 { + let neg_b = m - b; + if (a < neg_b) { + a + b + } else { + a - neg_b + } + } + + #[verify_only] + fun safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256 { + let neg_b = m - b; + if (a < neg_b) { + a + b + } else { + a - neg_b + } + } + + /// Fetches and increments a transaction-specific 32-byte randomness-related counter. + native fun fetch_and_increment_txn_counter(): vector; + + /// Called in each randomness generation function to ensure certain safety invariants. + /// 1. Ensure that the TXN that led to the call of this function had a private (or friend) entry function as its TXN payload. + /// 2. 
TBA + native fun is_safe_call(): bool; + + #[test] + fun test_safe_add_mod() { + assert!(2 == safe_add_mod(3, 4, 5), 1); + assert!(2 == safe_add_mod(4, 3, 5), 1); + assert!(7 == safe_add_mod(3, 4, 9), 1); + assert!(7 == safe_add_mod(4, 3, 9), 1); + assert!(0xfffffffffffffffffffffffffffffffffffffffffffffffe == safe_add_mod(0xfffffffffffffffffffffffffffffffffffffffffffffffd, 0x000000000000000000000000000000000000000000000001, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + assert!(0xfffffffffffffffffffffffffffffffffffffffffffffffe == safe_add_mod(0x000000000000000000000000000000000000000000000001, 0xfffffffffffffffffffffffffffffffffffffffffffffffd, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + assert!(0x000000000000000000000000000000000000000000000000 == safe_add_mod(0xfffffffffffffffffffffffffffffffffffffffffffffffd, 0x000000000000000000000000000000000000000000000002, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + assert!(0x000000000000000000000000000000000000000000000000 == safe_add_mod(0x000000000000000000000000000000000000000000000002, 0xfffffffffffffffffffffffffffffffffffffffffffffffd, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + assert!(0x000000000000000000000000000000000000000000000001 == safe_add_mod(0xfffffffffffffffffffffffffffffffffffffffffffffffd, 0x000000000000000000000000000000000000000000000003, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + assert!(0x000000000000000000000000000000000000000000000001 == safe_add_mod(0x000000000000000000000000000000000000000000000003, 0xfffffffffffffffffffffffffffffffffffffffffffffffd, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + assert!(0xfffffffffffffffffffffffffffffffffffffffffffffffd == safe_add_mod(0xfffffffffffffffffffffffffffffffffffffffffffffffe, 0xfffffffffffffffffffffffffffffffffffffffffffffffe, 0xffffffffffffffffffffffffffffffffffffffffffffffff), 1); + } + + #[test(fx = @aptos_framework)] + fun randomness_smoke_test(fx: signer) 
acquires PerBlockRandomness { + initialize(&fx); + set_seed(x"0000000000000000000000000000000000000000000000000000000000000000"); + // Test cases should always be a safe place to do a randomness call from. + assert!(is_safe_call(), 0); + let num = u64_integer(); + debug::print(&num); + } + + #[test_only] + fun assert_event_count_equals(count: u64) { + let events = event::emitted_events(); + assert!(vector::length(&events) == count, 0); + } + + #[test(fx = @aptos_framework)] + fun test_emit_events(fx: signer) acquires PerBlockRandomness { + initialize_for_testing(&fx); + + let c = 0; + assert_event_count_equals(c); + + let _ = bytes(1); + c = c + 1; + assert_event_count_equals(c); + + let _ = u8_integer(); + c = c + 1; + assert_event_count_equals(c); + + let _ = u16_integer(); + c = c + 1; + assert_event_count_equals(c); + + let _ = u32_integer(); + c = c + 1; + assert_event_count_equals(c); + + let _ = u64_integer(); + c = c + 1; + assert_event_count_equals(c); + + let _ = u128_integer(); + c = c + 1; + assert_event_count_equals(c); + + let _ = u256_integer(); + c = c + 1; + assert_event_count_equals(c); + + let _ = u8_range(0, 255); + c = c + 1; + assert_event_count_equals(c); + + let _ = u16_range(0, 255); + c = c + 1; + assert_event_count_equals(c); + + let _ = u32_range(0, 255); + c = c + 1; + assert_event_count_equals(c); + + let _ = u64_range(0, 255); + c = c + 1; + assert_event_count_equals(c); + + let _ = u128_range(0, 255); + c = c + 1; + assert_event_count_equals(c); + + let _ = u256_range(0, 255); + c = c + 1; + assert_event_count_equals(c); + + let _ = permutation(6); + c = c + 1; + assert_event_count_equals(c); + } + + #[test(fx = @aptos_framework)] + fun test_bytes(fx: signer) acquires PerBlockRandomness { + initialize_for_testing(&fx); + + let v = bytes(0); + assert!(vector::length(&v) == 0, 0); + + let v = bytes(1); + assert!(vector::length(&v) == 1, 0); + let v = bytes(2); + assert!(vector::length(&v) == 2, 0); + let v = bytes(3); + 
assert!(vector::length(&v) == 3, 0); + let v = bytes(4); + assert!(vector::length(&v) == 4, 0); + let v = bytes(30); + assert!(vector::length(&v) == 30, 0); + let v = bytes(31); + assert!(vector::length(&v) == 31, 0); + let v = bytes(32); + assert!(vector::length(&v) == 32, 0); + + let v = bytes(33); + assert!(vector::length(&v) == 33, 0); + let v = bytes(50); + assert!(vector::length(&v) == 50, 0); + let v = bytes(63); + assert!(vector::length(&v) == 63, 0); + let v = bytes(64); + assert!(vector::length(&v) == 64, 0); + } + + #[test_only] + fun is_permutation(v: &vector): bool { + let present = vector[]; + + // Mark all elements from 0 to n-1 as not present + let n = vector::length(v); + for (i in 0..n) { + vector::push_back(&mut present, false); + }; + + for (i in 0..n) { + let e = vector::borrow(v, i); + let bit = vector::borrow_mut(&mut present, *e); + *bit = true; + }; + + for (i in 0..n) { + let bit = vector::borrow(&present, i); + if(*bit == false) { + return false + }; + }; + + true + } + + #[test(fx = @aptos_framework)] + fun test_permutation(fx: signer) acquires PerBlockRandomness { + initialize_for_testing(&fx); + + let v = permutation(0); + assert!(vector::length(&v) == 0, 0); + + test_permutation_internal(1); + test_permutation_internal(2); + test_permutation_internal(3); + test_permutation_internal(4); + } + + #[test_only] + /// WARNING: Do not call this with a large `size`, since execution time will be \Omega(size!), where ! is the factorial + /// operator. 
+ fun test_permutation_internal(size: u64) acquires PerBlockRandomness { + let num_permutations = 1; + let c = 1; + for (i in 0..size) { + num_permutations = num_permutations * c; + c = c + 1; + }; + + let permutations = table_with_length::new, bool>(); + + // This loop will not exit until all permutations are created + while(table_with_length::length(&permutations) < num_permutations) { + let v = permutation(size); + assert!(vector::length(&v) == size, 0); + assert!(is_permutation(&v), 0); + + if(table_with_length::contains(&permutations, v) == false) { + table_with_length::add(&mut permutations, v, true); + } + }; + + table_with_length::drop_unchecked(permutations); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/randomness.spec.move b/aptos-move/framework/aptos-framework/sources/randomness.spec.move new file mode 100644 index 0000000000000..b0b060005dce5 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/randomness.spec.move @@ -0,0 +1,136 @@ +spec aptos_framework::randomness { + + spec module { + use aptos_framework::chain_status; + pragma verify = true; + invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); + global var: vector; + } + + spec fetch_and_increment_txn_counter(): vector { + pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == spec_fetch_and_increment_txn_counter(); + } + + spec fun spec_fetch_and_increment_txn_counter(): vector; + + spec is_safe_call(): bool { + pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == spec_is_safe_call(); + } + + spec fun spec_is_safe_call(): bool; + + spec initialize(framework: &signer) { + use std::option; + use std::signer; + let framework_addr = signer::address_of(framework); + aborts_if framework_addr != @aptos_framework; + aborts_if exists(framework_addr); + ensures global(framework_addr).seed == option::spec_none>(); + } + + spec on_new_block(vm: &signer, epoch: u64, round: u64, seed_for_new_block: 
Option>) { + use std::signer; + aborts_if signer::address_of(vm) != @vm; + ensures exists(@aptos_framework) ==> global(@aptos_framework).seed == seed_for_new_block; + ensures exists(@aptos_framework) ==> global(@aptos_framework).epoch == epoch; + ensures exists(@aptos_framework) ==> global(@aptos_framework).round == round; + } + + spec next_32_bytes(): vector { + use std::hash; + include NextBlobAbortsIf; + let input = b"APTOS_RANDOMNESS"; + let randomness = global(@aptos_framework); + let seed = option::spec_borrow(randomness.seed); + let txn_hash = transaction_context::spec_get_txn_hash(); + let txn_counter = spec_fetch_and_increment_txn_counter(); + ensures len(result) == 32; + ensures result == hash::sha3_256(concat(concat(concat(input, seed), txn_hash), txn_counter)); + } + + spec schema NextBlobAbortsIf { + let randomness = global(@aptos_framework); + aborts_if option::spec_is_none(randomness.seed); + aborts_if !spec_is_safe_call(); + aborts_if !exists(@aptos_framework); + } + + spec u8_integer(): u8 { + include NextBlobAbortsIf; + } + + spec u16_integer(): u16 { + pragma unroll = 2; + include NextBlobAbortsIf; + } + + spec u32_integer(): u32 { + pragma unroll = 4; + include NextBlobAbortsIf; + } + + spec u64_integer(): u64 { + pragma unroll = 8; + include NextBlobAbortsIf; + } + + spec u128_integer(): u128 { + pragma unroll = 16; + include NextBlobAbortsIf; + } + + spec u256_integer(): u256 { + pragma unroll = 32; + include NextBlobAbortsIf; + ensures [abstract] result == spec_u256_integer(); + } + + spec fun spec_u256_integer(): u256; + + spec u8_range(min_incl: u8, max_excl: u8): u8 { + pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved). 
+ pragma opaque; + include NextBlobAbortsIf; + aborts_if min_incl >= max_excl; + ensures result >= min_incl && result < max_excl; + } + + + spec u64_range(min_incl: u64, max_excl: u64): u64 { + include NextBlobAbortsIf; + aborts_if min_incl >= max_excl; + ensures result >= min_incl && result < max_excl; + } + + spec u256_range(min_incl: u256, max_excl: u256): u256 { + pragma verify_duration_estimate = 120; + include NextBlobAbortsIf; + aborts_if min_incl >= max_excl; + ensures result >= min_incl && result < max_excl; + } + + spec permutation(n: u64): vector { + pragma aborts_if_is_partial; + // TODO(tengzhang): complete the aborts_if conditions + // include n > 1 ==> NextBlobAbortsIf; + // aborts_if n > 1 && !exists(@aptos_framework); + } + + spec safe_add_mod_for_verification(a: u256, b: u256, m: u256): u256 { + aborts_if m < b; + aborts_if a < m - b && a + b > MAX_U256; + ensures result == spec_safe_add_mod(a, b, m); + } + + spec fun spec_safe_add_mod(a: u256, b: u256, m: u256): u256 { + if (a < m - b) { + a + b + } else { + a - (m - b) + } + } +} diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration.move b/aptos-move/framework/aptos-framework/sources/reconfiguration.move index deec0364228c8..86f96c05ca097 100644 --- a/aptos-move/framework/aptos-framework/sources/reconfiguration.move +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration.move @@ -11,6 +11,7 @@ module aptos_framework::reconfiguration { use aptos_framework::system_addresses; use aptos_framework::timestamp; use aptos_framework::chain_status; + use aptos_framework::reconfiguration_state; use aptos_framework::storage_gas; use aptos_framework::transaction_fee; @@ -21,6 +22,7 @@ module aptos_framework::reconfiguration { friend aptos_framework::gas_schedule; friend aptos_framework::genesis; friend aptos_framework::version; + friend aptos_framework::reconfiguration_with_dkg; /// Event that signals consensus to start a new epoch, /// with new configuration information. 
This is also called a @@ -118,6 +120,8 @@ module aptos_framework::reconfiguration { return }; + reconfiguration_state::on_reconfig_start(); + // Reconfiguration "forces the block" to end, as mentioned above. Therefore, we must process the collected fees // explicitly so that staking can distribute them. // @@ -149,6 +153,8 @@ module aptos_framework::reconfiguration { epoch: config_ref.epoch, }, ); + + reconfiguration_state::on_reconfig_finish(); } public fun last_reconfiguration_time(): u64 acquires Configuration { diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move b/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move index 943bc07e7b1c8..4bea1460fdb54 100644 --- a/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration.spec.move @@ -131,7 +131,8 @@ spec aptos_framework::reconfiguration { use aptos_framework::staking_config; // TODO: set because of timeout (property proved) - pragma verify_duration_estimate = 120; + pragma verify = true; + pragma verify_duration_estimate = 600; requires exists(@aptos_framework); let success = !(chain_status::is_genesis() || timestamp::spec_now_microseconds() == 0 || !reconfiguration_enabled()) diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration_state.move b/aptos-move/framework/aptos-framework/sources/reconfiguration_state.move new file mode 100644 index 0000000000000..4818dd2a1ac00 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration_state.move @@ -0,0 +1,132 @@ +/// Reconfiguration meta-state resources and util functions. +/// +/// WARNING: `reconfiguration_state::initialize()` is required before `RECONFIGURE_WITH_DKG` can be enabled. 
+module aptos_framework::reconfiguration_state { + use std::error; + use std::string; + use aptos_std::copyable_any; + use aptos_std::copyable_any::Any; + use aptos_framework::system_addresses; + use aptos_framework::timestamp; + + friend aptos_framework::reconfiguration; + friend aptos_framework::reconfiguration_with_dkg; + friend aptos_framework::stake; + + const ERECONFIG_NOT_IN_PROGRESS: u64 = 1; + + /// Reconfiguration drivers update this resources to notify other modules of some reconfiguration state. + struct State has key { + /// The state variant packed as an `Any`. + /// Currently the variant type is one of the following. + /// - `ReconfigStateInactive` + /// - `ReconfigStateActive` + variant: Any, + } + + /// A state variant indicating no reconfiguration is in progress. + struct StateInactive has copy, drop, store {} + + /// A state variant indicating a reconfiguration is in progress. + struct StateActive has copy, drop, store { + start_time_secs: u64, + } + + public fun is_initialized(): bool { + exists(@aptos_framework) + } + + public fun initialize(fx: &signer) { + system_addresses::assert_aptos_framework(fx); + if (!exists(@aptos_framework)) { + move_to(fx, State { + variant: copyable_any::pack(StateInactive {}) + }) + } + } + + public fun initialize_for_testing(fx: &signer) { + initialize(fx) + } + + /// Return whether the reconfiguration state is marked "in progress". + public(friend) fun is_in_progress(): bool acquires State { + if (!exists(@aptos_framework)) { + return false + }; + + let state = borrow_global(@aptos_framework); + let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant)); + variant_type_name == b"0x1::reconfiguration_state::StateActive" + } + + /// Called at the beginning of a reconfiguration (either immediate or async) + /// to mark the reconfiguration state "in progress" if it is currently "stopped". + /// + /// Also record the current time as the reconfiguration start time. 
(Some module, e.g., `stake.move`, needs this info). + public(friend) fun on_reconfig_start() acquires State { + if (exists(@aptos_framework)) { + let state = borrow_global_mut(@aptos_framework); + let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant)); + if (variant_type_name == b"0x1::reconfiguration_state::StateInactive") { + state.variant = copyable_any::pack(StateActive { + start_time_secs: timestamp::now_seconds() + }); + } + }; + } + + /// Get the unix time when the currently in-progress reconfiguration started. + /// Abort if the reconfiguration state is not "in progress". + public(friend) fun start_time_secs(): u64 acquires State { + let state = borrow_global(@aptos_framework); + let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant)); + if (variant_type_name == b"0x1::reconfiguration_state::StateActive") { + let active = copyable_any::unpack(state.variant); + active.start_time_secs + } else { + abort(error::invalid_state(ERECONFIG_NOT_IN_PROGRESS)) + } + } + + /// Called at the end of every reconfiguration to mark the state as "stopped". + /// Abort if the current state is not "in progress". + public(friend) fun on_reconfig_finish() acquires State { + if (exists(@aptos_framework)) { + let state = borrow_global_mut(@aptos_framework); + let variant_type_name = *string::bytes(copyable_any::type_name(&state.variant)); + if (variant_type_name == b"0x1::reconfiguration_state::StateActive") { + state.variant = copyable_any::pack(StateInactive {}); + } else { + abort(error::invalid_state(ERECONFIG_NOT_IN_PROGRESS)) + } + } + } + + #[test(fx = @aptos_framework)] + fun basic(fx: &signer) acquires State { + // Setip. + timestamp::set_time_has_started_for_testing(fx); + initialize(fx); + + // Initially no reconfig is in progress. + assert!(!is_in_progress(), 1); + + // "try_start" should work. 
+ timestamp::fast_forward_seconds(123); + on_reconfig_start(); + assert!(is_in_progress(), 1); + assert!(123 == start_time_secs(), 1); + + // Redundant `try_start` should be no-op. + timestamp::fast_forward_seconds(1); + on_reconfig_start(); + assert!(is_in_progress(), 1); + assert!(123 == start_time_secs(), 1); + + // A `finish` call should work when the state is marked "in progess". + timestamp::fast_forward_seconds(10); + on_reconfig_finish(); + assert!(!is_in_progress(), 1); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move b/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move new file mode 100644 index 0000000000000..799669225fe96 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration_state.spec.move @@ -0,0 +1,109 @@ +spec aptos_framework::reconfiguration_state { + + spec module { + use aptos_framework::chain_status; + invariant [suspendable] chain_status::is_operating() ==> exists(@aptos_framework); + } + + spec initialize(fx: &signer) { + use std::signer; + use aptos_std::from_bcs; + aborts_if signer::address_of(fx) != @aptos_framework; + let post post_state = global(@aptos_framework); + ensures exists(@aptos_framework); + ensures !exists(@aptos_framework) ==> from_bcs::deserializable(post_state.variant.data); + } + + spec initialize_for_testing(fx: &signer) { + use std::signer; + aborts_if signer::address_of(fx) != @aptos_framework; + } + + spec is_in_progress(): bool { + aborts_if false; + } + + spec fun spec_is_in_progress(): bool { + if (!exists(@aptos_framework)) { + false + } else { + copyable_any::type_name(global(@aptos_framework).variant).bytes == b"0x1::reconfiguration_state::StateActive" + } + } + + spec State { + use aptos_std::from_bcs; + use aptos_std::type_info; + invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" || + copyable_any::type_name(variant).bytes == 
b"0x1::reconfiguration_state::StateInactive"; + invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" + ==> from_bcs::deserializable(variant.data); + invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive" + ==> from_bcs::deserializable(variant.data); + invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateActive" ==> + type_info::type_name() == variant.type_name; + invariant copyable_any::type_name(variant).bytes == b"0x1::reconfiguration_state::StateInactive" ==> + type_info::type_name() == variant.type_name; + } + + spec on_reconfig_start { + use aptos_std::from_bcs; + use aptos_std::type_info; + use std::bcs; + aborts_if false; + requires exists(@aptos_framework); + let state = Any { + type_name: type_info::type_name(), + data: bcs::serialize(StateActive { + start_time_secs: timestamp::spec_now_seconds() + }) + }; + let pre_state = global(@aptos_framework); + let post post_state = global(@aptos_framework); + ensures (exists(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes + == b"0x1::reconfiguration_state::StateInactive") ==> copyable_any::type_name(post_state.variant).bytes + == b"0x1::reconfiguration_state::StateActive"; + ensures (exists(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes + == b"0x1::reconfiguration_state::StateInactive") ==> post_state.variant == state; + ensures (exists(@aptos_framework) && copyable_any::type_name(pre_state.variant).bytes + == b"0x1::reconfiguration_state::StateInactive") ==> from_bcs::deserializable(post_state.variant.data); + } + + spec start_time_secs(): u64 { + include StartTimeSecsAbortsIf; + } + + spec fun spec_start_time_secs(): u64 { + use aptos_std::from_bcs; + let state = global(@aptos_framework); + from_bcs::deserialize(state.variant.data).start_time_secs + } + + spec schema StartTimeSecsRequirement { + requires exists(@aptos_framework); + requires 
copyable_any::type_name(global(@aptos_framework).variant).bytes + == b"0x1::reconfiguration_state::StateActive"; + include UnpackRequiresStateActive { + x: global(@aptos_framework).variant + }; + } + + spec schema UnpackRequiresStateActive { + use aptos_std::from_bcs; + use aptos_std::type_info; + x: Any; + requires type_info::type_name() == x.type_name && from_bcs::deserializable(x.data); + } + + spec schema StartTimeSecsAbortsIf { + aborts_if !exists(@aptos_framework); + include copyable_any::type_name(global(@aptos_framework).variant).bytes + == b"0x1::reconfiguration_state::StateActive" ==> + copyable_any::UnpackAbortsIf { + x: global(@aptos_framework).variant + }; + aborts_if copyable_any::type_name(global(@aptos_framework).variant).bytes + != b"0x1::reconfiguration_state::StateActive"; + } + +} diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.move b/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.move new file mode 100644 index 0000000000000..3a59267f67c39 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.move @@ -0,0 +1,47 @@ +/// Reconfiguration with DKG helper functions. +module aptos_framework::reconfiguration_with_dkg { + use std::features; + use aptos_framework::consensus_config; + use aptos_framework::dkg; + use aptos_framework::execution_config; + use aptos_framework::gas_schedule; + use aptos_framework::jwks; + use aptos_framework::reconfiguration; + use aptos_framework::reconfiguration_state; + use aptos_framework::stake; + friend aptos_framework::block; + friend aptos_framework::aptos_governance; + + /// Trigger a reconfiguration with DKG. + /// Do nothing if one is already in progress. 
+ public(friend) fun try_start() { + if (dkg::in_progress()) { return }; + reconfiguration_state::on_reconfig_start(); + let cur_epoch = reconfiguration::current_epoch(); + dkg::start( + cur_epoch, + stake::cur_validator_consensus_infos(), + stake::next_validator_consensus_infos(), + ); + } + + /// Apply buffered on-chain configs (except for ValidatorSet, which is done inside `reconfiguration::reconfigure()`). + /// Re-enable validator set changes. + /// Run the default reconfiguration to enter the new epoch. + public(friend) fun finish(account: &signer) { + consensus_config::on_new_epoch(); + execution_config::on_new_epoch(); + gas_schedule::on_new_epoch(); + std::version::on_new_epoch(); + jwks::on_new_epoch(); + features::on_new_epoch(account); + reconfiguration::reconfigure(); + } + + /// Complete the current reconfiguration with DKG. + /// Abort if no DKG is in progress. + fun finish_with_dkg_result(account: &signer, dkg_result: vector) { + dkg::finish(dkg_result); + finish(account); + } +} diff --git a/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move b/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move new file mode 100644 index 0000000000000..2ca4ded927fff --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/reconfiguration_with_dkg.spec.move @@ -0,0 +1,59 @@ +spec aptos_framework::reconfiguration_with_dkg { + spec module { + pragma verify = true; + } + + spec try_start() { + use aptos_framework::chain_status; + use aptos_framework::staking_config; + requires chain_status::is_operating(); + include stake::ResourceRequirement; + include stake::GetReconfigStartTimeRequirement; + include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement; + aborts_if false; + } + + spec finish(account: &signer) { + pragma verify_duration_estimate = 600; + include FinishRequirement; + } + + spec schema FinishRequirement { + use 
aptos_framework::chain_status; + use std::signer; + use std::features; + use aptos_framework::stake; + use aptos_framework::coin::CoinInfo; + use aptos_framework::aptos_coin::AptosCoin; + use aptos_framework::transaction_fee; + use aptos_framework::staking_config; + use aptos_framework::config_buffer; + use aptos_framework::version; + use aptos_framework::consensus_config; + use aptos_framework::execution_config; + use aptos_framework::gas_schedule; + account: signer; + requires signer::address_of(account) == @aptos_framework; + requires chain_status::is_operating(); + requires exists>(@aptos_framework); + include staking_config::StakingRewardsConfigRequirement; + requires exists(@aptos_framework); + include transaction_fee::RequiresCollectedFeesPerValueLeqBlockAptosSupply; + requires exists(@std); + include config_buffer::OnNewEpochRequirement; + include config_buffer::OnNewEpochRequirement; + include config_buffer::OnNewEpochRequirement; + include config_buffer::OnNewEpochRequirement; + aborts_if false; + } + + spec finish_with_dkg_result(account: &signer, dkg_result: vector) { + use aptos_framework::dkg; + pragma verify = true; // TODO: set because of timeout (property proved). + pragma verify_duration_estimate = 600; + include FinishRequirement; + requires dkg::spec_in_progress(); + aborts_if false; + } + +} diff --git a/aptos-move/framework/aptos-framework/sources/stake.move b/aptos-move/framework/aptos-framework/sources/stake.move index 7d4f23d0319f1..9793ab2a08715 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.move +++ b/aptos-move/framework/aptos-framework/sources/stake.move @@ -38,6 +38,7 @@ module aptos_framework::stake { friend aptos_framework::block; friend aptos_framework::genesis; friend aptos_framework::reconfiguration; + friend aptos_framework::reconfiguration_with_dkg; friend aptos_framework::transaction_fee; /// Validator Config not published. 
@@ -78,6 +79,8 @@ module aptos_framework::stake { const EINVALID_LOCKUP: u64 = 18; /// Table to store collected transaction fees for each validator already exists. const EFEES_TABLE_ALREADY_EXISTS: u64 = 19; + /// Validator set change temporarily disabled because of in-progress reconfiguration. + const ERECONFIGURATION_IN_PROGRESS: u64 = 20; /// Validator status enum. We can switch to proper enum later once Move supports it. const VALIDATOR_STATUS_PENDING_ACTIVE: u64 = 1; @@ -168,7 +171,7 @@ module aptos_framework::stake { /// 1. join_validator_set adds to pending_active queue. /// 2. leave_valdiator_set moves from active to pending_inactive queue. /// 3. on_new_epoch processes two pending queues and refresh ValidatorInfo from the owner's address. - struct ValidatorSet has key { + struct ValidatorSet has copy, key, drop, store { consensus_scheme: u8, // Active validators for the current epoch. active_validators: vector, @@ -431,29 +434,38 @@ module aptos_framework::stake { aptos_framework: &signer, validators: &vector
, ) acquires ValidatorSet { + assert_reconfig_not_in_progress(); system_addresses::assert_aptos_framework(aptos_framework); - let validator_set = borrow_global_mut(@aptos_framework); let active_validators = &mut validator_set.active_validators; let pending_inactive = &mut validator_set.pending_inactive; - let len = vector::length(validators); + spec { + update ghost_active_num = len(active_validators); + update ghost_pending_inactive_num = len(pending_inactive); + }; + let len_validators = vector::length(validators); let i = 0; // Remove each validator from the validator set. while ({ spec { - invariant i <= len; + invariant i <= len_validators; invariant spec_validators_are_initialized(active_validators); invariant spec_validator_indices_are_valid(active_validators); invariant spec_validators_are_initialized(pending_inactive); invariant spec_validator_indices_are_valid(pending_inactive); + invariant ghost_active_num + ghost_pending_inactive_num == len(active_validators) + len(pending_inactive); }; - i < len + i < len_validators }) { let validator = *vector::borrow(validators, i); let validator_index = find_validator(active_validators, validator); if (option::is_some(&validator_index)) { let validator_info = vector::swap_remove(active_validators, *option::borrow(&validator_index)); vector::push_back(pending_inactive, validator_info); + spec { + update ghost_active_num = ghost_active_num - 1; + update ghost_pending_inactive_num = ghost_pending_inactive_num + 1; + }; }; i = i + 1; }; @@ -616,6 +628,7 @@ module aptos_framework::stake { /// Add `coins` into `pool_address`. this requires the corresponding `owner_cap` to be passed in. public fun add_stake_with_cap(owner_cap: &OwnerCapability, coins: Coin) acquires StakePool, ValidatorSet { + assert_reconfig_not_in_progress(); let pool_address = owner_cap.pool_address; assert_stake_pool_exists(pool_address); @@ -659,6 +672,7 @@ module aptos_framework::stake { /// Move `amount` of coins from pending_inactive to active. 
public entry fun reactivate_stake(owner: &signer, amount: u64) acquires OwnerCapability, StakePool { + assert_reconfig_not_in_progress(); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -666,6 +680,7 @@ module aptos_framework::stake { } public fun reactivate_stake_with_cap(owner_cap: &OwnerCapability, amount: u64) acquires StakePool { + assert_reconfig_not_in_progress(); let pool_address = owner_cap.pool_address; assert_stake_pool_exists(pool_address); @@ -696,7 +711,9 @@ module aptos_framework::stake { new_consensus_pubkey: vector, proof_of_possession: vector, ) acquires StakePool, ValidatorConfig { + assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); + let stake_pool = borrow_global_mut(pool_address); assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR)); @@ -728,10 +745,10 @@ module aptos_framework::stake { new_network_addresses: vector, new_fullnode_addresses: vector, ) acquires StakePool, ValidatorConfig { + assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); let stake_pool = borrow_global_mut(pool_address); assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR)); - assert!(exists(pool_address), error::not_found(EVALIDATOR_CONFIG)); let validator_info = borrow_global_mut(pool_address); let old_network_addresses = validator_info.network_addresses; @@ -805,6 +822,7 @@ module aptos_framework::stake { operator: &signer, pool_address: address ) acquires StakePool, ValidatorConfig, ValidatorSet { + assert_reconfig_not_in_progress(); assert_stake_pool_exists(pool_address); let stake_pool = borrow_global_mut(pool_address); assert!(signer::address_of(operator) == stake_pool.operator_address, error::unauthenticated(ENOT_OPERATOR)); @@ -840,6 +858,7 @@ module aptos_framework::stake { /// Similar to unlock_with_cap but will use 
ownership capability from the signing account. public entry fun unlock(owner: &signer, amount: u64) acquires OwnerCapability, StakePool { + assert_reconfig_not_in_progress(); let owner_address = signer::address_of(owner); assert_owner_cap_exists(owner_address); let ownership_cap = borrow_global(owner_address); @@ -848,6 +867,7 @@ module aptos_framework::stake { /// Unlock `amount` from the active stake. Only possible if the lockup has expired. public fun unlock_with_cap(amount: u64, owner_cap: &OwnerCapability) acquires StakePool { + assert_reconfig_not_in_progress(); // Short-circuit if amount to unlock is 0 so we don't emit events. if (amount == 0) { return @@ -889,6 +909,7 @@ module aptos_framework::stake { owner_cap: &OwnerCapability, withdraw_amount: u64 ): Coin acquires StakePool, ValidatorSet { + assert_reconfig_not_in_progress(); let pool_address = owner_cap.pool_address; assert_stake_pool_exists(pool_address); let stake_pool = borrow_global_mut(pool_address); @@ -926,6 +947,7 @@ module aptos_framework::stake { operator: &signer, pool_address: address ) acquires StakePool, ValidatorSet { + assert_reconfig_not_in_progress(); let config = staking_config::get(); assert!( staking_config::get_allow_validator_set_change(&config), @@ -1032,7 +1054,7 @@ module aptos_framework::stake { }; } - /// Triggers at epoch boundary. This function shouldn't abort. + /// Triggered during a reconfiguration. This function shouldn't abort. /// /// 1. Distribute transaction fees and rewards to stake pools of active and pending inactive validators (requested /// to leave but not yet removed). 
@@ -1077,6 +1099,7 @@ module aptos_framework::stake { while ({ spec { invariant spec_validators_are_initialized(next_epoch_validators); + invariant i <= vlen; }; i < vlen }) { @@ -1115,6 +1138,8 @@ module aptos_framework::stake { invariant vlen == len(validator_set.active_validators); invariant forall i in 0..validator_index: global(validator_set.active_validators[i].addr).validator_index < validator_index; + invariant forall i in 0..validator_index: + validator_set.active_validators[i].config.validator_index < validator_index; invariant len(validator_perf.validators) == validator_index; }; validator_index < vlen @@ -1132,12 +1157,17 @@ module aptos_framework::stake { // Automatically renew a validator's lockup for validators that will still be in the validator set in the // next epoch. let stake_pool = borrow_global_mut(validator_info.addr); - if (stake_pool.locked_until_secs <= timestamp::now_seconds()) { + let now_secs = timestamp::now_seconds(); + let reconfig_start_secs = if (chain_status::is_operating()) { + get_reconfig_start_time_secs() + } else { + now_secs + }; + if (stake_pool.locked_until_secs <= reconfig_start_secs) { spec { - assume timestamp::spec_now_seconds() + recurring_lockup_duration_secs <= MAX_U64; + assume now_secs + recurring_lockup_duration_secs <= MAX_U64; }; - stake_pool.locked_until_secs = - timestamp::now_seconds() + recurring_lockup_duration_secs; + stake_pool.locked_until_secs = now_secs + recurring_lockup_duration_secs; }; validator_index = validator_index + 1; @@ -1149,7 +1179,197 @@ module aptos_framework::stake { }; } - /// Update individual validator's stake pool + /// Return the `ValidatorConsensusInfo` of each current validator, sorted by current validator index. 
+ public fun cur_validator_consensus_infos(): vector acquires ValidatorSet { + let validator_set = borrow_global(@aptos_framework); + validator_consensus_infos_from_validator_set(validator_set) + } + + + public fun next_validator_consensus_infos(): vector acquires ValidatorSet, ValidatorPerformance, StakePool, ValidatorFees, ValidatorConfig { + // Init. + let cur_validator_set = borrow_global(@aptos_framework); + let staking_config = staking_config::get(); + let validator_perf = borrow_global(@aptos_framework); + let (minimum_stake, _) = staking_config::get_required_stake(&staking_config); + let (rewards_rate, rewards_rate_denominator) = staking_config::get_reward_rate(&staking_config); + + // Compute new validator set. + let new_active_validators = vector[]; + let num_new_actives = 0; + let candidate_idx = 0; + let new_total_power = 0; + let num_cur_actives = vector::length(&cur_validator_set.active_validators); + let num_cur_pending_actives = vector::length(&cur_validator_set.pending_active); + spec { + assume num_cur_actives + num_cur_pending_actives <= MAX_U64; + }; + let num_candidates = num_cur_actives + num_cur_pending_actives; + while ({ + spec { + invariant candidate_idx <= num_candidates; + invariant spec_validators_are_initialized(new_active_validators); + invariant len(new_active_validators) == num_new_actives; + invariant forall i in 0..len(new_active_validators): + new_active_validators[i].config.validator_index == i; + invariant num_new_actives <= candidate_idx; + invariant spec_validators_are_initialized(new_active_validators); + }; + candidate_idx < num_candidates + }) { + let candidate_in_current_validator_set = candidate_idx < num_cur_actives; + let candidate = if (candidate_idx < num_cur_actives) { + vector::borrow(&cur_validator_set.active_validators, candidate_idx) + } else { + vector::borrow(&cur_validator_set.pending_active, candidate_idx - num_cur_actives) + }; + let stake_pool = borrow_global(candidate.addr); + let cur_active = 
coin::value(&stake_pool.active); + let cur_pending_active = coin::value(&stake_pool.pending_active); + let cur_pending_inactive = coin::value(&stake_pool.pending_inactive); + + let cur_reward = if (candidate_in_current_validator_set && cur_active > 0) { + spec { + assert candidate.config.validator_index < len(validator_perf.validators); + }; + let cur_perf = vector::borrow(&validator_perf.validators, candidate.config.validator_index); + spec { + assume cur_perf.successful_proposals + cur_perf.failed_proposals <= MAX_U64; + }; + calculate_rewards_amount(cur_active, cur_perf.successful_proposals, cur_perf.successful_proposals + cur_perf.failed_proposals, rewards_rate, rewards_rate_denominator) + } else { + 0 + }; + + let cur_fee = 0; + if (features::collect_and_distribute_gas_fees()) { + let fees_table = &borrow_global(@aptos_framework).fees_table; + if (table::contains(fees_table, candidate.addr)) { + let fee_coin = table::borrow(fees_table, candidate.addr); + cur_fee = coin::value(fee_coin); + } + }; + + let lockup_expired = get_reconfig_start_time_secs() >= stake_pool.locked_until_secs; + spec { + assume cur_active + cur_pending_active + cur_reward + cur_fee <= MAX_U64; + assume cur_active + cur_pending_inactive + cur_pending_active + cur_reward + cur_fee <= MAX_U64; + }; + let new_voting_power = + cur_active + + if (lockup_expired) { 0 } else { cur_pending_inactive } + + cur_pending_active + + cur_reward + cur_fee; + + if (new_voting_power >= minimum_stake) { + let config = *borrow_global(candidate.addr); + config.validator_index = num_new_actives; + let new_validator_info = ValidatorInfo { + addr: candidate.addr, + voting_power: new_voting_power, + config, + }; + + // Update ValidatorSet. 
+ spec { + assume new_total_power + new_voting_power <= MAX_U128; + }; + new_total_power = new_total_power + (new_voting_power as u128); + vector::push_back(&mut new_active_validators, new_validator_info); + num_new_actives = num_new_actives + 1; + + }; + candidate_idx = candidate_idx + 1; + }; + + let new_validator_set = ValidatorSet { + consensus_scheme: cur_validator_set.consensus_scheme, + active_validators: new_active_validators, + pending_inactive: vector[], + pending_active: vector[], + total_voting_power: new_total_power, + total_joining_power: 0, + }; + + validator_consensus_infos_from_validator_set(&new_validator_set) + } + + fun validator_consensus_infos_from_validator_set(validator_set: &ValidatorSet): vector { + let validator_consensus_infos = vector[]; + + let num_active = vector::length(&validator_set.active_validators); + let num_pending_inactive = vector::length(&validator_set.pending_inactive); + spec { + assume num_active + num_pending_inactive <= MAX_U64; + }; + let total = num_active + num_pending_inactive; + + // Pre-fill the return value with dummy values. 
+ let idx = 0; + while ({ + spec { + invariant idx <= len(validator_set.active_validators) + len(validator_set.pending_inactive); + invariant len(validator_consensus_infos) == idx; + invariant len(validator_consensus_infos) <= len(validator_set.active_validators) + len(validator_set.pending_inactive); + }; + idx < total + }) { + vector::push_back(&mut validator_consensus_infos, validator_consensus_info::default()); + idx = idx + 1; + }; + spec { + assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive); + assert spec_validator_indices_are_valid_config(validator_set.active_validators, + len(validator_set.active_validators) + len(validator_set.pending_inactive)); + }; + + vector::for_each_ref(&validator_set.active_validators, |obj| { + let vi: &ValidatorInfo = obj; + spec { + assume len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive); + assert vi.config.validator_index < len(validator_consensus_infos); + }; + let vci = vector::borrow_mut(&mut validator_consensus_infos, vi.config.validator_index); + *vci = validator_consensus_info::new( + vi.addr, + vi.config.consensus_pubkey, + vi.voting_power + ); + spec { + assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive); + }; + }); + + vector::for_each_ref(&validator_set.pending_inactive, |obj| { + let vi: &ValidatorInfo = obj; + spec { + assume len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive); + assert vi.config.validator_index < len(validator_consensus_infos); + }; + let vci = vector::borrow_mut(&mut validator_consensus_infos, vi.config.validator_index); + *vci = validator_consensus_info::new( + vi.addr, + vi.config.consensus_pubkey, + vi.voting_power + ); + spec { + assert len(validator_consensus_infos) == len(validator_set.active_validators) + len(validator_set.pending_inactive); + }; + 
}); + + validator_consensus_infos + } + + fun addresses_from_validator_infos(infos: &vector): vector
{ + vector::map_ref(infos, |obj| { + let info: &ValidatorInfo = obj; + info.addr + }) + } + + /// Calculate the stake amount of a stake pool for the next epoch. + /// Update individual validator's stake pool if `commit == true`. + /// /// 1. distribute transaction fees to active/pending_inactive delegations /// 2. distribute rewards to active/pending_inactive delegations /// 3. process pending_active, pending_inactive correspondingly @@ -1202,7 +1422,7 @@ module aptos_framework::stake { // Pending inactive stake is only fully unlocked and moved into inactive if the current lockup cycle has expired let current_lockup_expiration = stake_pool.locked_until_secs; - if (timestamp::now_seconds() >= current_lockup_expiration) { + if (get_reconfig_start_time_secs() >= current_lockup_expiration) { coin::merge( &mut stake_pool.inactive, coin::extract_all(&mut stake_pool.pending_inactive), @@ -1218,6 +1438,15 @@ module aptos_framework::stake { ); } + /// Assuming we are in a middle of a reconfiguration (no matter it is immediate or async), get its start time. + fun get_reconfig_start_time_secs(): u64 { + if (reconfiguration_state::is_initialized()) { + reconfiguration_state::start_time_secs() + } else { + timestamp::now_seconds() + } + } + /// Calculate the rewards amount. 
fun calculate_rewards_amount( stake_amount: u64, @@ -1359,9 +1588,16 @@ module aptos_framework::stake { assert!(exists(owner), error::not_found(EOWNER_CAP_NOT_FOUND)); } + fun assert_reconfig_not_in_progress() { + assert!(!reconfiguration_state::is_in_progress(), error::invalid_state(ERECONFIGURATION_IN_PROGRESS)); + } + #[test_only] use aptos_framework::aptos_coin; use aptos_std::bls12381::proof_of_possession_from_bytes; + use aptos_framework::reconfiguration_state; + use aptos_framework::validator_consensus_info; + use aptos_framework::validator_consensus_info::ValidatorConsensusInfo; #[test_only] use aptos_std::fixed_point64; @@ -1373,6 +1609,7 @@ module aptos_framework::stake { #[test_only] public fun initialize_for_test(aptos_framework: &signer) { + reconfiguration_state::initialize(aptos_framework); initialize_for_test_custom(aptos_framework, 100, 10000, LOCKUP_CYCLE_SECONDS, true, 1, 100, 1000000); } @@ -1414,6 +1651,7 @@ module aptos_framework::stake { voting_power_increase_limit: u64, ) { timestamp::set_time_has_started_for_testing(aptos_framework); + reconfiguration_state::initialize(aptos_framework); if (!exists(@aptos_framework)) { initialize(aptos_framework); }; @@ -2296,6 +2534,79 @@ module aptos_framework::stake { leave_validator_set(validator, validator_address); } + #[test( + aptos_framework = @aptos_framework, + validator_1 = @aptos_framework, + validator_2 = @0x2, + validator_3 = @0x3, + validator_4 = @0x4, + validator_5 = @0x5 + )] + fun test_validator_consensus_infos_from_validator_set( + aptos_framework: &signer, + validator_1: &signer, + validator_2: &signer, + validator_3: &signer, + validator_4: &signer, + validator_5: &signer, + ) acquires AllowedValidators, AptosCoinCapabilities, OwnerCapability, StakePool, ValidatorConfig, ValidatorPerformance, ValidatorSet, ValidatorFees { + let v1_addr = signer::address_of(validator_1); + let v2_addr = signer::address_of(validator_2); + let v3_addr = signer::address_of(validator_3); + let v4_addr = 
signer::address_of(validator_4); + let v5_addr = signer::address_of(validator_5); + + initialize_for_test(aptos_framework); + + let (_sk_1, pk_1, pop_1) = generate_identity(); + let (_sk_2, pk_2, pop_2) = generate_identity(); + let (_sk_3, pk_3, pop_3) = generate_identity(); + let (_sk_4, pk_4, pop_4) = generate_identity(); + let (_sk_5, pk_5, pop_5) = generate_identity(); + let pk_1_bytes = bls12381::public_key_to_bytes(&pk_1); + let pk_3_bytes = bls12381::public_key_to_bytes(&pk_3); + let pk_5_bytes = bls12381::public_key_to_bytes(&pk_5); + + initialize_test_validator(&pk_1, &pop_1, validator_1, 101, false, false); + initialize_test_validator(&pk_2, &pop_2, validator_2, 102, false, false); + initialize_test_validator(&pk_3, &pop_3, validator_3, 103, false, false); + initialize_test_validator(&pk_4, &pop_4, validator_4, 104, false, false); + initialize_test_validator(&pk_5, &pop_5, validator_5, 105, false, false); + + join_validator_set(validator_3, v3_addr); + join_validator_set(validator_1, v1_addr); + join_validator_set(validator_5, v5_addr); + end_epoch(); + let vci_vec_0 = validator_consensus_infos_from_validator_set(borrow_global(@aptos_framework)); + let vci_addrs = vector::map_ref(&vci_vec_0, |obj|{ + let vci: &ValidatorConsensusInfo = obj; + validator_consensus_info::get_addr(vci) + }); + let vci_pks = vector::map_ref(&vci_vec_0, |obj|{ + let vci: &ValidatorConsensusInfo = obj; + validator_consensus_info::get_pk_bytes(vci) + }); + let vci_voting_powers = vector::map_ref(&vci_vec_0, |obj|{ + let vci: &ValidatorConsensusInfo = obj; + validator_consensus_info::get_voting_power(vci) + }); + assert!(vector[@0x5, @aptos_framework, @0x3] == vci_addrs, 1); + assert!(vector[pk_5_bytes, pk_1_bytes, pk_3_bytes] == vci_pks, 2); + assert!(vector[105, 101, 103] == vci_voting_powers, 3); + leave_validator_set(validator_3, v3_addr); + let vci_vec_1 = validator_consensus_infos_from_validator_set(borrow_global(@aptos_framework)); + assert!(vci_vec_0 == vci_vec_1, 11); + 
join_validator_set(validator_2, v2_addr); + let vci_vec_2 = validator_consensus_infos_from_validator_set(borrow_global(@aptos_framework)); + assert!(vci_vec_0 == vci_vec_2, 12); + leave_validator_set(validator_1, v1_addr); + let vci_vec_3 = validator_consensus_infos_from_validator_set(borrow_global(@aptos_framework)); + assert!(vci_vec_0 == vci_vec_3, 13); + join_validator_set(validator_4, v4_addr); + let vci_vec_4 = validator_consensus_infos_from_validator_set(borrow_global(@aptos_framework)); + assert!(vci_vec_0 == vci_vec_4, 14); + } + #[test( aptos_framework = @aptos_framework, validator_1 = @aptos_framework, @@ -2446,7 +2757,7 @@ module aptos_framework::stake { staking_config::initialize_rewards( aptos_framework, fixed_point64::create_from_rational(1, 100), - fixed_point64::create_from_rational(3, 1000), + fixed_point64::create_from_rational(3, 1000), one_year_in_secs, genesis_time_in_secs, fixed_point64::create_from_rational(50, 100), @@ -2621,7 +2932,9 @@ module aptos_framework::stake { // Set the number of blocks to 1, to give out rewards to non-failing validators. set_validator_perf_at_least_one_block(); timestamp::fast_forward_seconds(EPOCH_DURATION); + reconfiguration_state::on_reconfig_start(); on_new_epoch(); + reconfiguration_state::on_reconfig_finish(); } #[test_only] diff --git a/aptos-move/framework/aptos-framework/sources/stake.spec.move b/aptos-move/framework/aptos-framework/sources/stake.spec.move index 64e229f81bd22..8b6f5c5a07b80 100644 --- a/aptos-move/framework/aptos-framework/sources/stake.spec.move +++ b/aptos-move/framework/aptos-framework/sources/stake.spec.move @@ -41,6 +41,7 @@ spec aptos_framework::stake { // Global invariants // ----------------- spec module { + pragma verify = true; // The validator set should satisfy its desired invariant. invariant [suspendable] exists(@aptos_framework) ==> validator_set_is_valid(); // After genesis, `AptosCoinCapabilities`, `ValidatorPerformance` and `ValidatorSet` exist. 
@@ -54,6 +55,8 @@ spec aptos_framework::stake { // ghost variable global ghost_valid_perf: ValidatorPerformance; global ghost_proposer_idx: Option; + global ghost_active_num: u64; + global ghost_pending_inactive_num: u64; } // property 1: the validator set resource stores consensus information for each validator. @@ -82,11 +85,16 @@ spec aptos_framework::stake { // A desired invariant for the validator set. spec fun validator_set_is_valid(): bool { let validator_set = global(@aptos_framework); + validator_set_is_valid_impl(validator_set) + } + + spec fun validator_set_is_valid_impl(validator_set: ValidatorSet): bool { spec_validators_are_initialized(validator_set.active_validators) && spec_validators_are_initialized(validator_set.pending_inactive) && spec_validators_are_initialized(validator_set.pending_active) && spec_validator_indices_are_valid(validator_set.active_validators) && spec_validator_indices_are_valid(validator_set.pending_inactive) + && spec_validator_indices_active_pending_inactive(validator_set) } @@ -135,6 +143,7 @@ spec aptos_framework::stake { // `Validator` is initialized once. 
spec initialize(aptos_framework: &signer) { + pragma disable_invariants_in_body; let aptos_addr = signer::address_of(aptos_framework); aborts_if !system_addresses::is_aptos_framework_address(aptos_addr); aborts_if exists(aptos_addr); @@ -156,6 +165,7 @@ spec aptos_framework::stake { aborts_if !exists(pool_address); aborts_if !exists(@aptos_framework); aborts_if !exists(@aptos_framework); + aborts_if reconfiguration_state::spec_is_in_progress(); let stake_pool = global(pool_address); let validator_set = global(@aptos_framework); @@ -201,6 +211,7 @@ spec aptos_framework::stake { withdraw_amount: u64 ) { + aborts_if reconfiguration_state::spec_is_in_progress(); let addr = signer::address_of(owner); let ownership_cap = global(addr); let pool_address = ownership_cap.pool_address; @@ -237,8 +248,9 @@ spec aptos_framework::stake { operator: &signer, pool_address: address ) { + pragma disable_invariants_in_body; requires chain_status::is_operating(); - + aborts_if reconfiguration_state::spec_is_in_progress(); let config = staking_config::get(); aborts_if !staking_config::get_allow_validator_set_change(config); aborts_if !exists(pool_address); @@ -252,6 +264,13 @@ spec aptos_framework::stake { let active_validators = validator_set.active_validators; let pending_active = validator_set.pending_active; + let post post_validator_set = global(@aptos_framework); + let post post_active_validators = post_validator_set.active_validators; + let pending_inactive_validators = validator_set.pending_inactive; + let post post_pending_inactive_validators = post_validator_set.pending_inactive; + ensures len(active_validators) + len(pending_inactive_validators) == len(post_active_validators) + + len(post_pending_inactive_validators); + aborts_if !validator_find_bool && !option::spec_is_some(spec_find_validator(active_validators, pool_address)); aborts_if !validator_find_bool && vector::length(validator_set.active_validators) <= option::spec_borrow(spec_find_validator(active_validators, 
pool_address)); aborts_if !validator_find_bool && vector::length(validator_set.active_validators) < 2; @@ -280,6 +299,7 @@ spec aptos_framework::stake { let pool_address = owner_cap.pool_address; let pre_stake_pool = global(pool_address); let post stake_pool = global(pool_address); + aborts_if reconfiguration_state::spec_is_in_progress(); aborts_if amount != 0 && !exists(pool_address); modifies global(pool_address); include StakedValueNochange; @@ -320,6 +340,7 @@ spec aptos_framework::stake { modifies global(pool_address); include StakedValueNochange; + aborts_if reconfiguration_state::spec_is_in_progress(); // Only the true operator address can update the network and full node addresses of the validator. aborts_if !exists(pool_address); aborts_if !exists(pool_address); @@ -342,6 +363,7 @@ spec aptos_framework::stake { let pool_address = owner_cap.pool_address; include StakedValueNochange; + aborts_if reconfiguration_state::spec_is_in_progress(); aborts_if !stake_pool_exists(pool_address); let pre_stake_pool = global(pool_address); @@ -361,6 +383,7 @@ spec aptos_framework::stake { ) { let pre_stake_pool = global(pool_address); let post validator_info = global(pool_address); + aborts_if reconfiguration_state::spec_is_in_progress(); aborts_if !exists(pool_address); aborts_if signer::address_of(operator) != pre_stake_pool.operator_address; aborts_if !exists(pool_address); @@ -390,6 +413,7 @@ spec aptos_framework::stake { // The following resource requirement cannot be discharged by the global // invariants because this function is called during genesis. include ResourceRequirement; + include GetReconfigStartTimeRequirement; include staking_config::StakingRewardsConfigRequirement; include aptos_framework::aptos_coin::ExistsAptosCoin; // This function should never abort. 
@@ -411,10 +435,18 @@ spec aptos_framework::stake { validator_perf.validators[option::spec_borrow(ghost_proposer_idx)].successful_proposals + 1); } + spec next_validator_consensus_infos { + aborts_if false; + include ResourceRequirement; + include GetReconfigStartTimeRequirement; + include features::spec_periodical_reward_rate_decrease_enabled() ==> staking_config::StakingRewardsConfigEnabledRequirement; + } + spec update_stake_pool { // TODO: set because of timeout (property proved) pragma verify_duration_estimate = 120; include ResourceRequirement; + include GetReconfigStartTimeRequirement; include staking_config::StakingRewardsConfigRequirement; include UpdateStakePoolAbortsIf; @@ -451,7 +483,7 @@ spec aptos_framework::stake { post_active_value == stake_pool.active.value + rewards_amount_1 + stake_pool.pending_active.value }; // when current lockup cycle has expired, pending inactive should be fully unlocked and moved into inactive - ensures if (timestamp::spec_now_seconds() >= stake_pool.locked_until_secs) { + ensures if (spec_get_reconfig_start_time_secs() >= stake_pool.locked_until_secs) { post_pending_inactive_value == 0 && post_inactive_value == stake_pool.inactive.value + stake_pool.pending_inactive.value + rewards_amount_2 } else { @@ -527,6 +559,23 @@ spec aptos_framework::stake { include (rewards_amount > 0) ==> coin::CoinAddAbortsIf { amount: amount }; } + spec get_reconfig_start_time_secs(): u64 { + include GetReconfigStartTimeRequirement; + } + + spec schema GetReconfigStartTimeRequirement { + requires exists(@aptos_framework); + include reconfiguration_state::StartTimeSecsRequirement; + } + + spec fun spec_get_reconfig_start_time_secs(): u64 { + if (exists(@aptos_framework)) { + reconfiguration_state::spec_start_time_secs() + } else { + timestamp::spec_now_seconds() + } + } + spec calculate_rewards_amount { pragma opaque; requires rewards_rate <= MAX_REWARDS_RATE; @@ -573,11 +622,16 @@ spec aptos_framework::stake { spec remove_validators { requires 
chain_status::is_operating(); - let validator_set = global(@aptos_framework); + let post post_validator_set = global(@aptos_framework); let active_validators = validator_set.active_validators; + let post post_active_validators = post_validator_set.active_validators; + let pending_inactive_validators = validator_set.pending_inactive; + let post post_pending_inactive_validators = post_validator_set.pending_inactive; invariant len(active_validators) > 0; + ensures len(active_validators) + len(pending_inactive_validators) == len(post_active_validators) + + len(post_pending_inactive_validators); } spec is_current_epoch_validator { @@ -602,10 +656,12 @@ spec aptos_framework::stake { spec add_stake_with_cap { include ResourceRequirement; let amount = coins.value; + aborts_if reconfiguration_state::spec_is_in_progress(); include AddStakeWithCapAbortsIfAndEnsures { amount }; } spec add_stake { + aborts_if reconfiguration_state::spec_is_in_progress(); include ResourceRequirement; include AddStakeAbortsIfAndEnsures; } @@ -683,6 +739,15 @@ spec aptos_framework::stake { aborts_if !exists(owner); } + spec validator_consensus_infos_from_validator_set(validator_set: &ValidatorSet): vector { + aborts_if false; + invariant spec_validator_indices_are_valid_config(validator_set.active_validators, + len(validator_set.active_validators) + len(validator_set.pending_inactive)); + invariant len(validator_set.pending_inactive) == 0 || + spec_validator_indices_are_valid_config(validator_set.pending_inactive, + len(validator_set.active_validators) + len(validator_set.pending_inactive)); + } + // --------------------------------- // Spec helper functions and schemas // --------------------------------- @@ -755,10 +820,31 @@ spec aptos_framework::stake { spec_has_validator_config(validators[i].addr) } + spec fun spec_validators_are_initialized_addrs(addrs: vector
): bool { + forall i in 0..len(addrs): + spec_has_stake_pool(addrs[i]) && + spec_has_validator_config(addrs[i]) + } + + // A predicate that the validator index of each given validator in-range. spec fun spec_validator_indices_are_valid(validators: vector): bool { + spec_validator_indices_are_valid_addr(validators, spec_validator_index_upper_bound()) && + spec_validator_indices_are_valid_config(validators, spec_validator_index_upper_bound()) + } + + spec fun spec_validator_indices_are_valid_addr(validators: vector, upper_bound: u64): bool { + forall i in 0..len(validators): + global(validators[i].addr).validator_index < upper_bound + } + + spec fun spec_validator_indices_are_valid_config(validators: vector, upper_bound: u64): bool { forall i in 0..len(validators): - global(validators[i].addr).validator_index < spec_validator_index_upper_bound() + validators[i].config.validator_index < upper_bound + } + + spec fun spec_validator_indices_active_pending_inactive(validator_set: ValidatorSet): bool { + len(validator_set.pending_inactive) + len(validator_set.active_validators) == spec_validator_index_upper_bound() } // The upper bound of validator indices. diff --git a/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move b/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move index 68c8937f6a0e5..7001e2bd37328 100644 --- a/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move +++ b/aptos-move/framework/aptos-framework/sources/staking_contract.spec.move @@ -186,9 +186,11 @@ spec aptos_framework::staking_contract { /// Account is not frozen and sufficient to withdraw. /// Staking_contract exists the stacker/operator pair. 
spec add_stake(staker: &signer, operator: address, amount: u64) { + use aptos_framework::reconfiguration_state; pragma verify_duration_estimate = 120; // TODO: this function times out include stake::ResourceRequirement; + aborts_if reconfiguration_state::spec_is_in_progress(); let staker_address = signer::address_of(staker); include ContractExistsAbortsIf { staker: staker_address }; diff --git a/aptos-move/framework/aptos-framework/sources/storage_gas.spec.move b/aptos-move/framework/aptos-framework/sources/storage_gas.spec.move index 603ee966f0f03..36a08057e8af3 100644 --- a/aptos-move/framework/aptos-framework/sources/storage_gas.spec.move +++ b/aptos-move/framework/aptos-framework/sources/storage_gas.spec.move @@ -88,6 +88,7 @@ spec aptos_framework::storage_gas { /// A non decreasing curve must ensure that next is greater than cur. spec new_gas_curve(min_gas: u64, max_gas: u64, points: vector): GasCurve { + pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved). include NewGasCurveAbortsIf; include ValidatePointsAbortsIf; /// [high-level-req-3] @@ -139,7 +140,7 @@ spec aptos_framework::storage_gas { /// A non decreasing curve must ensure that next is greater than cur. spec validate_points(points: &vector) { pragma aborts_if_is_strict = false; - pragma verify = false; // TODO: Disabled. Investigate why this fails. + pragma verify_duration_estimate = 120; // TODO: set because of timeout (property proved). 
pragma opaque; include ValidatePointsAbortsIf; } diff --git a/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move b/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move index aeb022f51e9a7..a175984c99bb5 100644 --- a/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move +++ b/aptos-move/framework/aptos-framework/sources/transaction_context.spec.move @@ -50,6 +50,8 @@ spec aptos_framework::transaction_context { spec fun spec_get_txn_hash(): vector; spec get_transaction_hash(): vector { pragma opaque; + aborts_if [abstract] false; + ensures result == spec_get_txn_hash(); // property 1: Fetching the transaction hash should return a vector with 32 bytes, if the auid feature flag is enabled. /// [high-level-req-1] ensures [abstract] len(result) == 32; diff --git a/aptos-move/framework/aptos-framework/sources/validator_consensus_info.move b/aptos-move/framework/aptos-framework/sources/validator_consensus_info.move new file mode 100644 index 0000000000000..3a2abdd0b7769 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/validator_consensus_info.move @@ -0,0 +1,42 @@ +/// Common type: `ValidatorConsensusInfo`. +module aptos_framework::validator_consensus_info { + /// Information about a validator that participates consensus. + struct ValidatorConsensusInfo has copy, drop, store { + addr: address, + pk_bytes: vector, + voting_power: u64, + } + + /// Create a default `ValidatorConsensusInfo` object. Value may be invalid. Only for place holding prupose. + public fun default(): ValidatorConsensusInfo { + ValidatorConsensusInfo { + addr: @vm, + pk_bytes: vector[], + voting_power: 0, + } + } + + /// Create a `ValidatorConsensusInfo` object. + public fun new(addr: address, pk_bytes: vector, voting_power: u64): ValidatorConsensusInfo { + ValidatorConsensusInfo { + addr, + pk_bytes, + voting_power, + } + } + + /// Get `ValidatorConsensusInfo.addr`. 
+ public fun get_addr(vci: &ValidatorConsensusInfo): address { + vci.addr + } + + /// Get `ValidatorConsensusInfo.pk_bytes`. + public fun get_pk_bytes(vci: &ValidatorConsensusInfo): vector { + vci.pk_bytes + } + + /// Get `ValidatorConsensusInfo.voting_power`. + public fun get_voting_power(vci: &ValidatorConsensusInfo): u64 { + vci.voting_power + } +} diff --git a/aptos-move/framework/aptos-framework/sources/validator_consensus_info.spec.move b/aptos-move/framework/aptos-framework/sources/validator_consensus_info.spec.move new file mode 100644 index 0000000000000..30d9a6e7e7cc0 --- /dev/null +++ b/aptos-move/framework/aptos-framework/sources/validator_consensus_info.spec.move @@ -0,0 +1,5 @@ +spec aptos_framework::validator_consensus_info { + spec module { + pragma verify = true; + } +} diff --git a/aptos-move/framework/aptos-stdlib/doc/any.md b/aptos-move/framework/aptos-stdlib/doc/any.md index 3b86f2a84b841..e8198a8d9bdbd 100644 --- a/aptos-move/framework/aptos-stdlib/doc/any.md +++ b/aptos-move/framework/aptos-stdlib/doc/any.md @@ -185,6 +185,7 @@ Returns the type name of this Any type_name: type_info::type_name<T>(), data: bcs::serialize<T>(x) }; +ensures [abstract] from_bcs::deserializable<T>(result.data);
@@ -200,13 +201,38 @@ Returns the type name of this Any -
aborts_if type_info::type_name<T>() != x.type_name;
-aborts_if !from_bcs::deserializable<T>(x.data);
+
include UnpackAbortsIf<T>;
 ensures result == from_bcs::deserialize<T>(x.data);
 
+ + + + +
schema UnpackAbortsIf<T> {
+    x: Any;
+    aborts_if type_info::type_name<T>() != x.type_name;
+    aborts_if !from_bcs::deserializable<T>(x.data);
+}
+
+ + + + + + + +
schema UnpackRequirement<T> {
+    x: Any;
+    requires type_info::type_name<T>() == x.type_name;
+    requires from_bcs::deserializable<T>(x.data);
+}
+
+ + + ### Function `type_name` diff --git a/aptos-move/framework/aptos-stdlib/doc/copyable_any.md b/aptos-move/framework/aptos-stdlib/doc/copyable_any.md index fe4b5192f454b..7ac120437e652 100644 --- a/aptos-move/framework/aptos-stdlib/doc/copyable_any.md +++ b/aptos-move/framework/aptos-stdlib/doc/copyable_any.md @@ -171,10 +171,12 @@ Returns the type name of this Any
aborts_if false;
+pragma opaque;
 ensures result == Any {
     type_name: type_info::type_name<T>(),
     data: bcs::serialize<T>(x)
 };
+ensures [abstract] from_bcs::deserializable<T>(result.data);
 
@@ -190,13 +192,25 @@ Returns the type name of this Any -
aborts_if type_info::type_name<T>() != x.type_name;
-aborts_if !from_bcs::deserializable<T>(x.data);
+
include UnpackAbortsIf<T>;
 ensures result == from_bcs::deserialize<T>(x.data);
 
+ + + + +
schema UnpackAbortsIf<T> {
+    x: Any;
+    aborts_if type_info::type_name<T>() != x.type_name;
+    aborts_if !from_bcs::deserializable<T>(x.data);
+}
+
+ + + ### Function `type_name` diff --git a/aptos-move/framework/aptos-stdlib/doc/simple_map.md b/aptos-move/framework/aptos-stdlib/doc/simple_map.md index 844de13914d3c..ba98d23617a26 100644 --- a/aptos-move/framework/aptos-stdlib/doc/simple_map.md +++ b/aptos-move/framework/aptos-stdlib/doc/simple_map.md @@ -734,6 +734,7 @@ Remove a key/value pair from the map. The key must exist.
pragma intrinsic;
 pragma opaque;
+aborts_if [abstract] false;
 ensures [abstract] spec_len(result) == 0;
 ensures [abstract] forall k: Key: !spec_contains_key(result, k);
 
@@ -753,6 +754,7 @@ Remove a key/value pair from the map. The key must exist.
pragma intrinsic;
 pragma opaque;
+aborts_if [abstract] false;
 ensures [abstract] spec_len(result) == len(keys);
 ensures [abstract] forall k: Key: spec_contains_key(result, k) <==> vector::spec_contains(keys, k);
 ensures [abstract] forall i in 0..len(keys):
@@ -887,6 +889,7 @@ Remove a key/value pair from the map. The key must exist.
 
 
pragma intrinsic;
 pragma opaque;
+aborts_if [abstract] false;
 ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_1);
 ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_2);
 ensures [abstract] spec_contains_key(map, key);
diff --git a/aptos-move/framework/aptos-stdlib/sources/any.spec.move b/aptos-move/framework/aptos-stdlib/sources/any.spec.move
index 5a09041eef17a..2e55009e4bed3 100644
--- a/aptos-move/framework/aptos-stdlib/sources/any.spec.move
+++ b/aptos-move/framework/aptos-stdlib/sources/any.spec.move
@@ -6,18 +6,33 @@ spec aptos_std::any {
 
     spec pack<T: drop + store>(x: T): Any {
         use std::bcs;
+        use aptos_std::from_bcs;
         aborts_if false;
         ensures result == Any {
             type_name: type_info::type_name<T>(),
             data: bcs::serialize<T>(x)
         };
+        ensures [abstract] from_bcs::deserializable<T>(result.data);
     }
 
     spec unpack<T>(x: Any): T {
         use aptos_std::from_bcs;
+        include UnpackAbortsIf<T>;
+        ensures result == from_bcs::deserialize<T>(x.data);
+    }
+
+    spec schema UnpackAbortsIf<T> {
+        use aptos_std::from_bcs;
+        x: Any;
         aborts_if type_info::type_name<T>() != x.type_name;
         aborts_if !from_bcs::deserializable<T>(x.data);
-        ensures result == from_bcs::deserialize<T>(x.data);
+    }
+
+    spec schema UnpackRequirement<T> {
+        use aptos_std::from_bcs;
+        x: Any;
+        requires type_info::type_name<T>() == x.type_name;
+        requires from_bcs::deserializable<T>(x.data);
     }
 
     spec type_name(x: &Any): &String {
diff --git a/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move b/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move
index 39ac330cccb44..d1d64a81a4fac 100644
--- a/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move
+++ b/aptos-move/framework/aptos-stdlib/sources/copyable_any.spec.move
@@ -6,18 +6,27 @@ spec aptos_std::copyable_any {
 
     spec pack<T: drop + store + copy>(x: T): Any {
         use std::bcs;
+        use aptos_std::from_bcs;
         aborts_if false;
+        pragma opaque;
         ensures result == Any {
             type_name: type_info::type_name<T>(),
             data: bcs::serialize<T>(x)
         };
+        ensures [abstract] from_bcs::deserializable<T>(result.data);
     }
 
     spec unpack<T>(x: Any): T {
         use aptos_std::from_bcs;
+        include UnpackAbortsIf<T>;
+        ensures result == from_bcs::deserialize<T>(x.data);
+    }
+
+    spec schema UnpackAbortsIf<T> {
+        use aptos_std::from_bcs;
+        x: Any;
         aborts_if type_info::type_name<T>() != x.type_name;
         aborts_if !from_bcs::deserializable<T>(x.data);
-        ensures result == from_bcs::deserialize<T>(x.data);
     }
 
     spec type_name(x: &Any): &String {
diff --git a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move
index 9071fc043610c..35258eb37532d 100644
--- a/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move
+++ b/aptos-move/framework/aptos-stdlib/sources/simple_map.spec.move
@@ -71,6 +71,7 @@ spec aptos_std::simple_map {
     spec new<Key: store, Value: store>(): SimpleMap<Key, Value> {
         pragma intrinsic;
         pragma opaque;
+        aborts_if [abstract] false;
         ensures [abstract] spec_len(result) == 0;
         ensures [abstract] forall k: Key: !spec_contains_key(result, k);
     }
@@ -81,6 +82,7 @@ spec aptos_std::simple_map {
     ): SimpleMap<Key, Value> {
         pragma intrinsic;
         pragma opaque;
+        aborts_if [abstract] false;
         ensures [abstract] spec_len(result) == len(keys);
         ensures [abstract] forall k: Key: spec_contains_key(result, k) <==> vector::spec_contains(keys, k);
         ensures [abstract] forall i in 0..len(keys):
@@ -104,6 +106,7 @@ spec aptos_std::simple_map {
         ): (std::option::Option<Key>, std::option::Option<Value>) {
         pragma intrinsic;
         pragma opaque;
+        aborts_if [abstract] false;
         ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_1);
         ensures [abstract] !spec_contains_key(old(map), key) ==> option::is_none(result_2);
         ensures [abstract] spec_contains_key(map, key);
diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs
index 045a27c1a664a..c55f002469df4 100644
--- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs
+++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs
@@ -814,8 +814,11 @@ pub enum EntryFunctionCall {
         new_voter: AccountAddress,
     },
 
-    /// Updates the major version to a larger version.
-    /// This can be called by on chain governance.
+    /// Deprecated by `set_for_next_epoch()`.
+    ///
+    /// WARNING: calling this while randomness is enabled will trigger a new epoch without randomness!
+    ///
+    /// TODO: update all the tests that reference this function, then disable this function.
     VersionSetVersion {
         major: u64,
     },
@@ -3748,8 +3751,11 @@ pub fn staking_proxy_set_voter(
     ))
 }
 
-/// Updates the major version to a larger version.
-/// This can be called by on chain governance.
+/// Deprecated by `set_for_next_epoch()`.
+///
+/// WARNING: calling this while randomness is enabled will trigger a new epoch without randomness!
+///
+/// TODO: update all the tests that reference this function, then disable this function.
 pub fn version_set_version(major: u64) -> TransactionPayload {
     TransactionPayload::EntryFunction(EntryFunction::new(
         ModuleId::new(
diff --git a/aptos-move/framework/move-stdlib/Move.toml b/aptos-move/framework/move-stdlib/Move.toml
index 15b23a4e45bff..857ec0edc48f5 100644
--- a/aptos-move/framework/move-stdlib/Move.toml
+++ b/aptos-move/framework/move-stdlib/Move.toml
@@ -3,4 +3,5 @@ name = "MoveStdlib"
 version = "1.5.0"
 
 [addresses]
+vm = "0x0"
 std = "0x1"
diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md
index 182e470ff503c..edd1e4f4e3eeb 100644
--- a/aptos-move/framework/move-stdlib/doc/features.md
+++ b/aptos-move/framework/move-stdlib/doc/features.md
@@ -30,6 +30,7 @@ return true.
 
 
 -  [Resource `Features`](#0x1_features_Features)
+-  [Resource `PendingFeatures`](#0x1_features_PendingFeatures)
 -  [Constants](#@Constants_0)
 -  [Function `code_dependency_check_enabled`](#0x1_features_code_dependency_check_enabled)
 -  [Function `treat_friend_as_private`](#0x1_features_treat_friend_as_private)
@@ -86,6 +87,8 @@ return true.
 -  [Function `commission_change_delegation_pool_enabled`](#0x1_features_commission_change_delegation_pool_enabled)
 -  [Function `get_bn254_strutures_feature`](#0x1_features_get_bn254_strutures_feature)
 -  [Function `bn254_structures_enabled`](#0x1_features_bn254_structures_enabled)
+-  [Function `get_reconfigure_with_dkg_feature`](#0x1_features_get_reconfigure_with_dkg_feature)
+-  [Function `reconfigure_with_dkg_enabled`](#0x1_features_reconfigure_with_dkg_enabled)
 -  [Function `get_oidb_feature`](#0x1_features_get_oidb_feature)
 -  [Function `oidb_feature_enabled`](#0x1_features_oidb_feature_enabled)
 -  [Function `get_oidb_zkless_feature`](#0x1_features_get_oidb_zkless_feature)
@@ -98,22 +101,31 @@ return true.
 -  [Function `get_max_object_nesting_check_feature`](#0x1_features_get_max_object_nesting_check_feature)
 -  [Function `max_object_nesting_check_enabled`](#0x1_features_max_object_nesting_check_enabled)
 -  [Function `change_feature_flags`](#0x1_features_change_feature_flags)
+-  [Function `change_feature_flags_for_next_epoch`](#0x1_features_change_feature_flags_for_next_epoch)
+-  [Function `on_new_epoch`](#0x1_features_on_new_epoch)
 -  [Function `is_enabled`](#0x1_features_is_enabled)
 -  [Function `set`](#0x1_features_set)
 -  [Function `contains`](#0x1_features_contains)
+-  [Function `apply_diff`](#0x1_features_apply_diff)
+-  [Function `ensure_vm_or_framework_signer`](#0x1_features_ensure_vm_or_framework_signer)
 -  [Specification](#@Specification_1)
     -  [Resource `Features`](#@Specification_1_Features)
+    -  [Resource `PendingFeatures`](#@Specification_1_PendingFeatures)
     -  [Function `periodical_reward_rate_decrease_enabled`](#@Specification_1_periodical_reward_rate_decrease_enabled)
     -  [Function `partial_governance_voting_enabled`](#@Specification_1_partial_governance_voting_enabled)
     -  [Function `module_event_enabled`](#@Specification_1_module_event_enabled)
     -  [Function `change_feature_flags`](#@Specification_1_change_feature_flags)
+    -  [Function `change_feature_flags_for_next_epoch`](#@Specification_1_change_feature_flags_for_next_epoch)
+    -  [Function `on_new_epoch`](#@Specification_1_on_new_epoch)
     -  [Function `is_enabled`](#@Specification_1_is_enabled)
     -  [Function `set`](#@Specification_1_set)
     -  [Function `contains`](#@Specification_1_contains)
+    -  [Function `apply_diff`](#@Specification_1_apply_diff)
 
 
 
use 0x1::error;
 use 0x1::signer;
+use 0x1::vector;
 
@@ -130,6 +142,35 @@ The enabled features, represented by a bitset stored on chain. +
+Fields + + +
+
+features: vector<u8> +
+
+ +
+
+ + +
+ + + +## Resource `PendingFeatures` + +This resource holds the feature vec updates received in the current epoch. +On epoch change, the updates take effect and this buffer is cleared. + + +
struct PendingFeatures has key
+
+ + +
Fields @@ -298,7 +339,7 @@ to create higher throughput concurrent variants. Lifetime: transient -
const CONCURRENT_FUNGIBLE_ASSETS: u64 = 49;
+
const CONCURRENT_FUNGIBLE_ASSETS: u64 = 50;
 
@@ -531,6 +572,18 @@ Lifetime: transient + + +The on-chain randomness feature. + +Lifetime: transient + + +
const RECONFIGURE_WITH_DKG: u64 = 45;
+
+ + + Whether resource groups are enabled. @@ -1942,6 +1995,52 @@ Lifetime: transient +
+ + + +## Function `get_reconfigure_with_dkg_feature` + + + +
public fun get_reconfigure_with_dkg_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_reconfigure_with_dkg_feature(): u64 { RECONFIGURE_WITH_DKG }
+
+ + + +
+ + + +## Function `reconfigure_with_dkg_enabled` + + + +
public fun reconfigure_with_dkg_enabled(): bool
+
+ + + +
+Implementation + + +
public fun reconfigure_with_dkg_enabled(): bool acquires Features {
+    is_enabled(RECONFIGURE_WITH_DKG)
+}
+
+ + +
@@ -2233,6 +2332,85 @@ Function to enable and disable features. Can only be called by a signer of @std. + + + + +## Function `change_feature_flags_for_next_epoch` + +Enable and disable features *for the next epoch*. + +NOTE: when it takes effects depend on feature RECONFIGURE_WITH_DKG. +See aptos_framework::aptos_governance::reconfigure() for more details. + +Can only be called by a signer of @std. + + +
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>) acquires PendingFeatures, Features {
+    assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+
+    // Figure out the baseline feature vec that the diff will be applied to.
+    let new_feature_vec = if (exists<PendingFeatures>(@std)) {
+        // If there is a buffered feature vec, use it as the baseline.
+        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
+        features
+    } else if (exists<Features>(@std)) {
+        // Otherwise, use the currently effective feature flag vec as the baseline, if it exists.
+        borrow_global<Features>(@std).features
+    } else {
+        // Otherwise, use an empty feature vec.
+        vector[]
+    };
+
+    // Apply the diff and save it to the buffer.
+    apply_diff(&mut new_feature_vec, enable, disable);
+    move_to(framework, PendingFeatures { features: new_feature_vec });
+}
+
+ + + +
+ + + +## Function `on_new_epoch` + +Apply all the pending feature flag changes. Should only be used at the end of a reconfiguration with DKG. + +While the scope is public, it can only be usd in system transactions like block_prologue and governance proposals, +who have permission to set the flag that's checked in extract(). + + +
public fun on_new_epoch(vm_or_framework: &signer)
+
+ + + +
+Implementation + + +
public fun on_new_epoch(vm_or_framework: &signer) acquires Features, PendingFeatures {
+    ensure_vm_or_framework_signer(vm_or_framework);
+    if (exists<PendingFeatures>(@std)) {
+        let PendingFeatures { features } = move_from<PendingFeatures>(@std);
+        borrow_global_mut<Features>(@std).features = features;
+    }
+}
+
+ + +
@@ -2321,6 +2499,60 @@ Helper to check whether a feature flag is enabled. + + + + +## Function `apply_diff` + + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>)
+
+ + + +
+Implementation + + +
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>) {
+    vector::for_each(enable, |feature| {
+        set(features, feature, true);
+    });
+    vector::for_each(disable, |feature| {
+        set(features, feature, false);
+    });
+}
+
+ + + +
+ + + +## Function `ensure_vm_or_framework_signer` + + + +
fun ensure_vm_or_framework_signer(account: &signer)
+
+ + + +
+Implementation + + +
fun ensure_vm_or_framework_signer(account: &signer) {
+    let addr = signer::address_of(account);
+    assert!(addr == @std || addr == @vm, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED));
+}
+
+ + +
@@ -2354,6 +2586,32 @@ Helper to check whether a feature flag is enabled. + + +### Resource `PendingFeatures` + + +
struct PendingFeatures has key
+
+ + + +
+
+features: vector<u8> +
+
+ +
+
+ + + +
pragma bv=b"0";
+
+ + + ### Function `periodical_reward_rate_decrease_enabled` @@ -2437,6 +2695,55 @@ Helper to check whether a feature flag is enabled. + + +### Function `change_feature_flags_for_next_epoch` + + +
public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector<u64>, disable: vector<u64>)
+
+ + + + +
aborts_if signer::address_of(framework) != @std;
+
+ + + + + + + +
fun spec_contains(features: vector<u8>, feature: u64): bool {
+   ((int2bv((((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)) as u8) & features[feature/8] as u8) > (0 as u8)
+       && (feature / 8) < len(features)
+}
+
+ + + + + +### Function `on_new_epoch` + + +
public fun on_new_epoch(vm_or_framework: &signer)
+
+ + + + +
let addr = signer::address_of(vm_or_framework);
+aborts_if addr != @std && addr != @vm;
+aborts_if exists<PendingFeatures>(@std) && !exists<Features>(@std);
+let features_pending = global<PendingFeatures>(@std).features;
+let post features_std = global<Features>(@std).features;
+ensures exists<PendingFeatures>(@std) ==> features_std == features_pending;
+
+ + + ### Function `is_enabled` @@ -2546,14 +2853,22 @@ Helper to check whether a feature flag is enabled. + - +### Function `apply_diff` -
fun spec_contains(features: vector<u8>, feature: u64): bool {
-   ((int2bv((((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)) as u8) & features[feature/8] as u8) > (0 as u8)
-       && (feature / 8) < len(features)
-}
+
fun apply_diff(features: &mut vector<u8>, enable: vector<u64>, disable: vector<u64>)
+
+ + + + +
aborts_if [abstract] false;
+ensures [abstract] forall i in disable: !spec_contains(features, i);
+ensures [abstract] forall i in enable: !vector::spec_contains(disable, i)
+    ==> spec_contains(features, i);
+pragma opaque;
 
diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index 127ced208c7f4..f13294c38662d 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -347,6 +347,15 @@ module std::features { is_enabled(BN254_STRUCTURES) } + /// The on-chain randomness feature. + /// + /// Lifetime: transient + const RECONFIGURE_WITH_DKG: u64 = 45; + public fun get_reconfigure_with_dkg_feature(): u64 { RECONFIGURE_WITH_DKG } + public fun reconfigure_with_dkg_enabled(): bool acquires Features { + is_enabled(RECONFIGURE_WITH_DKG) + } + /// Whether the OIDB feature is enabled, possibly with the ZK-less verification mode. /// /// Lifetime: transient @@ -383,7 +392,7 @@ module std::features { /// Whether enable Fungible Asset creation /// to create higher throughput concurrent variants. /// Lifetime: transient - const CONCURRENT_FUNGIBLE_ASSETS: u64 = 49; + const CONCURRENT_FUNGIBLE_ASSETS: u64 = 50; public fun get_concurrent_fungible_assets_feature(): u64 { CONCURRENT_FUNGIBLE_ASSETS } @@ -419,6 +428,12 @@ module std::features { features: vector, } + /// This resource holds the feature vec updates received in the current epoch. + /// On epoch change, the updates take effect and this buffer is cleared. + struct PendingFeatures has key { + features: vector, + } + /// Function to enable and disable features. Can only be called by a signer of @std. public fun change_feature_flags(framework: &signer, enable: vector, disable: vector) acquires Features { @@ -435,6 +450,45 @@ module std::features { }); } + /// Enable and disable features *for the next epoch*. + /// + /// NOTE: when it takes effects depend on feature `RECONFIGURE_WITH_DKG`. + /// See `aptos_framework::aptos_governance::reconfigure()` for more details. + /// + /// Can only be called by a signer of @std. 
+ public fun change_feature_flags_for_next_epoch(framework: &signer, enable: vector, disable: vector) acquires PendingFeatures, Features { + assert!(signer::address_of(framework) == @std, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED)); + + // Figure out the baseline feature vec that the diff will be applied to. + let new_feature_vec = if (exists(@std)) { + // If there is a buffered feature vec, use it as the baseline. + let PendingFeatures { features } = move_from(@std); + features + } else if (exists(@std)) { + // Otherwise, use the currently effective feature flag vec as the baseline, if it exists. + borrow_global(@std).features + } else { + // Otherwise, use an empty feature vec. + vector[] + }; + + // Apply the diff and save it to the buffer. + apply_diff(&mut new_feature_vec, enable, disable); + move_to(framework, PendingFeatures { features: new_feature_vec }); + } + + /// Apply all the pending feature flag changes. Should only be used at the end of a reconfiguration with DKG. + /// + /// While the scope is public, it can only be usd in system transactions like `block_prologue` and governance proposals, + /// who have permission to set the flag that's checked in `extract()`. + public fun on_new_epoch(vm_or_framework: &signer) acquires Features, PendingFeatures { + ensure_vm_or_framework_signer(vm_or_framework); + if (exists(@std)) { + let PendingFeatures { features } = move_from(@std); + borrow_global_mut(@std).features = features; + } + } + #[view] /// Check whether the feature is enabled. 
public fun is_enabled(feature: u64): bool acquires Features { @@ -463,6 +517,20 @@ module std::features { byte_index < vector::length(features) && (*vector::borrow(features, byte_index) & bit_mask) != 0 } + fun apply_diff(features: &mut vector, enable: vector, disable: vector) { + vector::for_each(enable, |feature| { + set(features, feature, true); + }); + vector::for_each(disable, |feature| { + set(features, feature, false); + }); + } + + fun ensure_vm_or_framework_signer(account: &signer) { + let addr = signer::address_of(account); + assert!(addr == @std || addr == @vm, error::permission_denied(EFRAMEWORK_SIGNER_NEEDED)); + } + #[test] fun test_feature_sets() { let features = vector[]; diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.spec.move b/aptos-move/framework/move-stdlib/sources/configs/features.spec.move index 488215fb6df23..dc0c10a18ba04 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.spec.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.spec.move @@ -4,6 +4,10 @@ spec std::features { pragma bv=b"0"; } + spec PendingFeatures { + pragma bv=b"0"; + } + spec set(features: &mut vector, feature: u64, include: bool) { pragma bv=b"0"; aborts_if false; @@ -11,12 +15,25 @@ spec std::features { ensures include == spec_contains(features, feature); } + + spec apply_diff(features: &mut vector, enable: vector, disable: vector) { + aborts_if [abstract] false; // TODO(#12011) + ensures [abstract] forall i in disable: !spec_contains(features, i); + ensures [abstract] forall i in enable: !vector::spec_contains(disable, i) + ==> spec_contains(features, i); + pragma opaque; + } + spec contains(features: &vector, feature: u64): bool { pragma bv=b"0"; aborts_if false; ensures result == spec_contains(features, feature); } + spec change_feature_flags_for_next_epoch(framework: &signer, enable: vector, disable: vector) { + aborts_if signer::address_of(framework) != @std; + // TODO(tengzhang): add functional spec + } 
spec fun spec_contains(features: vector, feature: u64): bool { ((int2bv((((1 as u8) << ((feature % (8 as u64)) as u64)) as u8)) as u8) & features[feature/8] as u8) > (0 as u8) @@ -74,4 +91,13 @@ spec std::features { aborts_if [abstract] false; ensures [abstract] result == spec_module_event_enabled(); } + + spec on_new_epoch(vm_or_framework: &signer) { + let addr = signer::address_of(vm_or_framework); + aborts_if addr != @std && addr != @vm; + aborts_if exists(@std) && !exists(@std); + let features_pending = global(@std).features; + let post features_std = global(@std).features; + ensures exists(@std) ==> features_std == features_pending; + } } diff --git a/aptos-move/framework/src/aptos.rs b/aptos-move/framework/src/aptos.rs index a78b0e7bbda4f..b27745c8eb289 100644 --- a/aptos-move/framework/src/aptos.rs +++ b/aptos-move/framework/src/aptos.rs @@ -187,6 +187,7 @@ static NAMED_ADDRESSES: Lazy> = Lazy::new(|| result.insert("aptos_token".to_owned(), three); result.insert("aptos_token_objects".to_owned(), four); result.insert("core_resources".to_owned(), resources); + result.insert("vm".to_owned(), zero); result.insert("vm_reserved".to_owned(), zero); result }); diff --git a/aptos-move/framework/src/natives/consensus_config.rs b/aptos-move/framework/src/natives/consensus_config.rs new file mode 100644 index 0000000000000..0d15f084b7f3d --- /dev/null +++ b/aptos-move/framework/src/natives/consensus_config.rs @@ -0,0 +1,31 @@ +// Copyright © Aptos Foundation + +use aptos_native_interface::{ + safely_pop_arg, RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeResult, +}; +use aptos_types::on_chain_config::OnChainConsensusConfig; +use move_vm_runtime::native_functions::NativeFunction; +use move_vm_types::{loaded_data::runtime_types::Type, values::Value}; +use smallvec::{smallvec, SmallVec}; +use std::collections::VecDeque; + +pub fn validator_txn_enabled( + _context: &mut SafeNativeContext, + _ty_args: Vec, + mut args: VecDeque, +) -> SafeNativeResult> { + let 
config_bytes = safely_pop_arg!(args, Vec); + let config = bcs::from_bytes::(&config_bytes).unwrap_or_default(); + Ok(smallvec![Value::bool(config.is_vtxn_enabled())]) +} + +pub fn make_all( + builder: &SafeNativeBuilder, +) -> impl Iterator + '_ { + let natives = vec![( + "validator_txn_enabled_internal", + validator_txn_enabled as RawSafeNative, + )]; + + builder.make_named_natives(natives) +} diff --git a/aptos-move/framework/src/natives/mod.rs b/aptos-move/framework/src/natives/mod.rs index 408b9f660c11d..9f5cdf25b6cab 100644 --- a/aptos-move/framework/src/natives/mod.rs +++ b/aptos-move/framework/src/natives/mod.rs @@ -5,6 +5,7 @@ pub mod account; pub mod aggregator_natives; pub mod code; +pub mod consensus_config; pub mod create_signer; pub mod cryptography; pub mod debug; @@ -12,6 +13,7 @@ pub mod event; pub mod hash; pub mod object; pub mod object_code_deployment; +pub mod randomness; pub mod state_storage; pub mod string_utils; pub mod transaction_context; @@ -62,6 +64,7 @@ pub fn all_natives( add_natives_from_module!("type_info", type_info::make_all(builder)); add_natives_from_module!("util", util::make_all(builder)); add_natives_from_module!("from_bcs", util::make_all(builder)); + add_natives_from_module!("randomness", randomness::make_all(builder)); add_natives_from_module!( "ristretto255_bulletproofs", cryptography::bulletproofs::make_all(builder) @@ -79,6 +82,7 @@ pub fn all_natives( add_natives_from_module!("object", object::make_all(builder)); add_natives_from_module!("debug", debug::make_all(builder)); add_natives_from_module!("string_utils", string_utils::make_all(builder)); + add_natives_from_module!("consensus_config", consensus_config::make_all(builder)); make_table_from_iter(framework_addr, natives) } diff --git a/aptos-move/framework/src/natives/randomness.rs b/aptos-move/framework/src/natives/randomness.rs new file mode 100644 index 0000000000000..8c2e94cf86629 --- /dev/null +++ b/aptos-move/framework/src/natives/randomness.rs @@ -0,0 +1,74 
@@ +// Copyright © Aptos Foundation + +use crate::natives::transaction_context::NativeTransactionContext; +use aptos_native_interface::{ + RawSafeNative, SafeNativeBuilder, SafeNativeContext, SafeNativeResult, +}; +use better_any::{Tid, TidAble}; +use move_vm_runtime::native_functions::NativeFunction; +use move_vm_types::{loaded_data::runtime_types::Type, values::Value}; +use smallvec::{smallvec, SmallVec}; +use std::collections::VecDeque; + +/// A txn-local counter that increments each time a random 32-byte blob is requested. +#[derive(Tid, Default)] +pub struct RandomnessContext { + txn_local_state: Vec, // 8-byte counter +} + +impl RandomnessContext { + pub fn new() -> Self { + Self { + txn_local_state: vec![0; 8], + } + } + + pub fn increment(&mut self) { + for byte in self.txn_local_state.iter_mut() { + if *byte < 255 { + *byte += 1; + break; + } else { + *byte = 0; + } + } + } +} + +pub fn fetch_and_increment_txn_counter( + context: &mut SafeNativeContext, + _ty_args: Vec, + _args: VecDeque, +) -> SafeNativeResult> { + // TODO: charge gas? + let rand_ctxt = context.extensions_mut().get_mut::(); + let ret = rand_ctxt.txn_local_state.to_vec(); + rand_ctxt.increment(); + Ok(smallvec![Value::vector_u8(ret)]) +} + +pub fn is_safe_call( + context: &mut SafeNativeContext, + _ty_args: Vec, + _args: VecDeque, +) -> SafeNativeResult> { + let ctx = context.extensions().get::(); + // TODO: charge gas? 
+ Ok(smallvec![Value::bool( + ctx.get_is_friend_or_private_entry_func() + )]) +} + +pub fn make_all( + builder: &SafeNativeBuilder, +) -> impl Iterator + '_ { + let natives = vec![ + ( + "fetch_and_increment_txn_counter", + fetch_and_increment_txn_counter as RawSafeNative, + ), + ("is_safe_call", is_safe_call), + ]; + + builder.make_named_natives(natives) +} diff --git a/aptos-move/framework/src/natives/transaction_context.rs b/aptos-move/framework/src/natives/transaction_context.rs index 0323b564fe792..0a4f5edd532bd 100644 --- a/aptos-move/framework/src/natives/transaction_context.rs +++ b/aptos-move/framework/src/natives/transaction_context.rs @@ -23,6 +23,8 @@ pub struct NativeTransactionContext { auid_counter: u64, script_hash: Vec, chain_id: u8, + /// True if the current TXN's payload was an entry function marked as either public(friend) or private + is_friend_or_private_entry_func: bool, } impl NativeTransactionContext { @@ -34,12 +36,21 @@ impl NativeTransactionContext { auid_counter: 0, script_hash, chain_id, + is_friend_or_private_entry_func: false, } } pub fn chain_id(&self) -> u8 { self.chain_id } + + pub fn set_is_friend_or_private_entry_func(&mut self) { + self.is_friend_or_private_entry_func = true; + } + + pub fn get_is_friend_or_private_entry_func(&self) -> bool { + self.is_friend_or_private_entry_func + } } /*************************************************************************************************** diff --git a/aptos-move/move-examples/on_chain_dice/Move.toml b/aptos-move/move-examples/on_chain_dice/Move.toml new file mode 100644 index 0000000000000..32c79fb57df7f --- /dev/null +++ b/aptos-move/move-examples/on_chain_dice/Move.toml @@ -0,0 +1,9 @@ +[package] +name = "OnChainDice" +version = "0.0.0" + +[addresses] +module_owner = "_" + +[dependencies] +AptosFramework = { local = "../../framework/aptos-framework" } diff --git a/aptos-move/move-examples/on_chain_dice/sources/dice.move b/aptos-move/move-examples/on_chain_dice/sources/dice.move 
new file mode 100644 index 0000000000000..304b685284548 --- /dev/null +++ b/aptos-move/move-examples/on_chain_dice/sources/dice.move @@ -0,0 +1,21 @@ +module module_owner::dice { + use std::signer::address_of; + use std::vector; + use aptos_framework::randomness; + + struct DiceRollHistory has key { + rolls: vector, + } + + entry fun roll(account: signer) acquires DiceRollHistory { + let addr = address_of(&account); + let roll_history = if (exists(addr)) { + move_from(addr) + } else { + DiceRollHistory { rolls: vector[] } + }; + let new_roll = randomness::u64_range(0, 6); + vector::push_back(&mut roll_history.rolls, new_roll); + move_to(&account, roll_history); + } +} diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index 9ef8a50a1c55a..0d6feae826b5d 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -55,7 +55,12 @@ const GOVERNANCE_MODULE_NAME: &str = "aptos_governance"; const CODE_MODULE_NAME: &str = "code"; const VERSION_MODULE_NAME: &str = "version"; const OIDB_MODULE_NAME: &str = "openid_account"; +#[allow(dead_code)] const JWKS_MODULE_NAME: &str = "jwks"; +const CONFIG_BUFFER_MODULE_NAME: &str = "config_buffer"; +const DKG_MODULE_NAME: &str = "dkg"; +const RANDOMNESS_MODULE_NAME: &str = "randomness"; +const RECONFIGURATION_STATE_MODULE_NAME: &str = "reconfiguration_state"; const NUM_SECONDS_PER_YEAR: u64 = 365 * 24 * 60 * 60; const MICRO_SECONDS_PER_SECOND: u64 = 1_000_000; @@ -76,6 +81,7 @@ pub struct GenesisConfiguration { pub voting_power_increase_limit: u64, pub employee_vesting_start: u64, pub employee_vesting_period_duration: u64, + pub initial_features_override: Option, } pub static GENESIS_KEYPAIR: Lazy<(Ed25519PrivateKey, Ed25519PublicKey)> = Lazy::new(|| { @@ -136,7 +142,13 @@ pub fn encode_aptos_mainnet_genesis_transaction( &execution_config, &gas_schedule, ); - initialize_features(&mut session); + initialize_features( + &mut session, + genesis_config + .initial_features_override 
+ .clone() + .map(Features::into_flag_vec), + ); initialize_aptos_coin(&mut session); initialize_on_chain_governance(&mut session, genesis_config); create_accounts(&mut session, accounts); @@ -245,17 +257,28 @@ pub fn encode_genesis_change_set( execution_config, gas_schedule, ); - initialize_features(&mut session); + initialize_features( + &mut session, + genesis_config + .initial_features_override + .clone() + .map(Features::into_flag_vec), + ); if genesis_config.is_test { initialize_core_resources_and_aptos_coin(&mut session, core_resources_key); } else { initialize_aptos_coin(&mut session); } + initialize_config_buffer(&mut session); + initialize_dkg(&mut session); + initialize_reconfiguration_state(&mut session); + initialize_randomness(&mut session); initialize_on_chain_governance(&mut session, genesis_config); create_and_initialize_validators(&mut session, validators); if genesis_config.is_test { allow_core_resources_to_set_version(&mut session); } + initialize_jwks(&mut session); initialize_oidb(&mut session, chain_id); set_genesis_end(&mut session); @@ -457,15 +480,16 @@ pub fn default_features() -> Vec { // FeatureFlag::RECONFIGURE_WITH_DKG, //TODO: re-enable once randomness is ready. 
FeatureFlag::OIDB_SIGNATURE, FeatureFlag::OIDB_ZKLESS_SIGNATURE, - FeatureFlag::JWK_CONSENSUS, + // FeatureFlag::JWK_CONSENSUS, FeatureFlag::REFUNDABLE_BYTES, FeatureFlag::OBJECT_CODE_DEPLOYMENT, FeatureFlag::MAX_OBJECT_NESTING_CHECK, ] } -fn initialize_features(session: &mut SessionExt) { - let features: Vec = default_features() +fn initialize_features(session: &mut SessionExt, features_override: Option>) { + let features: Vec = features_override + .unwrap_or_else(default_features) .into_iter() .map(|feature| feature as u64) .collect(); @@ -493,6 +517,57 @@ fn initialize_aptos_coin(session: &mut SessionExt) { ); } +fn initialize_config_buffer(session: &mut SessionExt) { + exec_function( + session, + CONFIG_BUFFER_MODULE_NAME, + "initialize", + vec![], + serialize_values(&vec![MoveValue::Signer(CORE_CODE_ADDRESS)]), + ); +} + +fn initialize_dkg(session: &mut SessionExt) { + exec_function( + session, + DKG_MODULE_NAME, + "initialize", + vec![], + serialize_values(&vec![MoveValue::Signer(CORE_CODE_ADDRESS)]), + ); +} + +fn initialize_randomness(session: &mut SessionExt) { + exec_function( + session, + RANDOMNESS_MODULE_NAME, + "initialize", + vec![], + serialize_values(&vec![MoveValue::Signer(CORE_CODE_ADDRESS)]), + ); +} + +fn initialize_reconfiguration_state(session: &mut SessionExt) { + exec_function( + session, + RECONFIGURATION_STATE_MODULE_NAME, + "initialize", + vec![], + serialize_values(&vec![MoveValue::Signer(CORE_CODE_ADDRESS)]), + ); +} + +#[allow(dead_code)] +fn initialize_jwks(session: &mut SessionExt) { + exec_function( + session, + JWKS_MODULE_NAME, + "initialize", + vec![], + serialize_values(&vec![MoveValue::Signer(CORE_CODE_ADDRESS)]), + ); +} + fn set_genesis_end(session: &mut SessionExt) { exec_function( session, @@ -561,19 +636,6 @@ fn initialize_oidb(session: &mut SessionExt, chain_id: ChainId) { ]), ); } - exec_function( - session, - JWKS_MODULE_NAME, - "upsert_oidc_provider", - vec![], - serialize_values(&vec![ - 
MoveValue::Signer(CORE_CODE_ADDRESS), - "https://accounts.google.com".to_string().as_move_value(), - "https://accounts.google.com/.well-known/openid-configuration" - .to_string() - .as_move_value(), - ]), - ); } fn create_accounts(session: &mut SessionExt, accounts: &[AccountBalance]) { @@ -885,6 +947,7 @@ pub fn generate_test_genesis( voting_power_increase_limit: 50, employee_vesting_start: 1663456089, employee_vesting_period_duration: 5 * 60, // 5 minutes + initial_features_override: None, }, &OnChainConsensusConfig::default_for_genesis(), &OnChainExecutionConfig::default_for_genesis(), @@ -932,6 +995,7 @@ fn mainnet_genesis_config() -> GenesisConfiguration { voting_power_increase_limit: 30, employee_vesting_start: 1663456089, employee_vesting_period_duration: 5 * 60, // 5 minutes + initial_features_override: None, } } diff --git a/aptos-node/Cargo.toml b/aptos-node/Cargo.toml index ba27106e06be9..b00af35e9c0df 100644 --- a/aptos-node/Cargo.toml +++ b/aptos-node/Cargo.toml @@ -76,14 +76,14 @@ maplit = { workspace = true } num_cpus = { workspace = true } rand = { workspace = true } rayon = { workspace = true } +regex = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +serde_merge = { workspace = true } serde_yaml = { workspace = true } tokio = { workspace = true } tokio-stream = { workspace = true } url = { workspace = true } -regex = { workspace = true } -serde_merge = { workspace = true } [dev-dependencies] claims = { workspace = true } diff --git a/aptos-node/src/lib.rs b/aptos-node/src/lib.rs index ce0a562983da0..877fb81848447 100644 --- a/aptos-node/src/lib.rs +++ b/aptos-node/src/lib.rs @@ -192,7 +192,7 @@ fn load_remote_config( warn!("{:?} is not a dir. using initial config", config_dir); return Ok(None); } - let mut entries = fs::read_dir(config_dir.clone())? + let entries = fs::read_dir(config_dir.clone())? 
.filter_map(|res| { let Ok(entry) = res else { return None; diff --git a/aptos-node/src/tests.rs b/aptos-node/src/tests.rs index 151bdc4d34327..c7a84d199ac14 100644 --- a/aptos-node/src/tests.rs +++ b/aptos-node/src/tests.rs @@ -1,11 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{create_single_node_test_config, load_remote_config, network}; -use aptos_config::{ - config::{NetworkConfig, NodeConfig, WaypointConfig}, - network_id::NetworkId, -}; +use crate::{create_single_node_test_config, network}; +use aptos_config::config::{NodeConfig, WaypointConfig}; use aptos_event_notifications::EventSubscriptionService; use aptos_infallible::RwLock; use aptos_storage_interface::{DbReader, DbReaderWriter, DbWriter}; diff --git a/config/src/config/identity_config.rs b/config/src/config/identity_config.rs index 0d2142574d834..9fcfd3aec0422 100644 --- a/config/src/config/identity_config.rs +++ b/config/src/config/identity_config.rs @@ -2,14 +2,16 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{config::SecureBackend, keys::ConfigKey}; +use anyhow::anyhow; use aptos_crypto::{ bls12381, ed25519::Ed25519PrivateKey, x25519::{self, PRIVATE_KEY_SIZE}, ValidCryptoMaterial, }; -use aptos_types::account_address::{ - from_identity_public_key, AccountAddress, AccountAddress as PeerId, +use aptos_types::{ + account_address::{from_identity_public_key, AccountAddress, AccountAddress as PeerId}, + dkg::{real_dkg::maybe_dk_from_bls_sk, DKGTrait, DefaultDKG}, }; use serde::{Deserialize, Serialize}; use std::{ @@ -43,6 +45,21 @@ impl IdentityBlob { let mut file = File::open(path)?; Ok(file.write_all(serde_yaml::to_string(self)?.as_bytes())?) 
} + + pub fn try_into_dkg_dealer_private_key( + self, + ) -> Option<::DealerPrivateKey> { + self.consensus_private_key + } + + pub fn try_into_dkg_new_validator_decrypt_key( + self, + ) -> anyhow::Result<::NewValidatorDecryptKey> { + let consensus_sk = self.consensus_private_key.as_ref().ok_or_else(|| { + anyhow!("try_into_dkg_new_validator_decrypt_key failed with missing consensus key") + })?; + maybe_dk_from_bls_sk(consensus_sk) + } } #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] diff --git a/consensus/Cargo.toml b/consensus/Cargo.toml index c2a11a12ac6ec..78101173323d1 100644 --- a/consensus/Cargo.toml +++ b/consensus/Cargo.toml @@ -23,6 +23,7 @@ aptos-consensus-notifications = { workspace = true } aptos-consensus-types = { workspace = true } aptos-crypto = { workspace = true } aptos-crypto-derive = { workspace = true } +aptos-dkg = { workspace = true } aptos-enum-conversion-derive = { workspace = true } aptos-event-notifications = { workspace = true } aptos-executor = { workspace = true } @@ -76,6 +77,7 @@ serde = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } serde_yaml = { workspace = true } +sha3 = { workspace = true } strum_macros = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } @@ -91,6 +93,7 @@ aptos-keygen = { workspace = true } aptos-mempool = { workspace = true, features = ["fuzzing"] } aptos-network = { workspace = true, features = ["fuzzing"] } aptos-safety-rules = { workspace = true, features = ["testing"] } +aptos-vm = { workspace = true, features = ["fuzzing"] } aptos-vm-validator = { workspace = true } claims = { workspace = true } move-core-types = { workspace = true } diff --git a/consensus/src/block_storage/tracing.rs b/consensus/src/block_storage/tracing.rs index 1a08f4db118a9..302fc7e336877 100644 --- a/consensus/src/block_storage/tracing.rs +++ b/consensus/src/block_storage/tracing.rs @@ -19,8 +19,7 @@ impl BlockStage { pub const QC_ADDED: &'static 
str = "qc_added"; pub const QC_AGGREGATED: &'static str = "qc_aggregated"; pub const RAND_ADD_DECISION: &'static str = "rand_add_decision"; - pub const RAND_ADD_SHARE: &'static str = "rand_add_share"; - pub const RAND_AGG_DECISION: &'static str = "rand_agg_decision"; + pub const RAND_ADD_ENOUGH_SHARE: &'static str = "rand_add_enough_share"; pub const RAND_ENTER: &'static str = "rand_enter"; pub const RAND_READY: &'static str = "rand_ready"; pub const ROUND_MANAGER_RECEIVED: &'static str = "round_manager_received"; diff --git a/consensus/src/consensus_provider.rs b/consensus/src/consensus_provider.rs index a60f38c5656f5..610525f37757f 100644 --- a/consensus/src/consensus_provider.rs +++ b/consensus/src/consensus_provider.rs @@ -10,6 +10,7 @@ use crate::{ persistent_liveness_storage::StorageWriteProxy, pipeline::execution_client::ExecutionProxyClient, quorum_store::quorum_store_db::QuorumStoreDB, + rand::rand_gen::storage::db::RandDb, state_computer::ExecutionProxy, transaction_filter::TransactionFilter, txn_notifier::MempoolNotifier, @@ -66,13 +67,16 @@ pub fn start_consensus( aptos_channels::new_unbounded(&counters::PENDING_SELF_MESSAGES); let consensus_network_client = ConsensusNetworkClient::new(network_client); let bounded_executor = BoundedExecutor::new(32, runtime.handle().clone()); + let rand_storage = Arc::new(RandDb::new(node_config.storage.dir())); let execution_client = Arc::new(ExecutionProxyClient::new( + node_config.consensus.clone(), Arc::new(execution_proxy), node_config.validator_network.as_ref().unwrap().peer_id(), self_sender.clone(), consensus_network_client.clone(), bounded_executor.clone(), + rand_storage.clone(), )); let epoch_mgr = EpochManager::new( @@ -89,6 +93,7 @@ pub fn start_consensus( bounded_executor, aptos_time_service::TimeService::real(), vtxn_pool, + rand_storage, ); let (network_task, network_receiver) = NetworkTask::new(network_service_events, self_receiver); diff --git a/consensus/src/counters.rs b/consensus/src/counters.rs 
index ff68614e3f711..dadbe0219a07b 100644 --- a/consensus/src/counters.rs +++ b/consensus/src/counters.rs @@ -959,3 +959,11 @@ pub static PROPOSED_VTXN_BYTES: Lazy = Lazy::new(|| { ) .unwrap() }); + +pub static RAND_QUEUE_SIZE: Lazy = Lazy::new(|| { + register_int_gauge!( + "aptos_consensus_rand_queue_size", + "Number of randomness-pending blocks." + ) + .unwrap() +}); diff --git a/consensus/src/dag/dag_fetcher.rs b/consensus/src/dag/dag_fetcher.rs index b8642133f9af7..a86caec740b78 100644 --- a/consensus/src/dag/dag_fetcher.rs +++ b/consensus/src/dag/dag_fetcher.rs @@ -2,10 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use super::{ - adapter::{LedgerInfoProvider, TLedgerInfoProvider}, - dag_store::DagStore, - errors::DagFetchError, - DAGRpcResult, + adapter::TLedgerInfoProvider, dag_store::DagStore, errors::DagFetchError, DAGRpcResult, }; use crate::{ dag::{ @@ -373,7 +370,6 @@ impl TDagFetcher for DagFetcher { let bounded_executor = bounded_executor.clone(); async move { let nodes = response.certified_nodes(); - let epoch = epoch_state.epoch; ensure!( stream::iter(nodes.clone()) .concurrent_map(bounded_executor.clone(), move |node| { diff --git a/consensus/src/dag/dag_state_sync.rs b/consensus/src/dag/dag_state_sync.rs index 807b794994adc..b4ae1178181bf 100644 --- a/consensus/src/dag/dag_state_sync.rs +++ b/consensus/src/dag/dag_state_sync.rs @@ -354,7 +354,7 @@ impl SyncModeMessageHandler { dag_message_result: anyhow::Result, epoch: u64, author: Author, - responder: RpcResponder, + _responder: RpcResponder, buffer: &mut Vec, ) -> anyhow::Result> { match dag_message_result { diff --git a/consensus/src/dag/types.rs b/consensus/src/dag/types.rs index 89c0707f2cb7b..b16746ca16708 100644 --- a/consensus/src/dag/types.rs +++ b/consensus/src/dag/types.rs @@ -32,7 +32,6 @@ use aptos_types::{ validator_verifier::ValidatorVerifier, }; use futures_channel::oneshot; -use rayon::iter::IntoParallelRefIterator; use serde::{Deserialize, Serialize}; use std::{ cmp::min, diff 
--git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index 88b1bfd23c6ba..61fc948868ecb 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -29,7 +29,7 @@ use crate::{ monitor, network::{ IncomingBatchRetrievalRequest, IncomingBlockRetrievalRequest, IncomingDAGRequest, - IncomingRpcRequest, NetworkReceivers, NetworkSender, + IncomingRandGenRequest, IncomingRpcRequest, NetworkReceivers, NetworkSender, }, network_interface::{ConsensusMsg, ConsensusNetworkClient}, payload_client::{ @@ -43,22 +43,31 @@ use crate::{ quorum_store_coordinator::CoordinatorCommand, quorum_store_db::QuorumStoreStorage, }, + rand::rand_gen::{ + storage::interface::RandStorage, + types::{AugmentedData, RandConfig}, + }, recovery_manager::RecoveryManager, round_manager::{RoundManager, UnverifiedEvent, VerifiedEvent}, util::time_service::TimeService, }; -use anyhow::{bail, ensure, Context}; +use anyhow::{anyhow, bail, ensure, Context}; use aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::config::{ ConsensusConfig, DagConsensusConfig, ExecutionConfig, NodeConfig, QcAggregatorType, - SecureBackend, + SafetyRulesConfig, SecureBackend, }; use aptos_consensus_types::{ common::{Author, Round}, delayed_qc_msg::DelayedQcMsg, epoch_retrieval::EpochRetrievalRequest, - proof_of_store::{BatchInfo, ProofCache}, + proof_of_store::ProofCache, +}; +use aptos_crypto::bls12381; +use aptos_dkg::{ + pvss::{traits::Transcript, Player}, + weighted_vuf::traits::WeightedVUF, }; use aptos_event_notifications::ReconfigNotificationListener; use aptos_global_constants::CONSENSUS_KEY; @@ -70,12 +79,14 @@ use aptos_safety_rules::SafetyRulesManager; use aptos_secure_storage::{KVStorage, Storage}; use aptos_types::{ account_address::AccountAddress, + dkg::{real_dkg::maybe_dk_from_bls_sk, DKGState, DKGTrait, DefaultDKG}, epoch_change::EpochChangeProof, epoch_state::EpochState, on_chain_config::{ - 
Features, LeaderReputationType, OnChainConfigPayload, OnChainConfigProvider, + FeatureFlag, Features, LeaderReputationType, OnChainConfigPayload, OnChainConfigProvider, OnChainConsensusConfig, OnChainExecutionConfig, ProposerElectionType, ValidatorSet, }, + randomness::{RandKeys, WvufPP, WVUF}, validator_signer::ValidatorSigner, }; use aptos_validator_transaction_pool::VTxnPoolState; @@ -90,6 +101,7 @@ use futures::{ }; use itertools::Itertools; use mini_moka::sync::Cache; +use rand::{prelude::StdRng, thread_rng, SeedableRng}; use std::{ cmp::Ordering, collections::HashMap, @@ -130,6 +142,8 @@ pub struct EpochManager { safety_rules_manager: SafetyRulesManager, vtxn_pool: VTxnPoolState, reconfig_events: ReconfigNotificationListener

, + // channels to rand manager + rand_manager_msg_tx: Option>, // channels to round manager round_manager_tx: Option< aptos_channel::Sender<(Author, Discriminant), (Author, VerifiedEvent)>, @@ -154,6 +168,7 @@ pub struct EpochManager { dag_config: DagConsensusConfig, payload_manager: Arc, proof_cache: ProofCache, + rand_storage: Arc>, } impl EpochManager

{ @@ -171,6 +186,7 @@ impl EpochManager

{ bounded_executor: BoundedExecutor, aptos_time_service: aptos_time_service::TimeService, vtxn_pool: VTxnPoolState, + rand_storage: Arc>, ) -> Self { let author = node_config.validator_network.as_ref().unwrap().peer_id(); let config = node_config.consensus.clone(); @@ -194,6 +210,7 @@ impl EpochManager

{ safety_rules_manager, vtxn_pool, reconfig_events, + rand_manager_msg_tx: None, round_manager_tx: None, round_manager_close_tx: None, buffered_proposal_tx: None, @@ -215,6 +232,7 @@ impl EpochManager

{ .initial_capacity(1_000) .time_to_live(Duration::from_secs(10)) .build(), + rand_storage, } } @@ -582,6 +600,9 @@ impl EpochManager

{ } self.dag_shutdown_tx = None; + // Shutdown the previous rand manager + self.rand_manager_msg_tx = None; + // Shutdown the previous buffer manager, to release the SafetyRule client self.execution_client.end_epoch().await; @@ -713,7 +734,9 @@ impl EpochManager

{ network_sender: Arc, payload_client: Arc, payload_manager: Arc, + rand_config: Option, features: Features, + rand_msg_rx: aptos_channel::Receiver, ) { let epoch = epoch_state.epoch; info!( @@ -759,8 +782,11 @@ impl EpochManager

{ epoch_state.clone(), safety_rules_container.clone(), payload_manager.clone(), + &onchain_consensus_config, &onchain_execution_config, &features, + rand_config, + rand_msg_rx, ) .await; @@ -823,7 +849,7 @@ impl EpochManager

{ onchain_consensus_config, buffered_proposal_tx, self.config.clone(), - features, + features.clone(), true, ); @@ -859,6 +885,99 @@ impl EpochManager

{ ) } + fn try_get_rand_config_for_new_epoch( + &self, + new_epoch_state: &EpochState, + features: &Features, + maybe_dkg_state: anyhow::Result, + consensus_config: &OnChainConsensusConfig, + ) -> Result { + if !consensus_config.is_vtxn_enabled() { + return Err(NoRandomnessReason::VTxnDisabled); + } + if !features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG) { + return Err(NoRandomnessReason::FeatureDisabled); + } + let new_epoch = new_epoch_state.epoch; + + let dkg_state = maybe_dkg_state.map_err(NoRandomnessReason::DKGStateResourceMissing)?; + let dkg_session = dkg_state + .last_completed + .ok_or_else(|| NoRandomnessReason::DKGCompletedSessionResourceMissing)?; + if dkg_session.metadata.dealer_epoch + 1 != new_epoch_state.epoch { + return Err(NoRandomnessReason::CompletedSessionTooOld); + } + let dkg_pub_params = DefaultDKG::new_public_params(&dkg_session.metadata); + let my_index = new_epoch_state + .verifier + .address_to_validator_index() + .get(&self.author) + .copied() + .ok_or_else(|| NoRandomnessReason::NotInValidatorSet)?; + + let dkg_decrypt_key = load_dkg_decrypt_key(&self.config.safety_rules) + .ok_or_else(|| NoRandomnessReason::DKGDecryptKeyUnavailable)?; + let transcript = bcs::from_bytes::<::Transcript>( + dkg_session.transcript.as_slice(), + ) + .map_err(NoRandomnessReason::TranscriptDeserializationError)?; + + let vuf_pp = WvufPP::from(&dkg_pub_params.pvss_config.pp); + + // No need to verify the transcript. 
+ + // keys for randomness generation + let (sk, pk) = DefaultDKG::decrypt_secret_share_from_transcript( + &dkg_pub_params, + &transcript, + my_index as u64, + &dkg_decrypt_key, + ) + .map_err(NoRandomnessReason::SecretShareDecryptionFailed)?; + + let pk_shares = (0..new_epoch_state.verifier.len()) + .map(|id| { + transcript.get_public_key_share(&dkg_pub_params.pvss_config.wconfig, &Player { id }) + }) + .collect::>(); + + // Recover existing augmented key pair or generate a new one + let (ask, apk) = if let Some((_, key_pair)) = self + .rand_storage + .get_key_pair_bytes() + .map_err(NoRandomnessReason::RandDbNotAvailable)? + .filter(|(epoch, _)| *epoch == new_epoch) + { + bcs::from_bytes(&key_pair).map_err(NoRandomnessReason::KeyPairDeserializationError)? + } else { + let mut rng = + StdRng::from_rng(thread_rng()).map_err(NoRandomnessReason::RngCreationError)?; + let augmented_key_pair = WVUF::augment_key_pair(&vuf_pp, sk, pk, &mut rng); + self.rand_storage + .save_key_pair_bytes( + new_epoch, + bcs::to_bytes(&augmented_key_pair) + .map_err(NoRandomnessReason::KeyPairSerializationError)?, + ) + .map_err(NoRandomnessReason::KeyPairPersistError)?; + augmented_key_pair + }; + + let keys = RandKeys::new(ask, apk, pk_shares, new_epoch_state.verifier.len()); + + let rand_config = RandConfig::new( + self.author, + new_epoch, + new_epoch_state.verifier.clone(), + vuf_pp, + keys, + dkg_pub_params.pvss_config.wconfig.clone(), + dkg_session.metadata.block_randomness, + ); + + Ok(rand_config) + } + async fn start_new_epoch(&mut self, payload: OnChainConfigPayload

) { let validator_set: ValidatorSet = payload .get() @@ -868,9 +987,12 @@ impl EpochManager

{ verifier: (&validator_set).into(), }); + self.epoch_state = Some(epoch_state.clone()); + let onchain_consensus_config: anyhow::Result = payload.get(); let onchain_execution_config: anyhow::Result = payload.get(); let features = payload.get::(); + let dkg_state = payload.get::(); if let Err(error) = &onchain_consensus_config { error!("Failed to read on-chain consensus config {}", error); @@ -891,10 +1013,30 @@ impl EpochManager

{ .unwrap_or_else(|_| OnChainExecutionConfig::default_if_missing()); let features = features.unwrap_or_default(); + let rand_config = self.try_get_rand_config_for_new_epoch( + &epoch_state, + &features, + dkg_state, + &consensus_config, + ); + info!( + "[Randomness] start_new_epoch: epoch={}, rand_config={:?}, ", + epoch_state.epoch, rand_config + ); // The sk inside has `SlientDebug`. + let rand_config = rand_config.ok(); + let (network_sender, payload_client, payload_manager) = self .initialize_shared_component(&epoch_state, &consensus_config) .await; + let (rand_msg_tx, rand_msg_rx) = aptos_channel::new::( + QueueStyle::FIFO, + 100, + None, + ); + + self.rand_manager_msg_tx = Some(rand_msg_tx); + if consensus_config.is_dag_enabled() { self.start_new_epoch_with_dag( epoch_state, @@ -903,7 +1045,9 @@ impl EpochManager

{ network_sender, payload_client, payload_manager, - features, + rand_config, + &features, + rand_msg_rx, ) .await } else { @@ -914,7 +1058,9 @@ impl EpochManager

{ network_sender, payload_client, payload_manager, - features, + rand_config, + &features, + rand_msg_rx, ) .await } @@ -954,7 +1100,9 @@ impl EpochManager

{ network_sender: NetworkSender, payload_client: Arc, payload_manager: Arc, - features: Features, + rand_config: Option, + features: &Features, + rand_msg_rx: aptos_channel::Receiver, ) { match self.storage.start() { LivenessStorageData::FullRecoveryData(initial_data) => { @@ -967,7 +1115,9 @@ impl EpochManager

{ Arc::new(network_sender), payload_client, payload_manager, - features, + rand_config, + features.clone(), + rand_msg_rx, ) .await }, @@ -992,12 +1142,15 @@ impl EpochManager

{ network_sender: NetworkSender, payload_client: Arc, payload_manager: Arc, - features: Features, + rand_config: Option, + features: &Features, + rand_msg_rx: aptos_channel::Receiver, ) { let epoch = epoch_state.epoch; - - let signer = new_signer_from_storage(self.author, &self.config.safety_rules.backend); - let commit_signer = Arc::new(DagCommitSigner::new(signer)); + let consensus_key = new_consensus_key_from_storage(&self.config.safety_rules.backend) + .expect("unable to get private key"); + let signer = Arc::new(ValidatorSigner::new(self.author, consensus_key)); + let commit_signer = Arc::new(DagCommitSigner::new(signer.clone())); assert!( onchain_consensus_config.decoupled_execution(), @@ -1009,8 +1162,11 @@ impl EpochManager

{ epoch_state.clone(), commit_signer, payload_manager.clone(), + &onchain_consensus_config, &on_chain_execution_config, - &features, + features, + rand_config, + rand_msg_rx, ) .await; @@ -1028,7 +1184,6 @@ impl EpochManager

{ self.storage.aptos_db(), )); - let signer = new_signer_from_storage(self.author, &self.config.safety_rules.backend); let network_sender_arc = Arc::new(network_sender); let bootstrapper = DagBootstrapper::new( @@ -1049,7 +1204,7 @@ impl EpochManager

{ onchain_consensus_config.quorum_store_enabled(), onchain_consensus_config.effective_validator_txn_config(), self.bounded_executor.clone(), - features, + features.clone(), ); let (dag_rpc_tx, dag_rpc_rx) = aptos_channel::new(QueueStyle::FIFO, 10, None); @@ -1337,7 +1492,13 @@ impl EpochManager

{ IncomingRpcRequest::CommitRequest(request) => { self.execution_client.send_commit_msg(peer_id, request) }, - IncomingRpcRequest::RandGenRequest(_) => Ok(()), + IncomingRpcRequest::RandGenRequest(request) => { + if let Some(tx) = &self.rand_manager_msg_tx { + tx.push(peer_id, request) + } else { + bail!("Rand manager not started"); + } + }, } } @@ -1408,15 +1569,69 @@ impl EpochManager

{ } } -#[allow(dead_code)] -fn new_signer_from_storage(author: Author, backend: &SecureBackend) -> Arc { +fn new_consensus_key_from_storage(backend: &SecureBackend) -> anyhow::Result { let storage: Storage = backend.into(); - if let Err(error) = storage.available() { - panic!("Storage is not available: {:?}", error); - } - let private_key = storage + storage + .available() + .map_err(|e| anyhow!("Storage is not available: {e}"))?; + storage .get(CONSENSUS_KEY) .map(|v| v.value) - .expect("Unable to get private key"); - Arc::new(ValidatorSigner::new(author, private_key)) + .map_err(|e| anyhow!("storage get and map err: {e}")) +} + +fn load_dkg_decrypt_key_from_identity_blob( + config: &SafetyRulesConfig, +) -> anyhow::Result<::NewValidatorDecryptKey> { + let identity_blob = config.initial_safety_rules_config.identity_blob()?; + identity_blob.try_into_dkg_new_validator_decrypt_key() +} + +fn load_dkg_decrypt_key_from_secure_storage( + config: &SafetyRulesConfig, +) -> anyhow::Result<::NewValidatorDecryptKey> { + let consensus_key = new_consensus_key_from_storage(&config.backend)?; + maybe_dk_from_bls_sk(&consensus_key) +} + +fn load_dkg_decrypt_key( + config: &SafetyRulesConfig, +) -> Option<::NewValidatorDecryptKey> { + match load_dkg_decrypt_key_from_secure_storage(config) { + Ok(dk) => { + return Some(dk); + }, + Err(e) => { + warn!("{e}"); + }, + } + + match load_dkg_decrypt_key_from_identity_blob(config) { + Ok(dk) => { + return Some(dk); + }, + Err(e) => { + warn!("{e}"); + }, + } + + None +} + +#[derive(Debug)] +enum NoRandomnessReason { + VTxnDisabled, + FeatureDisabled, + DKGStateResourceMissing(anyhow::Error), + DKGCompletedSessionResourceMissing, + CompletedSessionTooOld, + NotInValidatorSet, + DKGDecryptKeyUnavailable, + TranscriptDeserializationError(bcs::Error), + SecretShareDecryptionFailed(anyhow::Error), + RngCreationError(rand::Error), + RandDbNotAvailable(anyhow::Error), + KeyPairDeserializationError(bcs::Error), + 
KeyPairSerializationError(bcs::Error), + KeyPairPersistError(anyhow::Error), } diff --git a/consensus/src/logging.rs b/consensus/src/logging.rs index 4040b0ef5a23d..0e04adb3343d6 100644 --- a/consensus/src/logging.rs +++ b/consensus/src/logging.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_consensus_types::common::Author; +use aptos_crypto::HashValue; use aptos_logger::Schema; use aptos_types::block_info::Round; use serde::Serialize; @@ -10,9 +11,11 @@ use serde::Serialize; #[derive(Schema)] pub struct LogSchema { event: LogEvent, + author: Option, remote_peer: Option, epoch: Option, round: Option, + id: Option, } #[derive(Serialize)] @@ -40,15 +43,25 @@ pub enum LogEvent { Timeout, Vote, VoteNIL, + // log events related to randomness generation + BroadcastRandShare, + ReceiveProactiveRandShare, + ReceiveReactiveRandShare, + BroadcastAugData, + ReceiveAugData, + BroadcastCertifiedAugData, + ReceiveCertifiedAugData, } impl LogSchema { pub fn new(event: LogEvent) -> Self { Self { event, + author: None, remote_peer: None, epoch: None, round: None, + id: None, } } } diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 498ff85452244..349ce88511b45 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -14,7 +14,7 @@ use crate::{ network_interface::{ConsensusMsg, ConsensusNetworkClient, RPC}, pipeline::commit_reliable_broadcast::CommitMessage, quorum_store::types::{Batch, BatchMsg, BatchRequest, BatchResponse}, - rand::rand_gen::RandGenMessage, + rand::rand_gen::network_messages::RandGenMessage, }; use anyhow::{anyhow, bail, ensure}; use aptos_channels::{self, aptos_channel, message_queues::QueueStyle}; diff --git a/consensus/src/network_interface.rs b/consensus/src/network_interface.rs index 3b91bfcf09679..16f7d06d3587d 100644 --- a/consensus/src/network_interface.rs +++ b/consensus/src/network_interface.rs @@ -8,7 +8,7 @@ use crate::{ dag::DAGNetworkMessage, pipeline, quorum_store::types::{Batch, BatchMsg, 
BatchRequest, BatchResponse}, - rand::rand_gen::RandGenMessage, + rand::rand_gen::network_messages::RandGenMessage, }; use aptos_config::network_id::{NetworkId, PeerNetworkId}; use aptos_consensus_types::{ diff --git a/consensus/src/pipeline/buffer_manager.rs b/consensus/src/pipeline/buffer_manager.rs index f3f136c47a992..021075fedda24 100644 --- a/consensus/src/pipeline/buffer_manager.rs +++ b/consensus/src/pipeline/buffer_manager.rs @@ -534,7 +534,7 @@ impl BufferManager { fn process_commit_message(&mut self, commit_msg: IncomingCommitRequest) -> Option { let IncomingCommitRequest { req, - author, + author: _, protocol, response_sender, } = commit_msg; diff --git a/consensus/src/pipeline/errors.rs b/consensus/src/pipeline/errors.rs index cdb8396bb00a7..746228438eceb 100644 --- a/consensus/src/pipeline/errors.rs +++ b/consensus/src/pipeline/errors.rs @@ -15,4 +15,6 @@ pub enum Error { VerificationError, #[error("Reset host dropped")] ResetDropped, + #[error("Rand Reset host dropped")] + RandResetDropped, } diff --git a/consensus/src/pipeline/execution_client.rs b/consensus/src/pipeline/execution_client.rs index fdeacae3dae03..fcc81ab66441a 100644 --- a/consensus/src/pipeline/execution_client.rs +++ b/consensus/src/pipeline/execution_client.rs @@ -5,7 +5,7 @@ use crate::{ counters, error::StateSyncError, - network::{IncomingCommitRequest, NetworkSender}, + network::{IncomingCommitRequest, IncomingRandGenRequest, NetworkSender}, network_interface::{ConsensusMsg, ConsensusNetworkClient}, payload_manager::PayloadManager, pipeline::{ @@ -14,6 +14,11 @@ use crate::{ errors::Error, signing_phase::CommitSignerProvider, }, + rand::rand_gen::{ + rand_manager::RandManager, + storage::interface::RandStorage, + types::{AugmentedData, RandConfig, Share}, + }, state_computer::ExecutionProxy, state_replication::{StateComputer, StateComputerCommitCallBackType}, transaction_deduper::create_transaction_deduper, @@ -22,15 +27,18 @@ use crate::{ use anyhow::Result; use 
aptos_bounded_executor::BoundedExecutor; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; +use aptos_config::config::ConsensusConfig; use aptos_consensus_types::{common::Author, pipelined_block::PipelinedBlock}; use aptos_executor_types::ExecutorResult; use aptos_infallible::RwLock; use aptos_logger::prelude::*; use aptos_network::{application::interface::NetworkClient, protocols::network::Event}; +use aptos_safety_rules::safety_rules_manager::load_consensus_key_from_secure_storage; use aptos_types::{ epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, - on_chain_config::{FeatureFlag, Features, OnChainExecutionConfig}, + on_chain_config::{FeatureFlag, Features, OnChainConsensusConfig, OnChainExecutionConfig}, + validator_signer::ValidatorSigner, }; use fail::fail_point; use futures::{ @@ -49,8 +57,11 @@ pub trait TExecutionClient: Send + Sync { epoch_state: Arc, commit_signer_provider: Arc, payload_manager: Arc, + onchain_consensus_config: &OnChainConsensusConfig, onchain_execution_config: &OnChainExecutionConfig, features: &Features, + rand_config: Option, + rand_msg_rx: aptos_channel::Receiver, ); /// This is needed for some DAG tests. Clean this up as a TODO. 
@@ -80,7 +91,8 @@ pub trait TExecutionClient: Send + Sync { struct BufferManagerHandle { pub execute_tx: Option>, pub commit_tx: Option>, - pub reset_tx: Option>, + pub reset_tx_to_buffer_manager: Option>, + pub reset_tx_to_rand_manager: Option>, } impl BufferManagerHandle { @@ -88,7 +100,8 @@ impl BufferManagerHandle { Self { execute_tx: None, commit_tx: None, - reset_tx: None, + reset_tx_to_buffer_manager: None, + reset_tx_to_rand_manager: None, } } @@ -96,23 +109,31 @@ impl BufferManagerHandle { &mut self, execute_tx: UnboundedSender, commit_tx: aptos_channel::Sender, - reset_tx: UnboundedSender, + reset_tx_to_buffer_manager: UnboundedSender, + reset_tx_to_rand_manager: Option>, ) { self.execute_tx = Some(execute_tx); self.commit_tx = Some(commit_tx); - self.reset_tx = Some(reset_tx); + self.reset_tx_to_buffer_manager = Some(reset_tx_to_buffer_manager); + self.reset_tx_to_rand_manager = reset_tx_to_rand_manager; } - pub fn reset(&mut self) -> Option> { - let reset_tx = self.reset_tx.take(); + pub fn reset( + &mut self, + ) -> ( + Option>, + Option>, + ) { + let reset_tx_to_rand_manager = self.reset_tx_to_rand_manager.take(); + let reset_tx_to_buffer_manager = self.reset_tx_to_buffer_manager.take(); self.execute_tx = None; self.commit_tx = None; - self.reset_tx = None; - reset_tx + (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) } } pub struct ExecutionProxyClient { + consensus_config: ConsensusConfig, execution_proxy: Arc, author: Author, self_sender: aptos_channels::UnboundedSender>, @@ -120,23 +141,28 @@ pub struct ExecutionProxyClient { bounded_executor: BoundedExecutor, // channels to buffer manager handle: Arc>, + rand_storage: Arc>, } impl ExecutionProxyClient { pub fn new( + consensus_config: ConsensusConfig, execution_proxy: Arc, author: Author, self_sender: aptos_channels::UnboundedSender>, network_sender: ConsensusNetworkClient>, bounded_executor: BoundedExecutor, + rand_storage: Arc>, ) -> Self { Self { + consensus_config, execution_proxy, 
author, self_sender, network_sender, bounded_executor, handle: Arc::new(RwLock::new(BufferManagerHandle::new())), + rand_storage, } } @@ -144,6 +170,8 @@ impl ExecutionProxyClient { &self, commit_signer_provider: Arc, epoch_state: Arc, + rand_config: Option, + rand_msg_rx: aptos_channel::Receiver, ) { let network_sender = NetworkSender::new( self.author, @@ -152,8 +180,7 @@ impl ExecutionProxyClient { epoch_state.verifier.clone(), ); - let (block_tx, block_rx) = unbounded::(); - let (reset_tx, reset_rx) = unbounded::(); + let (reset_buffer_manager_tx, reset_buffer_manager_rx) = unbounded::(); let (commit_msg_tx, commit_msg_rx) = aptos_channel::new::( @@ -162,7 +189,51 @@ impl ExecutionProxyClient { Some(&counters::BUFFER_MANAGER_MSGS), ); - self.handle.write().init(block_tx, commit_msg_tx, reset_tx); + let (execution_ready_block_tx, execution_ready_block_rx, maybe_reset_tx_to_rand_manager) = + if let Some(rand_config) = rand_config { + let (ordered_block_tx, ordered_block_rx) = unbounded::(); + let (rand_ready_block_tx, rand_ready_block_rx) = unbounded::(); + + let (reset_tx_to_rand_manager, reset_rand_manager_rx) = unbounded::(); + let consensus_key = + load_consensus_key_from_secure_storage(&self.consensus_config.safety_rules) + .expect("Failed in loading consensus key for ExecutionProxyClient."); + let signer = Arc::new(ValidatorSigner::new(self.author, consensus_key)); + + let rand_manager = RandManager::::new( + self.author, + epoch_state.clone(), + signer, + rand_config, + rand_ready_block_tx, + Arc::new(network_sender.clone()), + self.rand_storage.clone(), + self.bounded_executor.clone(), + ); + + tokio::spawn(rand_manager.start( + ordered_block_rx, + rand_msg_rx, + reset_rand_manager_rx, + self.bounded_executor.clone(), + )); + + ( + ordered_block_tx, + rand_ready_block_rx, + Some(reset_tx_to_rand_manager), + ) + } else { + let (ordered_block_tx, ordered_block_rx) = unbounded(); + (ordered_block_tx, ordered_block_rx, None) + }; + + self.handle.write().init( 
+ execution_ready_block_tx, + commit_msg_tx, + reset_buffer_manager_tx, + maybe_reset_tx_to_rand_manager, + ); let ( execution_schedule_phase, @@ -177,8 +248,8 @@ impl ExecutionProxyClient { network_sender, commit_msg_rx, self.execution_proxy.clone(), - block_rx, - reset_rx, + execution_ready_block_rx, + reset_buffer_manager_rx, epoch_state, self.bounded_executor.clone(), ); @@ -198,10 +269,18 @@ impl TExecutionClient for ExecutionProxyClient { epoch_state: Arc, commit_signer_provider: Arc, payload_manager: Arc, + onchain_consensus_config: &OnChainConsensusConfig, onchain_execution_config: &OnChainExecutionConfig, features: &Features, + rand_config: Option, + rand_msg_rx: aptos_channel::Receiver, ) { - self.spawn_decoupled_execution(commit_signer_provider, epoch_state.clone()); + let maybe_rand_msg_tx = self.spawn_decoupled_execution( + commit_signer_provider, + epoch_state.clone(), + rand_config, + rand_msg_rx, + ); let transaction_shuffler = create_transaction_shuffler(onchain_execution_config.transaction_shuffler_type()); @@ -209,14 +288,18 @@ impl TExecutionClient for ExecutionProxyClient { onchain_execution_config.block_executor_onchain_config(); let transaction_deduper = create_transaction_deduper(onchain_execution_config.transaction_deduper_type()); + let randomness_enabled = onchain_consensus_config.is_vtxn_enabled() + && features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG); self.execution_proxy.new_epoch( &epoch_state, payload_manager, transaction_shuffler, block_executor_onchain_config, transaction_deduper, - features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG), + randomness_enabled, ); + + maybe_rand_msg_tx } fn get_execution_channel(&self) -> Option> { @@ -280,9 +363,27 @@ impl TExecutionClient for ExecutionProxyClient { Err(anyhow::anyhow!("Injected error in sync_to").into()) }); - let reset_tx = self.handle.read().reset_tx.clone(); + let (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) = { + let handle = self.handle.read(); + ( + 
handle.reset_tx_to_rand_manager.clone(), + handle.reset_tx_to_buffer_manager.clone(), + ) + }; - if let Some(mut reset_tx) = reset_tx { + if let Some(mut reset_tx) = reset_tx_to_rand_manager { + let (ack_tx, ack_rx) = oneshot::channel::(); + reset_tx + .send(ResetRequest { + tx: ack_tx, + signal: ResetSignal::TargetRound(target.commit_info().round()), + }) + .await + .map_err(|_| Error::RandResetDropped)?; + ack_rx.await.map_err(|_| Error::RandResetDropped)?; + } + + if let Some(mut reset_tx) = reset_tx_to_buffer_manager { // reset execution phase and commit phase let (tx, rx) = oneshot::channel::(); reset_tx @@ -302,8 +403,25 @@ impl TExecutionClient for ExecutionProxyClient { } async fn end_epoch(&self) { - let reset_tx = self.handle.write().reset(); - if let Some(mut tx) = reset_tx { + let (reset_tx_to_rand_manager, reset_tx_to_buffer_manager) = { + let mut handle = self.handle.write(); + handle.reset() + }; + + if let Some(mut tx) = reset_tx_to_rand_manager { + let (ack_tx, ack_rx) = oneshot::channel(); + tx.send(ResetRequest { + tx: ack_tx, + signal: ResetSignal::Stop, + }) + .await + .expect("[EpochManager] Fail to drop rand manager"); + ack_rx + .await + .expect("[EpochManager] Fail to drop rand manager"); + } + + if let Some(mut tx) = reset_tx_to_buffer_manager { let (ack_tx, ack_rx) = oneshot::channel(); tx.send(ResetRequest { tx: ack_tx, @@ -328,8 +446,11 @@ impl TExecutionClient for DummyExecutionClient { _epoch_state: Arc, _commit_signer_provider: Arc, _payload_manager: Arc, + _onchain_consensus_config: &OnChainConsensusConfig, _onchain_execution_config: &OnChainExecutionConfig, _features: &Features, + _rand_config: Option, + _rand_msg_rx: aptos_channel::Receiver, ) { } diff --git a/consensus/src/quorum_store/tests/proof_coordinator_test.rs b/consensus/src/quorum_store/tests/proof_coordinator_test.rs index b34f10533d519..64f2ac7cf940c 100644 --- a/consensus/src/quorum_store/tests/proof_coordinator_test.rs +++ 
b/consensus/src/quorum_store/tests/proof_coordinator_test.rs @@ -11,7 +11,7 @@ use crate::{ test_utils::{create_vec_signed_transactions, mock_quorum_store_sender::MockQuorumStoreSender}, }; use aptos_consensus_types::proof_of_store::{ - BatchId, ProofOfStore, SignedBatchInfo, SignedBatchInfoMsg, + BatchId, ProofCache, ProofOfStore, SignedBatchInfo, SignedBatchInfoMsg, }; use aptos_crypto::HashValue; use aptos_executor_types::ExecutorResult; @@ -51,6 +51,7 @@ async fn test_proof_coordinator_basic() { peer: signers[0].author(), }), tx, + ProofCache::new(64), //TODO: i'm only making it build... true, ); let (proof_coordinator_tx, proof_coordinator_rx) = channel(100); @@ -79,7 +80,8 @@ async fn test_proof_coordinator_basic() { msg => panic!("Expected LocalProof but received: {:?}", msg), }; // check normal path - assert!(proof_msg.verify(100, &verifier).is_ok()); + let cache = ProofCache::new(64); //TODO: i'm only making it build... + assert!(proof_msg.verify(100, &verifier, &cache).is_ok()); let proofs = proof_msg.take(); assert_eq!(proofs[0].digest(), digest); } diff --git a/consensus/src/rand/rand_gen/aug_data_store.rs b/consensus/src/rand/rand_gen/aug_data_store.rs index 7d5211ea6cb01..5723a60ee7231 100644 --- a/consensus/src/rand/rand_gen/aug_data_store.rs +++ b/consensus/src/rand/rand_gen/aug_data_store.rs @@ -2,10 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::rand::rand_gen::{ - storage::interface::AugDataStorage, + storage::interface::RandStorage, types::{ - AugData, AugDataId, AugDataSignature, AugmentedData, CertifiedAugData, CertifiedAugDataAck, - RandConfig, + AugData, AugDataId, AugDataSignature, CertifiedAugData, CertifiedAugDataAck, RandConfig, + TAugmentedData, }, }; use anyhow::ensure; @@ -14,16 +14,16 @@ use aptos_logger::error; use aptos_types::validator_signer::ValidatorSigner; use std::{collections::HashMap, sync::Arc}; -pub struct AugDataStore { +pub struct AugDataStore { epoch: u64, signer: Arc, config: RandConfig, data: HashMap>, 
certified_data: HashMap>, - db: Arc, + db: Arc>, } -impl> AugDataStore { +impl AugDataStore { fn filter_by_epoch( epoch: u64, all_data: impl Iterator, @@ -44,24 +44,30 @@ impl> AugDataStore { epoch: u64, signer: Arc, config: RandConfig, - db: Arc, + db: Arc>, ) -> Self { let all_data = db.get_all_aug_data().unwrap_or_default(); let (to_remove, aug_data) = Self::filter_by_epoch(epoch, all_data.into_iter()); - if let Err(e) = db.remove_aug_data(to_remove.into_iter()) { + if let Err(e) = db.remove_aug_data(to_remove) { error!("[AugDataStore] failed to remove aug data: {:?}", e); } let all_certified_data = db.get_all_certified_aug_data().unwrap_or_default(); let (to_remove, certified_data) = Self::filter_by_epoch(epoch, all_certified_data.into_iter()); - if let Err(e) = db.remove_certified_aug_data(to_remove.into_iter()) { + if let Err(e) = db.remove_certified_aug_data(to_remove) { error!( "[AugDataStore] failed to remove certified aug data: {:?}", e ); } + for (_, certified_data) in &certified_data { + certified_data + .data() + .augment(&config, certified_data.author()); + } + Self { epoch, signer, @@ -79,11 +85,11 @@ impl> AugDataStore { } pub fn get_my_aug_data(&self) -> Option> { - self.data.get(self.config.author()).cloned() + self.data.get(&self.config.author()).cloned() } pub fn get_my_certified_aug_data(&self) -> Option> { - self.certified_data.get(self.config.author()).cloned() + self.certified_data.get(&self.config.author()).cloned() } pub fn add_aug_data(&mut self, data: AugData) -> anyhow::Result { diff --git a/consensus/src/rand/rand_gen/block_queue.rs b/consensus/src/rand/rand_gen/block_queue.rs index 79dad70de7061..0c5b316829e6b 100644 --- a/consensus/src/rand/rand_gen/block_queue.rs +++ b/consensus/src/rand/rand_gen/block_queue.rs @@ -94,6 +94,10 @@ impl BlockQueue { } } + pub fn queue(&self) -> &BTreeMap { + &self.queue + } + pub fn push_back(&mut self, item: QueueItem) { for block in item.blocks() { observe_block(block.timestamp_usecs(), 
BlockStage::RAND_ENTER); diff --git a/consensus/src/rand/rand_gen/mod.rs b/consensus/src/rand/rand_gen/mod.rs index 49cfd4cc4d0a1..0127c320dc217 100644 --- a/consensus/src/rand/rand_gen/mod.rs +++ b/consensus/src/rand/rand_gen/mod.rs @@ -4,14 +4,12 @@ #[cfg(test)] mod test_utils; -mod block_queue; -mod network_messages; -mod rand_store; -mod types; - -mod aug_data_store; -mod rand_manager; -mod reliable_broadcast_state; -mod storage; - -pub use network_messages::RandGenMessage; +pub mod block_queue; +pub mod network_messages; +pub mod rand_store; +pub mod types; + +pub mod aug_data_store; +pub mod rand_manager; +pub mod reliable_broadcast_state; +pub mod storage; diff --git a/consensus/src/rand/rand_gen/network_messages.rs b/consensus/src/rand/rand_gen/network_messages.rs index 157cc2060989c..4477d0b49c955 100644 --- a/consensus/src/rand/rand_gen/network_messages.rs +++ b/consensus/src/rand/rand_gen/network_messages.rs @@ -5,8 +5,8 @@ use crate::{ network::TConsensusMsg, network_interface::ConsensusMsg, rand::rand_gen::types::{ - AugData, AugDataSignature, AugmentedData, CertifiedAugData, CertifiedAugDataAck, - RandConfig, RandShare, RequestShare, Share, + AugData, AugDataSignature, CertifiedAugData, CertifiedAugDataAck, RandConfig, RandShare, + RequestShare, TAugmentedData, TShare, }, }; use anyhow::bail; @@ -30,7 +30,7 @@ pub enum RandMessage { CertifiedAugDataAck(CertifiedAugDataAck), } -impl RandMessage { +impl RandMessage { pub fn verify( &self, epoch_state: &EpochState, @@ -49,9 +49,9 @@ impl RandMessage { } } -impl RBMessage for RandMessage {} +impl RBMessage for RandMessage {} -impl TConsensusMsg for RandMessage { +impl TConsensusMsg for RandMessage { fn epoch(&self) -> u64 { match self { RandMessage::RequestShare(request) => request.epoch(), diff --git a/consensus/src/rand/rand_gen/rand_manager.rs b/consensus/src/rand/rand_gen/rand_manager.rs index c43c975d830f5..740165f3cc648 100644 --- a/consensus/src/rand/rand_gen/rand_manager.rs +++ 
b/consensus/src/rand/rand_gen/rand_manager.rs @@ -2,6 +2,8 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ + counters::RAND_QUEUE_SIZE, + logging::{LogEvent, LogSchema}, network::{IncomingRandGenRequest, NetworkSender, TConsensusMsg}, pipeline::buffer_manager::{OrderedBlocks, ResetAck, ResetRequest, ResetSignal}, rand::rand_gen::{ @@ -12,11 +14,12 @@ use crate::{ reliable_broadcast_state::{ AugDataCertBuilder, CertifiedAugDataAckState, ShareAggregateState, }, - storage::interface::AugDataStorage, - types::{AugmentedData, CertifiedAugData, RandConfig, RequestShare, Share}, + storage::interface::RandStorage, + types::{RandConfig, RequestShare, TAugmentedData, TShare}, }, }; use aptos_bounded_executor::BoundedExecutor; +use aptos_channels::aptos_channel; use aptos_consensus_types::common::Author; use aptos_infallible::Mutex; use aptos_logger::{error, info, spawn_named, warn}; @@ -29,16 +32,21 @@ use aptos_types::{ validator_signer::ValidatorSigner, }; use bytes::Bytes; -use futures::future::{AbortHandle, Abortable}; -use futures_channel::oneshot; +use futures::{ + future::{AbortHandle, Abortable}, + FutureExt, StreamExt, +}; +use futures_channel::{ + mpsc::{unbounded, UnboundedReceiver, UnboundedSender}, + oneshot, +}; use std::{sync::Arc, time::Duration}; -use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use tokio_retry::strategy::ExponentialBackoff; pub type Sender = UnboundedSender; pub type Receiver = UnboundedReceiver; -pub struct RandManager { +pub struct RandManager { author: Author, epoch_state: Arc, stop: bool, @@ -52,11 +60,11 @@ pub struct RandManager { outgoing_blocks: Sender, // local state rand_store: Arc>>, - aug_data_store: AugDataStore, + aug_data_store: AugDataStore, block_queue: BlockQueue, } -impl> RandManager { +impl RandManager { pub fn new( author: Author, epoch_state: Arc, @@ -64,7 +72,7 @@ impl> RandManager, network_sender: Arc, - db: Arc, + db: Arc>, bounded_executor: BoundedExecutor, ) -> Self { let 
rb_backoff_policy = ExponentialBackoff::from_millis(2) @@ -78,7 +86,7 @@ impl> RandManager> RandManager = blocks.ordered_blocks.iter().map(|b| b.round()).collect(); + info!(rounds = rounds, "Processing incoming blocks."); let broadcast_handles: Vec<_> = blocks .ordered_blocks .iter() @@ -117,19 +127,30 @@ impl> RandManager DropGuard { let self_share = S::generate(&self.config, metadata.clone()); + info!(LogSchema::new(LogEvent::BroadcastRandShare) + .epoch(self.epoch_state.epoch) + .author(self.author) + .round(metadata.round())); let mut rand_store = self.rand_store.lock(); - rand_store.add_rand_metadata(metadata.clone()); + rand_store.update_highest_known_round(metadata.round()); rand_store .add_share(self_share.clone()) .expect("Add self share should succeed"); + rand_store.add_rand_metadata(metadata.clone()); self.network_sender .broadcast_without_self(RandMessage::::Share(self_share).into_network_message()); self.spawn_aggregate_shares_task(metadata) } fn process_ready_blocks(&mut self, ready_blocks: Vec) { + let rounds: Vec = ready_blocks + .iter() + .flat_map(|b| b.ordered_blocks.iter().map(|b3| b3.round())) + .collect(); + info!(rounds = rounds, "Processing rand-ready blocks."); + for blocks in ready_blocks { - let _ = self.outgoing_blocks.send(blocks); + let _ = self.outgoing_blocks.unbounded_send(blocks); } } @@ -140,12 +161,18 @@ impl> RandManager round, }; self.block_queue = BlockQueue::new(); - self.rand_store.lock().reset(target_round); + self.rand_store + .lock() + .update_highest_known_round(target_round); self.stop = matches!(signal, ResetSignal::Stop); let _ = tx.send(ResetAck::default()); } fn process_randomness(&mut self, randomness: Randomness) { + info!( + metadata = randomness.metadata().metadata_to_sign, + "Processing decisioned randomness." 
+ ); if let Some(block) = self.block_queue.item_mut(randomness.round()) { block.set_randomness(randomness.round(), randomness); } @@ -163,12 +190,12 @@ impl> RandManager, - mut incoming_rpc_request: Receiver, + mut incoming_rpc_request: aptos_channel::Receiver, verified_msg_tx: UnboundedSender>, rand_config: RandConfig, bounded_executor: BoundedExecutor, ) { - while let Some(rand_gen_msg) = incoming_rpc_request.recv().await { + while let Some(rand_gen_msg) = incoming_rpc_request.next().await { let tx = verified_msg_tx.clone(); let epoch_state_clone = epoch_state.clone(); let config_clone = rand_config.clone(); @@ -180,7 +207,7 @@ impl> RandManager> RandManager CertifiedAugData { - if let Some(certified_data) = self.aug_data_store.get_my_certified_aug_data() { - info!("[RandManager] Already have certified aug data"); - return certified_data; - } + async fn broadcast_aug_data(&mut self) -> DropGuard { let data = self .aug_data_store .get_my_aug_data() @@ -251,25 +274,31 @@ impl> RandManager) -> DropGuard { - let rb = self.reliable_broadcast.clone(); + let rb2 = self.reliable_broadcast.clone(); let validators = self.epoch_state.verifier.get_ordered_account_addresses(); - // Add it synchronously to be able to sign without a race that we get to sign before the broadcast reaches aug store. 
- self.aug_data_store - .add_certified_aug_data(certified_data.clone()) - .expect("Add self aug data should succeed"); - let task = async move { - let ack_state = Arc::new(CertifiedAugDataAckState::new(validators.into_iter())); + let maybe_existing_certified_data = self.aug_data_store.get_my_certified_aug_data(); + let phase1 = async move { + if let Some(certified_data) = maybe_existing_certified_data { + info!("[RandManager] Already have certified aug data"); + return certified_data; + } + info!("[RandManager] Start broadcasting aug data"); + info!(LogSchema::new(LogEvent::BroadcastAugData) + .author(*data.author()) + .epoch(data.epoch())); + let certified_data = rb.broadcast(data, aug_ack).await; + info!("[RandManager] Finish broadcasting aug data"); + certified_data + }; + let ack_state = Arc::new(CertifiedAugDataAckState::new(validators.into_iter())); + let task = phase1.then(|certified_data| async move { + info!(LogSchema::new(LogEvent::BroadcastCertifiedAugData) + .author(*certified_data.author()) + .epoch(certified_data.epoch())); info!("[RandManager] Start broadcasting certified aug data"); - rb.broadcast(certified_data, ack_state).await; + rb2.broadcast(certified_data, ack_state).await; info!("[RandManager] Finish broadcasting certified aug data"); - }; + }); let (abort_handle, abort_registration) = AbortHandle::new_pair(); tokio::spawn(Abortable::new(task, abort_registration)); DropGuard::new(abort_handle) @@ -278,12 +307,12 @@ impl> RandManager, - incoming_rpc_request: Receiver, + incoming_rpc_request: aptos_channel::Receiver, mut reset_rx: Receiver, bounded_executor: BoundedExecutor, ) { info!("RandManager started"); - let (verified_msg_tx, mut verified_msg_rx) = tokio::sync::mpsc::unbounded_channel(); + let (verified_msg_tx, mut verified_msg_rx) = unbounded(); let epoch_state = self.epoch_state.clone(); let rand_config = self.config.clone(); spawn_named!( @@ -297,22 +326,21 @@ impl> RandManager { + Some(blocks) = incoming_blocks.next() => { 
self.process_incoming_blocks(blocks); } - Some(reset) = reset_rx.recv() => { - while incoming_blocks.try_recv().is_ok() {} + Some(reset) = reset_rx.next() => { + while matches!(incoming_blocks.try_next(), Ok(Some(_))) {} self.process_reset(reset); } - Some(randomness) = self.decision_rx.recv() => { + Some(randomness) = self.decision_rx.next() => { self.process_randomness(randomness); } - Some(request) = verified_msg_rx.recv() => { + Some(request) = verified_msg_rx.next() => { let RpcRequest { req: rand_gen_msg, protocol, @@ -337,17 +365,31 @@ impl> RandManager { + info!(LogSchema::new(LogEvent::ReceiveProactiveRandShare) + .author(self.author) + .epoch(share.epoch()) + .round(share.metadata().round()) + .remote_peer(*share.author())); + if let Err(e) = self.rand_store.lock().add_share(share) { warn!("[RandManager] Failed to add share: {}", e); } } RandMessage::AugData(aug_data) => { + info!(LogSchema::new(LogEvent::ReceiveAugData) + .author(self.author) + .epoch(aug_data.epoch()) + .remote_peer(*aug_data.author())); match self.aug_data_store.add_aug_data(aug_data) { Ok(sig) => self.process_response(protocol, response_sender, RandMessage::AugDataSignature(sig)), Err(e) => error!("[RandManager] Failed to add aug data: {}", e), } } RandMessage::CertifiedAugData(certified_aug_data) => { + info!(LogSchema::new(LogEvent::ReceiveCertifiedAugData) + .author(self.author) + .epoch(certified_aug_data.epoch()) + .remote_peer(*certified_aug_data.author())); match self.aug_data_store.add_certified_aug_data(certified_aug_data) { Ok(ack) => self.process_response(protocol, response_sender, RandMessage::CertifiedAugDataAck(ack)), Err(e) => error!("[RandManager] Failed to add certified aug data: {}", e), @@ -356,12 +398,23 @@ impl> RandManager unreachable!("[RandManager] Unexpected message type after verification"), } } + _ = interval.tick().fuse() => { + self.observe_queue(); + }, + } - let maybe_ready_blocks = self.block_queue.dequeue_rand_ready_prefix(); - if 
!maybe_ready_blocks.is_empty() { - self.process_ready_blocks(maybe_ready_blocks); + if !self.config.block_randomness { + let maybe_ready_blocks = self.block_queue.dequeue_rand_ready_prefix(); + if !maybe_ready_blocks.is_empty() { + self.process_ready_blocks(maybe_ready_blocks); + } } } info!("RandManager stopped"); } + + pub fn observe_queue(&self) { + let queue = &self.block_queue.queue(); + RAND_QUEUE_SIZE.set(queue.len() as i64); + } } diff --git a/consensus/src/rand/rand_gen/rand_store.rs b/consensus/src/rand/rand_gen/rand_store.rs index 61f424ea2deab..ee5b98a2a5968 100644 --- a/consensus/src/rand/rand_gen/rand_store.rs +++ b/consensus/src/rand/rand_gen/rand_store.rs @@ -5,7 +5,7 @@ use crate::{ block_storage::tracing::{observe_block, BlockStage}, rand::rand_gen::{ rand_manager::Sender, - types::{RandConfig, RandShare, Share}, + types::{RandConfig, RandShare, TShare}, }, }; use anyhow::ensure; @@ -22,7 +22,7 @@ pub struct ShareAggregator { total_weight: u64, } -impl ShareAggregator { +impl ShareAggregator { pub fn new(author: Author) -> Self { Self { author, @@ -32,9 +32,7 @@ impl ShareAggregator { } pub fn add_share(&mut self, weight: u64, share: RandShare) { - let timestamp = share.metadata().timestamp; if self.shares.insert(*share.author(), share).is_none() { - observe_block(timestamp, BlockStage::RAND_ADD_SHARE); self.total_weight += weight; } } @@ -45,15 +43,17 @@ impl ShareAggregator { rand_metadata: RandMetadata, decision_tx: Sender, ) -> Either> { - if self.total_weight < rand_config.threshold_weight() { + if self.total_weight < rand_config.threshold() { return Either::Left(self); } + // timestamp records the time when the block is created + observe_block(rand_metadata.timestamp, BlockStage::RAND_ADD_ENOUGH_SHARE); let rand_config = rand_config.clone(); let self_share = self .get_self_share() .expect("Aggregated item should have self share"); tokio::task::spawn_blocking(move || { - decision_tx.send(S::aggregate( + 
decision_tx.unbounded_send(S::aggregate( self.shares.values(), &rand_config, rand_metadata, @@ -92,7 +92,7 @@ enum RandItem { }, } -impl RandItem { +impl RandItem { fn new(author: Author) -> Self { Self::PendingMetadata(ShareAggregator::new(author)) } @@ -198,7 +198,7 @@ pub struct RandStore { decision_tx: Sender, } -impl RandStore { +impl RandStore { pub fn new( epoch: u64, author: Author, @@ -215,12 +215,11 @@ impl RandStore { } } - pub fn reset(&mut self, target_round: u64) { - self.highest_known_round = std::cmp::max(self.highest_known_round, target_round); + pub fn update_highest_known_round(&mut self, round: u64) { + self.highest_known_round = std::cmp::max(self.highest_known_round, round); } pub fn add_rand_metadata(&mut self, rand_metadata: RandMetadata) { - self.highest_known_round = std::cmp::max(self.highest_known_round, rand_metadata.round()); let rand_item = self .rand_map .entry(rand_metadata.round()) @@ -283,30 +282,144 @@ mod tests { types::{MockShare, RandConfig}, }; use aptos_consensus_types::common::Author; - use aptos_types::randomness::RandMetadata; - use std::{collections::HashMap, str::FromStr}; - use tokio::sync::mpsc::unbounded_channel; + use aptos_crypto::{bls12381, HashValue, Uniform}; + use aptos_dkg::{ + pvss::{traits::Transcript, Player, WeightedConfig}, + weighted_vuf::traits::WeightedVUF, + }; + use aptos_types::{ + dkg::{real_dkg::maybe_dk_from_bls_sk, DKGSessionMetadata, DKGTrait, DefaultDKG}, + randomness::{RandKeys, RandMetadata, WvufPP, WVUF}, + validator_verifier::{ + ValidatorConsensusInfo, ValidatorConsensusInfoMoveStruct, ValidatorVerifier, + }, + }; + use futures::StreamExt; + use futures_channel::mpsc::unbounded; + use rand::thread_rng; + use std::str::FromStr; + + /// Captures important data items across the whole DKG-WVUF flow. 
+ struct TestContext { + authors: Vec, + dealer_epoch: u64, + target_epoch: u64, + rand_config: RandConfig, + } + + impl TestContext { + fn new(weights: Vec, my_index: usize) -> Self { + let dealer_epoch = 0; + let target_epoch = 1; + let num_validators = weights.len(); + let mut rng = thread_rng(); + let authors: Vec<_> = (0..num_validators) + .map(|i| Author::from_str(&format!("{:x}", i)).unwrap()) + .collect(); + let private_keys: Vec = (0..num_validators) + .map(|_| bls12381::PrivateKey::generate_for_testing()) + .collect(); + let public_keys: Vec = + private_keys.iter().map(bls12381::PublicKey::from).collect(); + let dkg_decrypt_keys: Vec<::NewValidatorDecryptKey> = + private_keys + .iter() + .map(|sk| maybe_dk_from_bls_sk(sk).unwrap()) + .collect(); + let consensus_infos: Vec = (0..num_validators) + .map(|idx| { + ValidatorConsensusInfo::new( + authors[idx], + public_keys[idx].clone(), + weights[idx], + ) + }) + .collect(); + let consensus_info_move_structs = consensus_infos + .clone() + .into_iter() + .map(ValidatorConsensusInfoMoveStruct::from) + .collect::>(); + let verifier = ValidatorVerifier::new(consensus_infos.clone()); + let dkg_session_metadata = DKGSessionMetadata { + dealer_epoch: 999, + dealer_validator_set: consensus_info_move_structs.clone(), + target_validator_set: consensus_info_move_structs.clone(), + block_dkg: false, + block_randomness: false, + }; + let dkg_pub_params = DefaultDKG::new_public_params(&dkg_session_metadata); + let input_secret = ::InputSecret::generate_for_testing(); + let transcript = DefaultDKG::generate_transcript( + &mut rng, + &dkg_pub_params, + &input_secret, + 0, + &private_keys[0], + ); + let (sk, pk) = DefaultDKG::decrypt_secret_share_from_transcript( + &dkg_pub_params, + &transcript, + my_index as u64, + &dkg_decrypt_keys[my_index], + ) + .unwrap(); + + let pk_shares = (0..num_validators) + .map(|id| { + transcript + .get_public_key_share(&dkg_pub_params.pvss_config.wconfig, &Player { id }) + }) + .collect::>(); + 
let vuf_pub_params = WvufPP::from(&dkg_pub_params.pvss_config.pp); + + let (ask, apk) = WVUF::augment_key_pair(&vuf_pub_params, sk, pk, &mut rng); + + let rand_keys = RandKeys::new(ask, apk, pk_shares, num_validators); + let weights: Vec = weights.into_iter().map(|x| x as usize).collect(); + let half_total_weights = weights.clone().into_iter().sum::() / 2; + let weighted_config = WeightedConfig::new(half_total_weights, weights).unwrap(); + let rand_config = RandConfig::new( + authors[my_index], + target_epoch, + verifier, + vuf_pub_params, + rand_keys, + weighted_config, + false, + ); + + Self { + authors, + dealer_epoch, + target_epoch, + rand_config, + } + } + } #[test] fn test_share_aggregator() { - let mut aggr = ShareAggregator::new(Author::ONE); - let weights = HashMap::from([(Author::ONE, 1), (Author::TWO, 2), (Author::ZERO, 3)]); - let shares = vec![ - create_share_for_round(1, Author::ONE), - create_share_for_round(2, Author::TWO), - create_share_for_round(1, Author::ZERO), - ]; - for share in shares.iter() { - aggr.add_share(*weights.get(share.author()).unwrap(), share.clone()); - // double add should be no op to the total weight - aggr.add_share(*weights.get(share.author()).unwrap(), share.clone()); - } + let ctxt = TestContext::new(vec![1, 2, 3], 0); + let mut aggr = ShareAggregator::new(ctxt.authors[0]); + aggr.add_share( + 1, + create_share_for_round(ctxt.target_epoch, 1, ctxt.authors[0]), + ); + aggr.add_share( + 2, + create_share_for_round(ctxt.target_epoch, 2, ctxt.authors[1]), + ); + aggr.add_share( + 3, + create_share_for_round(ctxt.target_epoch, 1, ctxt.authors[2]), + ); assert_eq!(aggr.shares.len(), 3); assert_eq!(aggr.total_weight, 6); // retain the shares with the same metadata aggr.retain( - &RandConfig::new(1, Author::ZERO, weights), - &RandMetadata::new_for_testing(1), + &ctxt.rand_config, + &RandMetadata::new(ctxt.target_epoch, 1, HashValue::zero(), 1700000000), ); assert_eq!(aggr.shares.len(), 2); assert_eq!(aggr.total_weight, 4); @@ 
-314,42 +427,48 @@ mod tests { #[tokio::test] async fn test_rand_item() { - let weights = HashMap::from([(Author::ONE, 1), (Author::TWO, 2), (Author::ZERO, 3)]); - let config = RandConfig::new(1, Author::ZERO, weights); - let (tx, _rx) = unbounded_channel(); + let ctxt = TestContext::new(vec![1, 2, 3], 1); + let (tx, _rx) = unbounded(); let shares = vec![ - create_share_for_round(2, Author::ONE), - create_share_for_round(1, Author::TWO), - create_share_for_round(1, Author::ZERO), + create_share_for_round(ctxt.target_epoch, 2, ctxt.authors[0]), + create_share_for_round(ctxt.target_epoch, 1, ctxt.authors[1]), + create_share_for_round(ctxt.target_epoch, 1, ctxt.authors[2]), ]; - let mut item = RandItem::::new(Author::TWO); + let mut item = RandItem::::new(ctxt.authors[1]); for share in shares.iter() { - item.add_share(share.clone(), &config).unwrap(); + item.add_share(share.clone(), &ctxt.rand_config).unwrap(); } assert_eq!(item.total_weights().unwrap(), 6); - item.add_metadata(&config, RandMetadata::new_for_testing(1)); + item.add_metadata( + &ctxt.rand_config, + RandMetadata::new(ctxt.target_epoch, 1, HashValue::zero(), 1700000000), + ); assert_eq!(item.total_weights().unwrap(), 5); - item.try_aggregate(&config, tx); + item.try_aggregate(&ctxt.rand_config, tx); assert!(item.has_decision()); - let mut item = RandItem::::new(Author::ONE); - item.add_metadata(&config, RandMetadata::new_for_testing(2)); + let mut item = RandItem::::new(ctxt.authors[0]); + item.add_metadata( + &ctxt.rand_config, + RandMetadata::new(ctxt.target_epoch, 2, HashValue::zero(), 1700000000), + ); for share in shares[1..].iter() { - item.add_share(share.clone(), &config).unwrap_err(); + item.add_share(share.clone(), &ctxt.rand_config) + .unwrap_err(); } } #[tokio::test] async fn test_rand_store() { - let authors: Vec<_> = (0..7) - .map(|i| Author::from_str(&format!("{:x}", i)).unwrap()) - .collect(); - let weights: HashMap = authors.iter().map(|addr| (*addr, 1)).collect(); - let authors: Vec = 
weights.keys().cloned().collect(); - let config = RandConfig::new(1, Author::ZERO, weights); - let (decision_tx, mut decision_rx) = unbounded_channel(); - let mut rand_store = RandStore::new(1, authors[1], config, decision_tx); + let ctxt = TestContext::new(vec![100; 7], 0); + let (decision_tx, mut decision_rx) = unbounded(); + let mut rand_store = RandStore::new( + ctxt.target_epoch, + ctxt.authors[1], + ctxt.rand_config.clone(), + decision_tx, + ); let rounds = vec![vec![1], vec![2, 3], vec![5, 8, 13]]; let blocks_1 = QueueItem::new(create_ordered_blocks(rounds[0].clone()), None); @@ -358,29 +477,30 @@ mod tests { let metadata_2 = blocks_2.all_rand_metadata(); // shares come before metadata - for share in authors[0..5] + for share in ctxt.authors[0..5] .iter() .map(|author| create_share(metadata_1[0].clone(), *author)) { rand_store.add_share(share).unwrap(); } - assert!(decision_rx.try_recv().is_err()); + assert!(decision_rx.try_next().is_err()); for metadata in blocks_1.all_rand_metadata() { rand_store.add_rand_metadata(metadata); } - assert!(decision_rx.recv().await.is_some()); + assert!(decision_rx.next().await.is_some()); + // metadata come after shares for metadata in blocks_2.all_rand_metadata() { rand_store.add_rand_metadata(metadata); } - assert!(decision_rx.try_recv().is_err()); + assert!(decision_rx.try_next().is_err()); - for share in authors[1..6] + for share in ctxt.authors[1..6] .iter() .map(|author| create_share(metadata_2[0].clone(), *author)) { rand_store.add_share(share).unwrap(); } - assert!(decision_rx.recv().await.is_some()); + assert!(decision_rx.next().await.is_some()); } } diff --git a/consensus/src/rand/rand_gen/reliable_broadcast_state.rs b/consensus/src/rand/rand_gen/reliable_broadcast_state.rs index 43999890a0e05..e993ee36d17bb 100644 --- a/consensus/src/rand/rand_gen/reliable_broadcast_state.rs +++ b/consensus/src/rand/rand_gen/reliable_broadcast_state.rs @@ -5,8 +5,8 @@ use crate::rand::rand_gen::{ network_messages::RandMessage, 
rand_store::RandStore, types::{ - AugData, AugDataSignature, AugmentedData, CertifiedAugData, CertifiedAugDataAck, - RandConfig, RandShare, RequestShare, Share, + AugData, AugDataSignature, CertifiedAugData, CertifiedAugDataAck, RandConfig, RandShare, + RequestShare, TAugmentedData, TShare, }, }; use anyhow::ensure; @@ -34,7 +34,7 @@ impl AugDataCertBuilder { } } -impl BroadcastStatus, RandMessage> +impl BroadcastStatus, RandMessage> for Arc> { type Aggregated = CertifiedAugData; @@ -45,7 +45,7 @@ impl BroadcastStatus, RandMessage< ack.verify(peer, &self.epoch_state.verifier, &self.aug_data)?; let mut parital_signatures_guard = self.partial_signatures.lock(); parital_signatures_guard.add_signature(peer, ack.into_signature()); - Ok(self + let qc_aug_data = self .epoch_state .verifier .check_voting_power(parital_signatures_guard.signatures().keys(), true) @@ -57,7 +57,8 @@ impl BroadcastStatus, RandMessage< .aggregate_signatures(&parital_signatures_guard) .expect("Signature aggregation should succeed"); CertifiedAugData::new(self.aug_data.clone(), aggregated_signature) - })) + }); + Ok(qc_aug_data) } } @@ -73,7 +74,7 @@ impl CertifiedAugDataAckState { } } -impl BroadcastStatus, RandMessage> +impl BroadcastStatus, RandMessage> for Arc { type Aggregated = (); @@ -116,7 +117,7 @@ impl ShareAggregateState { } } -impl BroadcastStatus, RandMessage> +impl BroadcastStatus, RandMessage> for Arc> { type Aggregated = (); diff --git a/consensus/src/rand/rand_gen/storage/db.rs b/consensus/src/rand/rand_gen/storage/db.rs index 3a968000c4064..0700d6f92563a 100644 --- a/consensus/src/rand/rand_gen/storage/db.rs +++ b/consensus/src/rand/rand_gen/storage/db.rs @@ -5,12 +5,13 @@ use crate::{ error::DbError, rand::rand_gen::{ storage::{ - interface::AugDataStorage, + interface::RandStorage, schema::{ - AugDataSchema, CertifiedAugDataSchema, AUG_DATA_CF_NAME, CERTIFIED_AUG_DATA_CF_NAME, + AugDataSchema, CertifiedAugDataSchema, KeyPairSchema, AUG_DATA_CF_NAME, + 
CERTIFIED_AUG_DATA_CF_NAME, KEY_PAIR_CF_NAME, }, }, - types::{AugData, AugDataId, AugmentedData, CertifiedAugData}, + types::{AugData, AugDataId, CertifiedAugData, TAugmentedData}, }, }; use anyhow::Result; @@ -26,7 +27,11 @@ pub const RAND_DB_NAME: &str = "rand_db"; impl RandDb { pub(crate) fn new + Clone>(db_root_path: P) -> Self { - let column_families = vec![AUG_DATA_CF_NAME, CERTIFIED_AUG_DATA_CF_NAME]; + let column_families = vec![ + KEY_PAIR_CF_NAME, + AUG_DATA_CF_NAME, + CERTIFIED_AUG_DATA_CF_NAME, + ]; let path = db_root_path.as_ref().join(RAND_DB_NAME); let instant = Instant::now(); @@ -77,34 +82,40 @@ impl RandDb { } } -impl AugDataStorage for RandDb { - fn save_aug_data(&self, aug_data: &AugData) -> anyhow::Result<()> { +impl RandStorage for RandDb { + fn save_key_pair_bytes(&self, epoch: u64, key_pair: Vec) -> Result<()> { + Ok(self.put::(&(), &(epoch, key_pair))?) + } + + fn save_aug_data(&self, aug_data: &AugData) -> Result<()> { Ok(self.put::>(&aug_data.id(), aug_data)?) } - fn save_certified_aug_data( - &self, - certified_aug_data: &CertifiedAugData, - ) -> anyhow::Result<()> { + fn save_certified_aug_data(&self, certified_aug_data: &CertifiedAugData) -> Result<()> { Ok(self.put::>(&certified_aug_data.id(), certified_aug_data)?) } - fn get_all_aug_data(&self) -> anyhow::Result)>> { + fn get_key_pair_bytes(&self) -> Result)>> { + Ok(self.get_all::()?.pop().map(|(_, v)| v)) + } + + fn get_all_aug_data(&self) -> Result)>> { Ok(self.get_all::>()?) } - fn get_all_certified_aug_data(&self) -> anyhow::Result)>> { + fn get_all_certified_aug_data(&self) -> Result)>> { Ok(self.get_all::>()?) } - fn remove_aug_data(&self, aug_data: impl Iterator>) -> anyhow::Result<()> { - Ok(self.delete::>(aug_data.map(|d| d.id()))?) + fn remove_aug_data(&self, aug_data: Vec>) -> Result<()> { + Ok(self.delete::>(aug_data.into_iter().map(|d| d.id()))?) 
} fn remove_certified_aug_data( &self, - certified_aug_data: impl Iterator>, - ) -> anyhow::Result<()> { - Ok(self.delete::>(certified_aug_data.map(|d| d.id()))?) + certified_aug_data: Vec>, + ) -> Result<()> { + Ok(self + .delete::>(certified_aug_data.into_iter().map(|d| d.id()))?) } } diff --git a/consensus/src/rand/rand_gen/storage/in_memory.rs b/consensus/src/rand/rand_gen/storage/in_memory.rs index d669563bb2c65..cf5046f5d1a53 100644 --- a/consensus/src/rand/rand_gen/storage/in_memory.rs +++ b/consensus/src/rand/rand_gen/storage/in_memory.rs @@ -2,13 +2,14 @@ // SPDX-License-Identifier: Apache-2.0 use crate::rand::rand_gen::{ - storage::interface::AugDataStorage, - types::{AugData, AugDataId, AugmentedData, CertifiedAugData}, + storage::interface::RandStorage, + types::{AugData, AugDataId, CertifiedAugData, TAugmentedData}, }; use aptos_infallible::RwLock; use std::collections::HashMap; pub struct InMemRandDb { + key_pair: RwLock)>>, aug_data: RwLock>>, certified_aug_data: RwLock>>, } @@ -16,13 +17,19 @@ pub struct InMemRandDb { impl InMemRandDb { pub fn new() -> Self { Self { + key_pair: RwLock::new(None), aug_data: RwLock::new(HashMap::new()), certified_aug_data: RwLock::new(HashMap::new()), } } } -impl AugDataStorage for InMemRandDb { +impl RandStorage for InMemRandDb { + fn save_key_pair_bytes(&self, epoch: u64, key_pair: Vec) -> anyhow::Result<()> { + self.key_pair.write().replace((epoch, key_pair)); + Ok(()) + } + fn save_aug_data(&self, aug_data: &AugData) -> anyhow::Result<()> { self.aug_data .write() @@ -40,6 +47,10 @@ impl AugDataStorage for InMemRandDb { Ok(()) } + fn get_key_pair_bytes(&self) -> anyhow::Result)>> { + Ok(self.key_pair.read().clone()) + } + fn get_all_aug_data(&self) -> anyhow::Result)>> { Ok(self.aug_data.read().clone().into_iter().collect()) } @@ -48,7 +59,7 @@ impl AugDataStorage for InMemRandDb { Ok(self.certified_aug_data.read().clone().into_iter().collect()) } - fn remove_aug_data(&self, aug_data: impl Iterator>) -> 
anyhow::Result<()> { + fn remove_aug_data(&self, aug_data: Vec>) -> anyhow::Result<()> { for data in aug_data { self.aug_data.write().remove(&data.id()); } @@ -57,7 +68,7 @@ impl AugDataStorage for InMemRandDb { fn remove_certified_aug_data( &self, - certified_aug_data: impl Iterator>, + certified_aug_data: Vec>, ) -> anyhow::Result<()> { for data in certified_aug_data { self.certified_aug_data.write().remove(&data.id()); diff --git a/consensus/src/rand/rand_gen/storage/interface.rs b/consensus/src/rand/rand_gen/storage/interface.rs index d9bdd26354749..80a391f78285e 100644 --- a/consensus/src/rand/rand_gen/storage/interface.rs +++ b/consensus/src/rand/rand_gen/storage/interface.rs @@ -3,19 +3,21 @@ use crate::rand::rand_gen::types::{AugData, AugDataId, CertifiedAugData}; -pub trait AugDataStorage: 'static { +pub trait RandStorage: Send + Sync + 'static { + fn save_key_pair_bytes(&self, epoch: u64, key_pair: Vec) -> anyhow::Result<()>; fn save_aug_data(&self, aug_data: &AugData) -> anyhow::Result<()>; fn save_certified_aug_data( &self, certified_aug_data: &CertifiedAugData, ) -> anyhow::Result<()>; + fn get_key_pair_bytes(&self) -> anyhow::Result)>>; fn get_all_aug_data(&self) -> anyhow::Result)>>; fn get_all_certified_aug_data(&self) -> anyhow::Result)>>; - fn remove_aug_data(&self, aug_data: impl Iterator>) -> anyhow::Result<()>; + fn remove_aug_data(&self, aug_data: Vec>) -> anyhow::Result<()>; fn remove_certified_aug_data( &self, - certified_aug_data: impl Iterator>, + certified_aug_data: Vec>, ) -> anyhow::Result<()>; } diff --git a/consensus/src/rand/rand_gen/storage/schema.rs b/consensus/src/rand/rand_gen/storage/schema.rs index ef57242db7568..37b7c5b2a7e0e 100644 --- a/consensus/src/rand/rand_gen/storage/schema.rs +++ b/consensus/src/rand/rand_gen/storage/schema.rs @@ -1,25 +1,50 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::rand::rand_gen::types::{AugData, AugDataId, AugmentedData, CertifiedAugData}; +use 
crate::rand::rand_gen::types::{AugData, AugDataId, CertifiedAugData, TAugmentedData}; use aptos_schemadb::{ + define_schema, schema::{KeyCodec, Schema, ValueCodec}, ColumnFamilyName, }; use std::marker::PhantomData; +pub(crate) const KEY_PAIR_CF_NAME: ColumnFamilyName = "key_pair"; + +define_schema!(KeyPairSchema, (), (u64, Vec), KEY_PAIR_CF_NAME); + +impl KeyCodec for () { + fn encode_key(&self) -> anyhow::Result> { + Ok(bcs::to_bytes(self)?) + } + + fn decode_key(data: &[u8]) -> anyhow::Result { + Ok(bcs::from_bytes(data)?) + } +} + +impl ValueCodec for (u64, Vec) { + fn encode_value(&self) -> anyhow::Result> { + Ok(bcs::to_bytes(self)?) + } + + fn decode_value(data: &[u8]) -> anyhow::Result { + Ok(bcs::from_bytes(data)?) + } +} + pub(crate) const AUG_DATA_CF_NAME: ColumnFamilyName = "aug_data"; #[derive(Debug)] pub struct AugDataSchema(PhantomData); -impl Schema for AugDataSchema { +impl Schema for AugDataSchema { type Key = AugDataId; type Value = AugData; const COLUMN_FAMILY_NAME: ColumnFamilyName = AUG_DATA_CF_NAME; } -impl KeyCodec> for AugDataId { +impl KeyCodec> for AugDataId { fn encode_key(&self) -> anyhow::Result> { Ok(bcs::to_bytes(self)?) } @@ -29,7 +54,7 @@ impl KeyCodec> for AugDataId { } } -impl ValueCodec> for AugData { +impl ValueCodec> for AugData { fn encode_value(&self) -> anyhow::Result> { Ok(bcs::to_bytes(&self)?) } @@ -43,14 +68,14 @@ pub(crate) const CERTIFIED_AUG_DATA_CF_NAME: ColumnFamilyName = "certified_aug_d #[derive(Debug)] pub struct CertifiedAugDataSchema(PhantomData); -impl Schema for CertifiedAugDataSchema { +impl Schema for CertifiedAugDataSchema { type Key = AugDataId; type Value = CertifiedAugData; const COLUMN_FAMILY_NAME: ColumnFamilyName = CERTIFIED_AUG_DATA_CF_NAME; } -impl KeyCodec> for AugDataId { +impl KeyCodec> for AugDataId { fn encode_key(&self) -> anyhow::Result> { Ok(bcs::to_bytes(self)?) 
} @@ -60,7 +85,7 @@ impl KeyCodec> for AugDataId { } } -impl ValueCodec> for CertifiedAugData { +impl ValueCodec> for CertifiedAugData { fn encode_value(&self) -> anyhow::Result> { Ok(bcs::to_bytes(&self)?) } diff --git a/consensus/src/rand/rand_gen/test_utils.rs b/consensus/src/rand/rand_gen/test_utils.rs index 06d0b3eea063c..0c2977941c823 100644 --- a/consensus/src/rand/rand_gen/test_utils.rs +++ b/consensus/src/rand/rand_gen/test_utils.rs @@ -51,8 +51,16 @@ pub fn create_ordered_blocks(rounds: Vec) -> OrderedBlocks { } } -pub(super) fn create_share_for_round(round: Round, author: Author) -> RandShare { - RandShare::::new(author, RandMetadata::new_for_testing(round), MockShare) +pub(super) fn create_share_for_round( + epoch: u64, + round: Round, + author: Author, +) -> RandShare { + RandShare::::new( + author, + RandMetadata::new(epoch, round, HashValue::zero(), 1700000000), + MockShare, + ) } pub(super) fn create_share(rand_metadata: RandMetadata, author: Author) -> RandShare { diff --git a/consensus/src/rand/rand_gen/types.rs b/consensus/src/rand/rand_gen/types.rs index f3d4009ce66f3..7a658127e667b 100644 --- a/consensus/src/rand/rand_gen/types.rs +++ b/consensus/src/rand/rand_gen/types.rs @@ -1,17 +1,28 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use anyhow::ensure; +use anyhow::{bail, ensure}; use aptos_consensus_types::common::{Author, Round}; use aptos_crypto::bls12381::Signature; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; +use aptos_dkg::{ + pvss::{Player, WeightedConfig}, + weighted_vuf::traits::WeightedVUF, +}; +use aptos_logger::debug; +use aptos_runtimes::spawn_rayon_thread_pool; use aptos_types::{ aggregate_signature::AggregateSignature, - randomness::{RandMetadata, Randomness}, + randomness::{ + Delta, PKShare, ProofShare, RandKeys, RandMetadata, Randomness, WvufPP, APK, WVUF, + }, validator_verifier::ValidatorVerifier, }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; -use 
std::{collections::HashMap, fmt::Debug}; +use sha3::{Digest, Sha3_256}; +use std::{fmt::Debug, sync::Arc}; + +const NUM_THREADS_FOR_WVUF_DERIVATION: usize = 8; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(super) struct MockShare; @@ -19,7 +30,129 @@ pub(super) struct MockShare; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub(super) struct MockAugData; -impl Share for MockShare { +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct Share { + share: ProofShare, +} + +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub struct AugmentedData { + delta: Delta, +} + +impl TShare for Share { + fn verify( + &self, + rand_config: &RandConfig, + rand_metadata: &RandMetadata, + author: &Author, + ) -> anyhow::Result<()> { + let index = *rand_config + .validator + .address_to_validator_index() + .get(author) + .unwrap(); + let maybe_apk = &rand_config.keys.certified_apks[index]; + if let Some(apk) = maybe_apk.get() { + WVUF::verify_share( + &rand_config.vuf_pp, + apk, + rand_metadata.to_bytes().as_slice(), + &self.share, + )?; + } else { + bail!( + "[RandShare] No augmented public key for validator id {}, {}", + index, + author + ); + } + Ok(()) + } + + fn generate(rand_config: &RandConfig, rand_metadata: RandMetadata) -> RandShare + where + Self: Sized, + { + let share = Share { + share: WVUF::create_share(&rand_config.keys.ask, rand_metadata.to_bytes().as_slice()), + }; + RandShare::new(rand_config.author(), rand_metadata, share) + } + + fn aggregate<'a>( + shares: impl Iterator>, + rand_config: &RandConfig, + rand_metadata: RandMetadata, + ) -> Randomness + where + Self: Sized, + { + let timer = std::time::Instant::now(); + let mut apks_and_proofs = vec![]; + for share in shares { + let id = *rand_config + .validator + .address_to_validator_index() + .get(share.author()) + .unwrap(); + let apk = rand_config.get_certified_apk(share.author()).unwrap(); // needs to have apk to verify the share + 
apks_and_proofs.push((Player { id }, apk.clone(), share.share().share)); + } + + let proof = WVUF::aggregate_shares(&rand_config.wconfig, &apks_and_proofs); + let pool = + spawn_rayon_thread_pool("wvuf".to_string(), Some(NUM_THREADS_FOR_WVUF_DERIVATION)); + let eval = WVUF::derive_eval( + &rand_config.wconfig, + &rand_config.vuf_pp, + rand_metadata.to_bytes().as_slice(), + &rand_config.get_all_certified_apk(), + &proof, + &pool, + ) + .expect("All APK should exist"); + debug!( + "WVUF derivation time: {} ms, number of threads: {}", + timer.elapsed().as_millis(), + NUM_THREADS_FOR_WVUF_DERIVATION + ); + let eval_bytes = bcs::to_bytes(&eval).unwrap(); + let rand_bytes = Sha3_256::digest(eval_bytes.as_slice()).to_vec(); + Randomness::new(rand_metadata.clone(), rand_bytes) + } +} + +impl TAugmentedData for AugmentedData { + fn generate(rand_config: &RandConfig) -> AugData + where + Self: Sized, + { + let delta = rand_config.get_my_delta().clone(); + rand_config + .add_certified_delta(&rand_config.author(), delta.clone()) + .expect("Add self delta should succeed"); + let data = AugmentedData { + delta: delta.clone(), + }; + AugData::new(rand_config.epoch(), rand_config.author(), data) + } + + fn augment(&self, rand_config: &RandConfig, author: &Author) { + let AugmentedData { delta } = self; + rand_config + .add_certified_delta(author, delta.clone()) + .expect("Add delta should succeed") + } + + fn verify(&self, rand_config: &RandConfig, author: &Author) -> anyhow::Result<()> { + rand_config + .derive_apk(author, self.delta.clone()) + .map(|_| ()) + } +} + +impl TShare for MockShare { fn verify( &self, _rand_config: &RandConfig, @@ -33,7 +166,7 @@ impl Share for MockShare { where Self: Sized, { - RandShare::new(*rand_config.author(), rand_metadata, Self) + RandShare::new(rand_config.author(), rand_metadata, Self) } fn aggregate<'a>( @@ -48,12 +181,12 @@ impl Share for MockShare { } } -impl AugmentedData for MockAugData { +impl TAugmentedData for MockAugData { fn 
generate(rand_config: &RandConfig) -> AugData where Self: Sized, { - AugData::new(rand_config.epoch(), *rand_config.author(), Self) + AugData::new(rand_config.epoch(), rand_config.author(), Self) } fn augment(&self, _rand_config: &RandConfig, _author: &Author) {} @@ -63,7 +196,7 @@ impl AugmentedData for MockAugData { } } -pub trait Share: +pub trait TShare: Clone + Debug + PartialEq + Send + Sync + Serialize + DeserializeOwned + 'static { fn verify( @@ -86,7 +219,7 @@ pub trait Share: Self: Sized; } -pub trait AugmentedData: +pub trait TAugmentedData: Clone + Debug + PartialEq + Send + Sync + Serialize + DeserializeOwned + 'static { fn generate(rand_config: &RandConfig) -> AugData @@ -112,7 +245,7 @@ pub struct RandShare { share: S, } -impl RandShare { +impl RandShare { pub fn new(author: Author, metadata: RandMetadata, share: S) -> Self { Self { author, @@ -125,6 +258,10 @@ impl RandShare { &self.author } + pub fn share(&self) -> &S { + &self.share + } + pub fn metadata(&self) -> &RandMetadata { &self.metadata } @@ -200,7 +337,7 @@ pub struct AugData { data: D, } -impl AugData { +impl AugData { pub fn new(epoch: u64, author: Author, data: D) -> Self { Self { epoch, @@ -246,7 +383,7 @@ impl AugDataSignature { self.epoch } - pub fn verify( + pub fn verify( &self, author: Author, verifier: &ValidatorVerifier, @@ -266,7 +403,7 @@ pub struct CertifiedAugData { signatures: AggregateSignature, } -impl CertifiedAugData { +impl CertifiedAugData { pub fn new(aug_data: AugData, signatures: AggregateSignature) -> Self { Self { aug_data, @@ -313,20 +450,47 @@ impl CertifiedAugDataAck { #[derive(Clone)] pub struct RandConfig { - epoch: u64, - author: Author, - threshold: u64, - weights: HashMap, + pub author: Author, + pub epoch: u64, + pub validator: ValidatorVerifier, + // public parameters of the weighted VUF + pub vuf_pp: WvufPP, + // key shares for weighted VUF + pub keys: Arc, + // weighted config for weighted VUF + pub wconfig: WeightedConfig, + /// If set, do not 
perform dequeue (so execution is blocked). This is test-only. + pub block_randomness: bool, +} + +impl Debug for RandConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "RandConfig {{ epoch: {}, author: {}, wconfig: {:?} }}", + self.epoch, self.author, self.wconfig + ) + } } impl RandConfig { - pub fn new(epoch: u64, author: Author, weights: HashMap) -> Self { - let sum = weights.values().sum::(); + pub fn new( + author: Author, + epoch: u64, + validator: ValidatorVerifier, + vuf_pp: WvufPP, + keys: RandKeys, + wconfig: WeightedConfig, + block_randomness: bool, + ) -> Self { Self { - epoch, author, - weights, - threshold: sum * 2 / 3 + 1, + epoch, + validator, + vuf_pp, + keys: Arc::new(keys), + wconfig, + block_randomness, } } @@ -334,18 +498,64 @@ impl RandConfig { self.epoch } - pub fn author(&self) -> &Author { - &self.author + pub fn author(&self) -> Author { + self.author } - pub fn get_peer_weight(&self, author: &Author) -> u64 { + pub fn get_id(&self, peer: &Author) -> usize { *self - .weights - .get(author) - .expect("Author should exist after verify") + .validator + .address_to_validator_index() + .get(peer) + .unwrap() + } + + pub fn get_certified_apk(&self, peer: &Author) -> Option<&APK> { + let index = self.get_id(peer); + self.keys.certified_apks[index].get() + } + + pub fn get_all_certified_apk(&self) -> Vec> { + self.keys + .certified_apks + .iter() + .map(|cell| cell.get().cloned()) + .collect() + } + + pub fn add_certified_apk(&self, peer: &Author, apk: APK) -> anyhow::Result<()> { + let index = self.get_id(peer); + self.keys.add_certified_apk(index, apk) + } + + fn derive_apk(&self, peer: &Author, delta: Delta) -> anyhow::Result { + let apk = WVUF::augment_pubkey(&self.vuf_pp, self.get_pk_share(peer).clone(), delta)?; + Ok(apk) + } + + pub fn add_certified_delta(&self, peer: &Author, delta: Delta) -> anyhow::Result<()> { + let apk = self.derive_apk(peer, delta)?; + self.add_certified_apk(peer, apk)?; + 
Ok(()) + } + + pub fn get_my_delta(&self) -> &Delta { + WVUF::get_public_delta(&self.keys.apk) + } + + pub fn get_pk_share(&self, peer: &Author) -> &PKShare { + let index = self.get_id(peer); + &self.keys.pk_shares[index] + } + + pub fn get_peer_weight(&self, peer: &Author) -> u64 { + let player = Player { + id: self.get_id(peer), + }; + self.wconfig.get_player_weight(&player) as u64 } - pub fn threshold_weight(&self) -> u64 { - self.threshold + pub fn threshold(&self) -> u64 { + self.wconfig.get_threshold_weight() as u64 } } diff --git a/consensus/src/test_utils/mock_execution_client.rs b/consensus/src/test_utils/mock_execution_client.rs index 82d30670b84a8..579812c75207b 100644 --- a/consensus/src/test_utils/mock_execution_client.rs +++ b/consensus/src/test_utils/mock_execution_client.rs @@ -4,16 +4,18 @@ use crate::{ error::StateSyncError, - network::IncomingCommitRequest, + network::{IncomingCommitRequest, IncomingRandGenRequest}, payload_manager::PayloadManager, pipeline::{ buffer_manager::OrderedBlocks, execution_client::TExecutionClient, signing_phase::CommitSignerProvider, }, + rand::rand_gen::types::RandConfig, state_replication::StateComputerCommitCallBackType, test_utils::mock_storage::MockStorage, }; use anyhow::{format_err, Result}; +use aptos_channels::aptos_channel; use aptos_consensus_types::{common::Payload, pipelined_block::PipelinedBlock}; use aptos_crypto::HashValue; use aptos_executor_types::ExecutorResult; @@ -22,7 +24,7 @@ use aptos_logger::prelude::*; use aptos_types::{ epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, - on_chain_config::{Features, OnChainExecutionConfig}, + on_chain_config::{Features, OnChainConsensusConfig, OnChainExecutionConfig}, transaction::SignedTransaction, }; use futures::{channel::mpsc, SinkExt}; @@ -92,8 +94,11 @@ impl TExecutionClient for MockExecutionClient { _epoch_state: Arc, _commit_signer_provider: Arc, _payload_manager: Arc, + _onchain_consensus_config: &OnChainConsensusConfig, 
_onchain_execution_config: &OnChainExecutionConfig, _features: &Features, + _rand_config: Option, + _rand_msg_rx: aptos_channel::Receiver, ) { } diff --git a/consensus/src/twins/twins_node.rs b/consensus/src/twins/twins_node.rs index b47bc28997db2..3456cd3b58357 100644 --- a/consensus/src/twins/twins_node.rs +++ b/consensus/src/twins/twins_node.rs @@ -11,6 +11,7 @@ use crate::{ payload_manager::PayloadManager, pipeline::buffer_manager::OrderedBlocks, quorum_store::quorum_store_db::MockQuorumStoreDB, + rand::rand_gen::storage::in_memory::InMemRandDb, test_utils::{mock_execution_client::MockExecutionClient, MockStorage}, util::time_service::ClockTimeService, }; @@ -162,6 +163,7 @@ impl SMRNode { bounded_executor, aptos_time_service::TimeService::real(), vtxn_pool, + Arc::new(InMemRandDb::new()), ); let (network_task, network_receiver) = NetworkTask::new(network_service_events, self_receiver); diff --git a/crates/aptos-genesis/src/builder.rs b/crates/aptos-genesis/src/builder.rs index 2f9992df2358a..7ad40777cbcd0 100644 --- a/crates/aptos-genesis/src/builder.rs +++ b/crates/aptos-genesis/src/builder.rs @@ -27,7 +27,7 @@ use aptos_keygen::KeyGen; use aptos_logger::prelude::*; use aptos_types::{ chain_id::ChainId, - on_chain_config::{GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig}, + on_chain_config::{Features, GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig}, transaction::Transaction, waypoint::Waypoint, }; @@ -431,6 +431,7 @@ pub struct GenesisConfiguration { pub consensus_config: OnChainConsensusConfig, pub execution_config: OnChainExecutionConfig, pub gas_schedule: GasScheduleV2, + pub initial_features_override: Option, } pub type InitConfigFn = Arc; @@ -648,6 +649,7 @@ impl Builder { consensus_config: OnChainConsensusConfig::default_for_genesis(), execution_config: OnChainExecutionConfig::default_for_genesis(), gas_schedule: default_gas_schedule(), + initial_features_override: None, }; if let Some(init_genesis_config) = 
&self.init_genesis_config { (init_genesis_config)(&mut genesis_config); diff --git a/crates/aptos-genesis/src/lib.rs b/crates/aptos-genesis/src/lib.rs index 73633e2d2c73c..68d61db174eee 100644 --- a/crates/aptos-genesis/src/lib.rs +++ b/crates/aptos-genesis/src/lib.rs @@ -23,7 +23,7 @@ use aptos_storage_interface::DbReaderWriter; use aptos_temppath::TempPath; use aptos_types::{ chain_id::ChainId, - on_chain_config::{GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig}, + on_chain_config::{Features, GasScheduleV2, OnChainConsensusConfig, OnChainExecutionConfig}, transaction::Transaction, waypoint::Waypoint, }; @@ -70,6 +70,7 @@ pub struct GenesisInfo { pub consensus_config: OnChainConsensusConfig, pub execution_config: OnChainExecutionConfig, pub gas_schedule: GasScheduleV2, + pub initial_features_override: Option, } impl GenesisInfo { @@ -106,6 +107,7 @@ impl GenesisInfo { consensus_config: genesis_config.consensus_config.clone(), execution_config: genesis_config.execution_config.clone(), gas_schedule: genesis_config.gas_schedule.clone(), + initial_features_override: genesis_config.initial_features_override.clone(), }) } @@ -138,6 +140,7 @@ impl GenesisInfo { voting_power_increase_limit: self.voting_power_increase_limit, employee_vesting_start: 1663456089, employee_vesting_period_duration: 5 * 60, // 5 minutes + initial_features_override: self.initial_features_override.clone(), }, &self.consensus_config, &self.execution_config, diff --git a/crates/aptos-genesis/src/mainnet.rs b/crates/aptos-genesis/src/mainnet.rs index 7e1aa674dd72d..068f919b170e1 100644 --- a/crates/aptos-genesis/src/mainnet.rs +++ b/crates/aptos-genesis/src/mainnet.rs @@ -10,7 +10,9 @@ use aptos_db::AptosDB; use aptos_framework::ReleaseBundle; use aptos_storage_interface::DbReaderWriter; use aptos_temppath::TempPath; -use aptos_types::{chain_id::ChainId, transaction::Transaction, waypoint::Waypoint}; +use aptos_types::{ + chain_id::ChainId, on_chain_config::Features, 
transaction::Transaction, waypoint::Waypoint, +}; use aptos_vm::AptosVM; use aptos_vm_genesis::{AccountBalance, EmployeePool, ValidatorWithCommissionRate}; @@ -54,6 +56,8 @@ pub struct MainnetGenesisInfo { employee_vesting_start: u64, /// Duration of each vesting period (in seconds). employee_vesting_period_duration: u64, + + initial_features_override: Option, } impl MainnetGenesisInfo { @@ -93,6 +97,7 @@ impl MainnetGenesisInfo { voting_power_increase_limit: genesis_config.voting_power_increase_limit, employee_vesting_start, employee_vesting_period_duration, + initial_features_override: genesis_config.initial_features_override.clone(), }) } @@ -126,6 +131,7 @@ impl MainnetGenesisInfo { voting_power_increase_limit: self.voting_power_increase_limit, employee_vesting_start: self.employee_vesting_start, employee_vesting_period_duration: self.employee_vesting_period_duration, + initial_features_override: self.initial_features_override.clone(), }, ) } diff --git a/crates/aptos-telemetry-service/src/tests/test_context.rs b/crates/aptos-telemetry-service/src/tests/test_context.rs index af6c691df6d07..be3c1b797a74a 100644 --- a/crates/aptos-telemetry-service/src/tests/test_context.rs +++ b/crates/aptos-telemetry-service/src/tests/test_context.rs @@ -2,15 +2,19 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - context::{ClientTuple, Context, GroupedMetricsClients, JsonWebTokenService, PeerStoreTuple}, + context::{ + ClientTuple, Context, GroupedMetricsClients, JsonWebTokenService, PeerStoreTuple, + RemoteNodeConfigProvider, + }, index, CustomEventConfig, LogIngestConfig, MetricsEndpointsConfig, TelemetryServiceConfig, }; use aptos_crypto::{x25519, Uniform}; +use aptos_infallible::RwLock; use aptos_rest_client::aptos_api_types::mime_types; use rand::SeedableRng; use reqwest::header::AUTHORIZATION; use serde_json::Value; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use warp::{ http::{header::CONTENT_TYPE, Response}, hyper::body::Bytes, 
@@ -36,7 +40,7 @@ pub async fn new_test_context() -> TestContext { peer_identities: HashMap::new(), metrics_endpoints_config: MetricsEndpointsConfig::default_for_test(), humio_ingest_config: LogIngestConfig::default_for_test(), - remote_config_url: todo!(), + remote_config_url: "".to_string(), //TODO: i'm only making it build... }; let peers = PeerStoreTuple::default(); @@ -51,7 +55,7 @@ pub async fn new_test_context() -> TestContext { jwt_service, HashMap::new(), HashMap::new(), - todo!(), + Arc::new(RwLock::new(RemoteNodeConfigProvider::new())), //TODO: i'm just making it build... ), ) } diff --git a/crates/aptos-telemetry/Cargo.toml b/crates/aptos-telemetry/Cargo.toml index 3c48e32fdbbd0..3026581c23b8a 100644 --- a/crates/aptos-telemetry/Cargo.toml +++ b/crates/aptos-telemetry/Cargo.toml @@ -40,6 +40,7 @@ reqwest-middleware = { workspace = true } reqwest-retry = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +serde_yaml = { workspace = true } sysinfo = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true } @@ -47,9 +48,8 @@ tokio-retry = { workspace = true } tokio-stream = { workspace = true } url = { workspace = true } uuid = { workspace = true } -serde_yaml = { workspace = true } [dev-dependencies] -httpmock = { workspace = true } -claims = { workspace = true } aptos-temppath = { workspace = true } +claims = { workspace = true } +httpmock = { workspace = true } diff --git a/crates/aptos-telemetry/src/sender.rs b/crates/aptos-telemetry/src/sender.rs index 444783ee893c1..64a9ce36a6b26 100644 --- a/crates/aptos-telemetry/src/sender.rs +++ b/crates/aptos-telemetry/src/sender.rs @@ -412,7 +412,7 @@ impl TelemetrySender { .expect("unable to build telemetry path for config/node"), ), ) - .and_then(|res| error_for_status_with_body(res)) + .and_then(error_for_status_with_body) .and_then(|res| async move { Ok(res.json::>().await?) 
}) .await; debug!("get remote config response {:?}", response); diff --git a/crates/aptos/src/genesis/mod.rs b/crates/aptos/src/genesis/mod.rs index 9c9155645175b..830eddfa51bd0 100644 --- a/crates/aptos/src/genesis/mod.rs +++ b/crates/aptos/src/genesis/mod.rs @@ -257,6 +257,7 @@ pub fn fetch_mainnet_genesis_info(git_options: GitOptions) -> CliTypedResult CliTypedResult PathBuf { + pub fn aptos_framework_dir() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("..") .join("..") diff --git a/crates/indexer/src/indexer/fetcher.rs b/crates/indexer/src/indexer/fetcher.rs index c565ddf516eab..780ec643353d6 100644 --- a/crates/indexer/src/indexer/fetcher.rs +++ b/crates/indexer/src/indexer/fetcher.rs @@ -252,7 +252,13 @@ async fn fetch_nexts( // Do not update block_height if first block is block metadata if ind > 0 { // Update the timestamp if the next block occurs - if let Some(txn) = raw_txn.transaction.try_as_block_metadata() { + if let Some(txn) = raw_txn.transaction.try_as_block_metadata_ext() { + timestamp = txn.timestamp_usecs(); + epoch = txn.epoch(); + epoch_bcs = aptos_api_types::U64::from(epoch); + block_height += 1; + block_height_bcs = aptos_api_types::U64::from(block_height); + } else if let Some(txn) = raw_txn.transaction.try_as_block_metadata() { timestamp = txn.timestamp_usecs(); epoch = txn.epoch(); epoch_bcs = aptos_api_types::U64::from(epoch); diff --git a/dkg/Cargo.toml b/dkg/Cargo.toml index dc338ebe86ca1..5ecd984e3bdb0 100644 --- a/dkg/Cargo.toml +++ b/dkg/Cargo.toml @@ -36,6 +36,7 @@ aptos-validator-transaction-pool = { workspace = true } async-trait = { workspace = true } bcs = { workspace = true } bytes = { workspace = true } +fail = { workspace = true } futures = { workspace = true } futures-channel = { workspace = true } futures-util = { workspace = true } diff --git a/dkg/src/agg_trx_producer.rs b/dkg/src/agg_trx_producer.rs index 7d76caab5c294..b42ed929234f6 100644 --- a/dkg/src/agg_trx_producer.rs +++ b/dkg/src/agg_trx_producer.rs 
@@ -4,11 +4,13 @@ use crate::{ transcript_aggregation::TranscriptAggregationState, types::DKGTranscriptRequest, DKGMessage, }; use aptos_channels::aptos_channel::Sender; +use aptos_logger::info; use aptos_reliable_broadcast::ReliableBroadcast; use aptos_types::{dkg::DKGTrait, epoch_state::EpochState}; use futures::future::AbortHandle; use futures_util::future::Abortable; -use std::sync::Arc; +use move_core_types::account_address::AccountAddress; +use std::{sync::Arc, time::Duration}; use tokio_retry::strategy::ExponentialBackoff; /// A sub-process of the whole DKG process. @@ -18,6 +20,8 @@ use tokio_retry::strategy::ExponentialBackoff; pub trait TAggTranscriptProducer: Send + Sync { fn start_produce( &self, + start_time: Duration, + my_addr: AccountAddress, epoch_state: Arc, dkg_config: S::PublicParams, agg_trx_tx: Option>, @@ -40,17 +44,38 @@ impl AggTranscriptProducer { impl TAggTranscriptProducer for AggTranscriptProducer { fn start_produce( &self, + start_time: Duration, + my_addr: AccountAddress, epoch_state: Arc, params: DKG::PublicParams, agg_trx_tx: Option>, ) -> AbortHandle { + let epoch = epoch_state.epoch; let rb = self.reliable_broadcast.clone(); let req = DKGTranscriptRequest::new(epoch_state.epoch); - let agg_state = Arc::new(TranscriptAggregationState::::new(params, epoch_state)); + let agg_state = Arc::new(TranscriptAggregationState::::new( + start_time, + my_addr, + params, + epoch_state, + )); let task = async move { let agg_trx = rb.broadcast(req, agg_state).await; - if let Some(tx) = agg_trx_tx { - let _ = tx.push((), agg_trx); // If the `DKGManager` was dropped, this send will fail by design. + info!( + epoch = epoch, + my_addr = my_addr, + "[DKG] aggregated transcript locally" + ); + if let Err(e) = agg_trx_tx + .expect("[DKG] agg_trx_tx should be available") + .push((), agg_trx) + { + // If the `DKGManager` was dropped, this send will fail by design. 
+ info!( + epoch = epoch, + my_addr = my_addr, + "[DKG] Failed to send aggregated transcript to DKGManager, maybe DKGManager stopped and channel dropped: {:?}", e + ); } }; let (abort_handle, abort_registration) = AbortHandle::new_pair(); @@ -66,6 +91,8 @@ pub struct DummyAggTranscriptProducer {} impl TAggTranscriptProducer for DummyAggTranscriptProducer { fn start_produce( &self, + _start_time: Duration, + _my_addr: AccountAddress, _epoch_state: Arc, _dkg_config: DKG::PublicParams, _agg_trx_tx: Option>, diff --git a/dkg/src/counters.rs b/dkg/src/counters.rs index 7c490550bed79..da128591e19b3 100644 --- a/dkg/src/counters.rs +++ b/dkg/src/counters.rs @@ -1,6 +1,6 @@ // Copyright © Aptos Foundation -use aptos_metrics_core::{register_int_gauge, IntGauge}; +use aptos_metrics_core::{register_histogram_vec, register_int_gauge, HistogramVec, IntGauge}; use once_cell::sync::Lazy; /// Count of the pending messages sent to itself in the channel @@ -11,3 +11,12 @@ pub static PENDING_SELF_MESSAGES: Lazy = Lazy::new(|| { ) .unwrap() }); + +pub static DKG_STAGE_SECONDS: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_dkg_session_stage_seconds", + "How long it takes to reach different DKG stages", + &["dealer", "stage"] + ) + .unwrap() +}); diff --git a/dkg/src/dkg_manager/mod.rs b/dkg/src/dkg_manager/mod.rs index c0193bc23ddb0..f59b36c4ad4c7 100644 --- a/dkg/src/dkg_manager/mod.rs +++ b/dkg/src/dkg_manager/mod.rs @@ -1,9 +1,13 @@ // Copyright © Aptos Foundation -use crate::{agg_trx_producer::TAggTranscriptProducer, network::IncomingRpcRequest, DKGMessage}; +use crate::{ + agg_trx_producer::TAggTranscriptProducer, counters::DKG_STAGE_SECONDS, + network::IncomingRpcRequest, DKGMessage, +}; use anyhow::{anyhow, bail, ensure, Result}; use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_crypto::Uniform; -use aptos_logger::error; +use aptos_infallible::duration_since_epoch; +use aptos_logger::{debug, error, info}; use aptos_types::{ dkg::{ 
DKGSessionMetadata, DKGSessionState, DKGStartEvent, DKGTrait, DKGTranscript, @@ -13,62 +17,35 @@ use aptos_types::{ validator_txn::{Topic, ValidatorTransaction}, }; use aptos_validator_transaction_pool::{TxnGuard, VTxnPoolState}; +use fail::fail_point; use futures_channel::oneshot; use futures_util::{future::AbortHandle, FutureExt, StreamExt}; use move_core_types::account_address::AccountAddress; use rand::{prelude::StdRng, thread_rng, SeedableRng}; -use std::sync::Arc; +use std::{sync::Arc, time::Duration}; -#[allow(dead_code)] #[derive(Clone, Debug)] -enum InnerState { +enum InnerState { NotStarted, InProgress { - start_time_us: u64, - public_params: DKG::PublicParams, + start_time: Duration, my_transcript: DKGTranscript, abort_handle: AbortHandle, }, Finished { vtxn_guard: TxnGuard, - start_time_us: u64, + start_time: Duration, my_transcript: DKGTranscript, - pull_confirmed: bool, + proposed: bool, }, } -impl InnerState { - fn variant_name(&self) -> &str { - match self { - InnerState::NotStarted => "NotStarted", - InnerState::InProgress { .. } => "InProgress", - InnerState::Finished { .. } => "Finished", - } - } - - #[cfg(test)] - pub fn my_node_cloned(&self) -> DKGTranscript { - match self { - InnerState::NotStarted => panic!("my_node unavailable"), - InnerState::InProgress { - my_transcript: my_node, - .. - } - | InnerState::Finished { - my_transcript: my_node, - .. - } => my_node.clone(), - } - } -} - -impl Default for InnerState { +impl Default for InnerState { fn default() -> Self { Self::NotStarted } } -#[allow(dead_code)] pub struct DKGManager { dealer_sk: Arc, my_index: usize, @@ -85,7 +62,26 @@ pub struct DKGManager { // Control states. stopped: bool, - state: InnerState, + state: InnerState, +} + +impl InnerState { + fn variant_name(&self) -> &str { + match self { + InnerState::NotStarted => "NotStarted", + InnerState::InProgress { .. } => "InProgress", + InnerState::Finished { .. 
} => "Finished", + } + } + + #[cfg(test)] + pub fn my_node_cloned(&self) -> DKGTranscript { + match self { + InnerState::NotStarted => panic!("my_node unavailable"), + InnerState::InProgress { my_transcript, .. } + | InnerState::Finished { my_transcript, .. } => my_transcript.clone(), + } + } } impl DKGManager { @@ -101,8 +97,8 @@ impl DKGManager { aptos_channel::new(QueueStyle::KLAST, 1, None); Self { dealer_sk, - my_index, my_addr, + my_index, epoch_state, vtxn_pool, agg_trx_tx: None, @@ -117,60 +113,128 @@ impl DKGManager { pub async fn run( mut self, in_progress_session: Option, - dkg_start_event_rx: oneshot::Receiver, + mut dkg_start_event_rx: aptos_channel::Receiver<(), DKGStartEvent>, mut rpc_msg_rx: aptos_channel::Receiver< AccountAddress, (AccountAddress, IncomingRpcRequest), >, close_rx: oneshot::Receiver>, ) { + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr.to_hex().as_str(), + "[DKG] DKGManager started." + ); + let mut interval = tokio::time::interval(Duration::from_millis(5000)); + + let (agg_trx_tx, mut agg_trx_rx) = aptos_channel::new(QueueStyle::KLAST, 1, None); + self.agg_trx_tx = Some(agg_trx_tx); + if let Some(session_state) = in_progress_session { let DKGSessionState { - metadata, start_time_us, + metadata, .. } = session_state; - self.setup_deal_broadcast(start_time_us, &metadata) - .await - .expect("setup_deal_broadcast() should be infallible"); - } - let (agg_trx_tx, mut agg_trx_rx) = aptos_channel::new(QueueStyle::KLAST, 1, None); - self.agg_trx_tx = Some(agg_trx_tx); + if metadata.dealer_epoch == self.epoch_state.epoch { + info!( + epoch = self.epoch_state.epoch, + "Found unfinished and current DKG session. Continuing it." + ); + if let Err(e) = self.setup_deal_broadcast(start_time_us, &metadata).await { + error!(epoch = self.epoch_state.epoch, "dkg resumption failed: {e}"); + } + } else { + info!( + cur_epoch = self.epoch_state.epoch, + dealer_epoch = metadata.dealer_epoch, + "Found unfinished but stale DKG session. 
Ignoring it." + ); + } + } - let mut dkg_start_event_rx = dkg_start_event_rx.into_stream(); let mut close_rx = close_rx.into_stream(); while !self.stopped { let handling_result = tokio::select! { dkg_start_event = dkg_start_event_rx.select_next_some() => { - self.process_dkg_start_event(dkg_start_event.ok()).await + self.process_dkg_start_event(dkg_start_event) + .await + .map_err(|e|anyhow!("[DKG] process_dkg_start_event failed: {e}")) }, (_sender, msg) = rpc_msg_rx.select_next_some() => { - self.process_peer_rpc_msg(msg).await + self.process_peer_rpc_msg(msg) + .await + .map_err(|e|anyhow!("[DKG] process_peer_rpc_msg failed: {e}")) }, - agg_node = agg_trx_rx.select_next_some() => { - self.process_aggregated_transcript(agg_node).await + agg_transcript = agg_trx_rx.select_next_some() => { + self.process_aggregated_transcript(agg_transcript) + .await + .map_err(|e|anyhow!("[DKG] process_aggregated_transcript failed: {e}")) + }, dkg_txn = self.pull_notification_rx.select_next_some() => { - self.process_dkg_txn_pulled_notification(dkg_txn).await + self.process_dkg_txn_pulled_notification(dkg_txn) + .await + .map_err(|e|anyhow!("[DKG] process_dkg_txn_pulled_notification failed: {e}")) }, close_req = close_rx.select_next_some() => { self.process_close_cmd(close_req.ok()) - } + }, + _ = interval.tick().fuse() => { + self.observe() + }, }; if let Err(e) = handling_result { - error!("{}", e); + error!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr.to_hex().as_str(), + "[DKG] DKGManager handling error: {e}" + ); } } + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr.to_hex().as_str(), + "[DKG] DKGManager finished." + ); + } + + fn observe(&self) -> Result<()> { + debug!("[DKG] dkg_manager_state={:?}", self.state); + Ok(()) } /// On a CLOSE command from epoch manager, do clean-up. fn process_close_cmd(&mut self, ack_tx: Option>) -> Result<()> { self.stopped = true; - if let InnerState::InProgress { abort_handle, .. 
} = &self.state { - abort_handle.abort(); + match std::mem::take(&mut self.state) { + InnerState::NotStarted => {}, + InnerState::InProgress { abort_handle, .. } => { + abort_handle.abort(); + }, + InnerState::Finished { + vtxn_guard, + start_time, + .. + } => { + let epoch_change_time = duration_since_epoch(); + let secs_since_dkg_start = + epoch_change_time.as_secs_f64() - start_time.as_secs_f64(); + DKG_STAGE_SECONDS + .with_label_values(&[self.my_addr.to_hex().as_str(), "epoch_change"]) + .observe(secs_since_dkg_start); + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + secs_since_dkg_start = secs_since_dkg_start, + "[DKG] txn executed and entering new epoch.", + ); + + drop(vtxn_guard); + }, } if let Some(tx) = ack_tx { @@ -185,13 +249,33 @@ impl DKGManager { &mut self, _txn: Arc, ) -> Result<()> { - if let InnerState::Finished { pull_confirmed, .. } = &mut self.state { - if !*pull_confirmed { - // TODO(zjma): metric DKG_AGG_NODE_PROPOSED - } - *pull_confirmed = true; + match &mut self.state { + InnerState::Finished { + start_time, + proposed, + .. + } => { + if !*proposed { + *proposed = true; + let proposed_time = duration_since_epoch(); + let secs_since_dkg_start = + proposed_time.as_secs_f64() - start_time.as_secs_f64(); + DKG_STAGE_SECONDS + .with_label_values(&[self.my_addr.to_hex().as_str(), "proposed"]) + .observe(secs_since_dkg_start); + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + secs_since_dkg_start = secs_since_dkg_start, + "[DKG] aggregated transcript proposed by consensus.", + ); + } + Ok(()) + }, + _ => { + bail!("[DKG] pull notification only expected in finished state"); + }, } - Ok(()) } /// Calculate DKG config. Deal a transcript. Start broadcasting the transcript. 
@@ -204,49 +288,72 @@ impl DKGManager { start_time_us: u64, dkg_session_metadata: &DKGSessionMetadata, ) -> Result<()> { - self.state = match &self.state { - InnerState::NotStarted => { - let public_params = DKG::new_public_params(dkg_session_metadata); - let mut rng = if cfg!(feature = "smoke-test") { - StdRng::from_seed(self.my_addr.into_bytes()) - } else { - StdRng::from_rng(thread_rng()).unwrap() - }; - let input_secret = DKG::InputSecret::generate(&mut rng); - - let trx = DKG::generate_transcript( - &mut rng, - &public_params, - &input_secret, - self.my_index as u64, - &self.dealer_sk, - ); + ensure!( + matches!(&self.state, InnerState::NotStarted), + "transcript already dealt" + ); + if dkg_session_metadata.block_dkg { + bail!("DKG aborted per config"); + } + let dkg_start_time = Duration::from_micros(start_time_us); + let deal_start = duration_since_epoch(); + let secs_since_dkg_start = deal_start.as_secs_f64() - dkg_start_time.as_secs_f64(); + DKG_STAGE_SECONDS + .with_label_values(&[self.my_addr.to_hex().as_str(), "deal_start"]) + .observe(secs_since_dkg_start); + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + secs_since_dkg_start = secs_since_dkg_start, + "[DKG] Deal transcript started.", + ); + let public_params = DKG::new_public_params(dkg_session_metadata); + let mut rng = if cfg!(feature = "smoke-test") { + StdRng::from_seed(self.my_addr.into_bytes()) + } else { + StdRng::from_rng(thread_rng()).unwrap() + }; + let input_secret = DKG::InputSecret::generate(&mut rng); - let dkg_transcript = DKGTranscript::new( - self.epoch_state.epoch, - self.my_addr, - bcs::to_bytes(&trx).map_err(|e| { - anyhow!("setup_deal_broadcast failed with trx serialization error: {e}") - })?, - ); + let trx = DKG::generate_transcript( + &mut rng, + &public_params, + &input_secret, + self.my_index as u64, + &self.dealer_sk, + ); - // TODO(zjma): DKG_NODE_READY metric + let my_transcript = DKGTranscript::new( + self.epoch_state.epoch, + self.my_addr, + 
bcs::to_bytes(&trx).map_err(|e| anyhow!("transcript serialization error: {e}"))?, + ); - let abort_handle = self.agg_trx_producer.start_produce( - self.epoch_state.clone(), - public_params.clone(), - self.agg_trx_tx.clone(), - ); + let deal_finish = duration_since_epoch(); + let secs_since_dkg_start = deal_finish.as_secs_f64() - dkg_start_time.as_secs_f64(); + DKG_STAGE_SECONDS + .with_label_values(&[self.my_addr.to_hex().as_str(), "deal_finish"]) + .observe(secs_since_dkg_start); + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + secs_since_dkg_start = secs_since_dkg_start, + "[DKG] Deal transcript finished.", + ); - // Switch to the next stage. - InnerState::InProgress { - start_time_us, - public_params, - my_transcript: dkg_transcript, - abort_handle, - } - }, - _ => unreachable!(), // `setup_deal_broadcast` is called only when DKG state is `NotStarted`. + let abort_handle = self.agg_trx_producer.start_produce( + dkg_start_time, + self.my_addr, + self.epoch_state.clone(), + public_params.clone(), + self.agg_trx_tx.clone(), + ); + + // Switch to the next stage. + self.state = InnerState::InProgress { + start_time: dkg_start_time, + my_transcript, + abort_handle, }; Ok(()) @@ -254,49 +361,75 @@ impl DKGManager { /// On a locally aggregated transcript, put it into the validator txn pool and update inner states. async fn process_aggregated_transcript(&mut self, agg_trx: DKG::Transcript) -> Result<()> { + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + "[DKG] Processing locally aggregated transcript." + ); self.state = match std::mem::take(&mut self.state) { InnerState::InProgress { - start_time_us, - my_transcript: my_node, + start_time, + my_transcript, .. 
} => { - // TODO(zjma): metric DKG_AGG_NODE_READY + let agg_transcript_ready_time = duration_since_epoch(); + let secs_since_dkg_start = + agg_transcript_ready_time.as_secs_f64() - start_time.as_secs_f64(); + DKG_STAGE_SECONDS + .with_label_values(&[self.my_addr.to_hex().as_str(), "agg_transcript_ready"]) + .observe(secs_since_dkg_start); + let txn = ValidatorTransaction::DKGResult(DKGTranscript { metadata: DKGTranscriptMetadata { epoch: self.epoch_state.epoch, author: self.my_addr, }, - transcript_bytes: bcs::to_bytes(&agg_trx).map_err(|e|anyhow!("process_aggregated_transcript failed with trx serialization error: {e}"))?, + transcript_bytes: bcs::to_bytes(&agg_trx) + .map_err(|e| anyhow!("transcript serialization error: {e}"))?, }); let vtxn_guard = self.vtxn_pool.put( Topic::DKG, Arc::new(txn), Some(self.pull_notification_tx.clone()), ); + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + "[DKG] aggregated transcript put into vtxn pool." + ); InnerState::Finished { vtxn_guard, - start_time_us, - my_transcript: my_node, - pull_confirmed: false, + start_time, + my_transcript, + proposed: false, } }, - _ => bail!("process agg trx failed with invalid inner state"), + _ => bail!("[DKG] aggregated transcript only expected during DKG"), }; Ok(()) } - /// On a DKG start event, execute DKG. - async fn process_dkg_start_event(&mut self, maybe_event: Option) -> Result<()> { - if let Some(event) = maybe_event { - let DKGStartEvent { - session_metadata, - start_time_us, - } = event; - ensure!(self.epoch_state.epoch == session_metadata.dealer_epoch); - self.setup_deal_broadcast(start_time_us, &session_metadata) - .await?; - } - Ok(()) + async fn process_dkg_start_event(&mut self, event: DKGStartEvent) -> Result<()> { + info!( + epoch = self.epoch_state.epoch, + my_addr = self.my_addr, + "[DKG] Processing DKGStart event." 
+ ); + fail_point!("dkg::process_dkg_start_event"); + let DKGStartEvent { + session_metadata, + start_time_us, + } = event; + ensure!( + matches!(&self.state, InnerState::NotStarted), + "[DKG] dkg already started" + ); + ensure!( + self.epoch_state.epoch == session_metadata.dealer_epoch, + "[DKG] event not for current epoch" + ); + self.setup_deal_broadcast(start_time_us, &session_metadata) + .await } /// Process an RPC request from DKG peers. @@ -306,24 +439,17 @@ impl DKGManager { mut response_sender, .. } = req; - ensure!(msg.epoch() == self.epoch_state.epoch); + ensure!( + msg.epoch() == self.epoch_state.epoch, + "[DKG] msg not for current epoch" + ); let response = match (&self.state, &msg) { - ( - InnerState::Finished { - my_transcript: my_node, - .. - }, - DKGMessage::NodeRequest(_), - ) - | ( - InnerState::InProgress { - my_transcript: my_node, - .. - }, - DKGMessage::NodeRequest(_), - ) => Ok(DKGMessage::NodeResponse(my_node.clone())), + (InnerState::Finished { my_transcript, .. }, DKGMessage::TranscriptRequest(_)) + | (InnerState::InProgress { my_transcript, .. }, DKGMessage::TranscriptRequest(_)) => { + Ok(DKGMessage::TranscriptResponse(my_transcript.clone())) + }, _ => Err(anyhow!( - "msg {:?} unexpected in state {:?}", + "[DKG] msg {:?} unexpected in state {:?}", msg.name(), self.state.variant_name() )), diff --git a/dkg/src/dkg_manager/tests.rs b/dkg/src/dkg_manager/tests.rs index 8e48a6227df41..44aa7cdd70716 100644 --- a/dkg/src/dkg_manager/tests.rs +++ b/dkg/src/dkg_manager/tests.rs @@ -80,21 +80,28 @@ async fn test_dkg_state_transition() { // In state `NotStarted`, DKGManager should accept `DKGStartEvent`: // it should record start time, compute its own node, and enter state `InProgress`. 
- let handle_result = dkg_manager - .process_dkg_start_event(Some(DKGStartEvent { - session_metadata: DKGSessionMetadata { - dealer_epoch: 999, - dealer_validator_set: validator_consensus_info_move_structs.clone(), - target_validator_set: validator_consensus_info_move_structs.clone(), - }, - start_time_us: 1700000000000000, - })) - .await; + let start_time_1 = Duration::from_secs(1700000000); + let event = DKGStartEvent { + session_metadata: DKGSessionMetadata { + dealer_epoch: 999, + dealer_validator_set: validator_consensus_info_move_structs.clone(), + target_validator_set: validator_consensus_info_move_structs.clone(), + block_dkg: false, + block_randomness: false, + }, + start_time_us: start_time_1.as_micros() as u64, + }; + let handle_result = dkg_manager.process_dkg_start_event(event.clone()).await; assert!(handle_result.is_ok()); assert!( - matches!(&dkg_manager.state, InnerState::InProgress { start_time_us, my_transcript, .. } if *start_time_us == 1700000000000000 && my_transcript.metadata == DKGTranscriptMetadata{ epoch: 999, author: addrs[0]}) + matches!(&dkg_manager.state, InnerState::InProgress { start_time, my_transcript, .. } if *start_time == start_time_1 && my_transcript.metadata == DKGTranscriptMetadata{ epoch: 999, author: addrs[0]}) ); + // 2nd `DKGStartEvent` should be rejected. + let handle_result = dkg_manager.process_dkg_start_event(event).await; + println!("{:?}", handle_result); + assert!(handle_result.is_err()); + // In state `InProgress`, DKGManager should respond to `DKGNodeRequest` with its own node. 
let rpc_node_request = new_rpc_node_request(999, addrs[3], rpc_response_collector.clone()); let handle_result = dkg_manager.process_peer_rpc_msg(rpc_node_request).await; @@ -104,7 +111,9 @@ async fn test_dkg_state_transition() { .map(anyhow::Result::unwrap) .collect::>(); assert_eq!( - vec![DKGMessage::NodeResponse(dkg_manager.state.my_node_cloned())], + vec![DKGMessage::TranscriptResponse( + dkg_manager.state.my_node_cloned() + )], last_responses ); assert!(matches!(&dkg_manager.state, InnerState::InProgress { .. })); @@ -143,7 +152,9 @@ async fn test_dkg_state_transition() { .map(anyhow::Result::unwrap) .collect::>(); assert_eq!( - vec![DKGMessage::NodeResponse(dkg_manager.state.my_node_cloned())], + vec![DKGMessage::TranscriptResponse( + dkg_manager.state.my_node_cloned() + )], last_responses ); assert!(matches!(&dkg_manager.state, InnerState::Finished { .. })); @@ -156,7 +167,7 @@ fn new_rpc_node_request( response_collector: Arc>>>, ) -> IncomingRpcRequest { IncomingRpcRequest { - msg: DKGMessage::NodeRequest(DKGTranscriptRequest::new(epoch)), + msg: DKGMessage::TranscriptRequest(DKGTranscriptRequest::new(epoch)), sender, response_sender: Box::new(DummyRpcResponseSender::new(response_collector)), } diff --git a/dkg/src/epoch_manager.rs b/dkg/src/epoch_manager.rs index f5b210820ed2e..71634d7a8aae7 100644 --- a/dkg/src/epoch_manager.rs +++ b/dkg/src/epoch_manager.rs @@ -22,7 +22,8 @@ use aptos_types::{ dkg::{DKGStartEvent, DKGState, DKGTrait, DefaultDKG}, epoch_state::EpochState, on_chain_config::{ - FeatureFlag, Features, OnChainConfigPayload, OnChainConfigProvider, ValidatorSet, + FeatureFlag, Features, OnChainConfigPayload, OnChainConfigProvider, OnChainConsensusConfig, + ValidatorSet, }, }; use aptos_validator_transaction_pool::VTxnPoolState; @@ -45,7 +46,7 @@ pub struct EpochManager { dkg_rpc_msg_tx: Option>, dkg_manager_close_tx: Option>>, - dkg_start_event_tx: Option>, + dkg_start_event_tx: Option>, vtxn_pool: VTxnPoolState, // Network utils @@ -93,13 
+94,13 @@ impl EpochManager

{ } fn on_dkg_start_notification(&mut self, notification: EventNotification) -> Result<()> { - if let Some(tx) = self.dkg_start_event_tx.take() { + if let Some(tx) = self.dkg_start_event_tx.as_ref() { let EventNotification { subscribed_events, .. } = notification; for event in subscribed_events { if let Ok(dkg_start_event) = DKGStartEvent::try_from(&event) { - let _ = tx.send(dkg_start_event); + let _ = tx.push((), dkg_start_event); return Ok(()); } else { debug!("[DKG] on_dkg_start_notification: failed in converting a contract event to a dkg start event!"); @@ -157,11 +158,16 @@ impl EpochManager

{ .copied(); let features = payload.get::().unwrap_or_default(); + let onchain_consensus_config: anyhow::Result = payload.get(); + if let Err(error) = &onchain_consensus_config { + error!("Failed to read on-chain consensus config {}", error); + } + let consensus_config = onchain_consensus_config.unwrap_or_default(); - if let (true, Some(my_index)) = ( - features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG), - my_index, - ) { + // Check both validator txn and DKG features are enabled + let randomness_enabled = consensus_config.is_vtxn_enabled() + && features.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG); + if let (true, Some(my_index)) = (randomness_enabled, my_index) { let DKGState { in_progress: in_progress_session, .. @@ -178,7 +184,8 @@ impl EpochManager

{ ); let agg_trx_producer = AggTranscriptProducer::new(rb); - let (dkg_start_event_tx, dkg_start_event_rx) = oneshot::channel(); + let (dkg_start_event_tx, dkg_start_event_rx) = + aptos_channel::new(QueueStyle::KLAST, 1, None); self.dkg_start_event_tx = Some(dkg_start_event_tx); let (dkg_rpc_msg_tx, dkg_rpc_msg_rx) = aptos_channel::new::< diff --git a/dkg/src/lib.rs b/dkg/src/lib.rs index 9e62b71c8e35c..4b694eb980ce0 100644 --- a/dkg/src/lib.rs +++ b/dkg/src/lib.rs @@ -1,6 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +mod agg_trx_producer; mod counters; mod dkg_manager; pub mod epoch_manager; @@ -49,5 +50,3 @@ pub fn start_dkg_runtime( runtime.spawn(dkg_epoch_manager.start(network_receiver)); runtime } - -pub mod agg_trx_producer; diff --git a/dkg/src/transcript_aggregation/mod.rs b/dkg/src/transcript_aggregation/mod.rs index f0d896b2dfbf7..62d47817e17f4 100644 --- a/dkg/src/transcript_aggregation/mod.rs +++ b/dkg/src/transcript_aggregation/mod.rs @@ -1,16 +1,18 @@ // Copyright © Aptos Foundation -use crate::{types::DKGTranscriptRequest, DKGMessage}; -use anyhow::ensure; +use crate::{counters::DKG_STAGE_SECONDS, types::DKGTranscriptRequest, DKGMessage}; +use anyhow::{anyhow, ensure}; use aptos_consensus_types::common::Author; -use aptos_infallible::Mutex; +use aptos_infallible::{duration_since_epoch, Mutex}; +use aptos_logger::info; use aptos_reliable_broadcast::BroadcastStatus; use aptos_types::{ dkg::{DKGTrait, DKGTranscript}, epoch_state::EpochState, + validator_verifier::VerifyError, }; use move_core_types::account_address::AccountAddress; -use std::{collections::HashSet, sync::Arc}; +use std::{collections::HashSet, sync::Arc, time::Duration}; pub struct TranscriptAggregator { pub contributors: HashSet, @@ -27,15 +29,26 @@ impl Default for TranscriptAggregator { } pub struct TranscriptAggregationState { + start_time: Duration, + my_addr: AccountAddress, + valid_peer_transcript_seen: bool, trx_aggregator: Mutex>, dkg_pub_params: 
DKG::PublicParams, epoch_state: Arc, } impl TranscriptAggregationState { - pub fn new(dkg_pub_params: DKG::PublicParams, epoch_state: Arc) -> Self { + pub fn new( + start_time: Duration, + my_addr: AccountAddress, + dkg_pub_params: DKG::PublicParams, + epoch_state: Arc, + ) -> Self { //TODO(zjma): take DKG threshold as a parameter. Self { + start_time, + my_addr, + valid_peer_transcript_seen: false, trx_aggregator: Mutex::new(TranscriptAggregator::default()), dkg_pub_params, epoch_state, @@ -59,33 +72,79 @@ impl BroadcastStatus for Arc Some(*x), + Err(VerifyError::TooLittleVotingPower { voting_power, .. }) => Some(*voting_power), + _ => None, + }; + let maybe_aggregated = power_check_result .ok() - .map(|_aggregated_voting_power| trx_aggregator.trx.clone().unwrap()); + .map(|_| trx_aggregator.trx.clone().unwrap()); + info!( + epoch = self.epoch_state.epoch, + peer = sender, + is_self = is_self, + peer_power = peer_power, + new_total_power = new_total_power, + threshold = threshold, + threshold_exceeded = maybe_aggregated.is_some(), + "[DKG] added transcript from validator {}, {} out of {} aggregated.", + self.epoch_state + .verifier + .address_to_validator_index() + .get(&sender) + .unwrap(), + new_total_power.unwrap_or(0), + threshold + ); Ok(maybe_aggregated) } } diff --git a/dkg/src/transcript_aggregation/tests.rs b/dkg/src/transcript_aggregation/tests.rs index eeb2e34dcfb50..07e83c3712063 100644 --- a/dkg/src/transcript_aggregation/tests.rs +++ b/dkg/src/transcript_aggregation/tests.rs @@ -2,6 +2,7 @@ use crate::transcript_aggregation::TranscriptAggregationState; use aptos_crypto::{bls12381::bls12381_keys, Uniform}; +use aptos_infallible::duration_since_epoch; use aptos_reliable_broadcast::BroadcastStatus; use aptos_types::{ dkg::{ @@ -23,6 +24,7 @@ fn test_transcript_aggregation_state() { let addrs: Vec = (0..num_validators) .map(|_| AccountAddress::random()) .collect(); + let vfn_addr = AccountAddress::random(); let private_keys: Vec = (0..num_validators) 
.map(|_| bls12381_keys::PrivateKey::generate_for_testing()) .collect(); @@ -43,9 +45,13 @@ fn test_transcript_aggregation_state() { dealer_epoch: 999, dealer_validator_set: validator_consensus_info_move_structs.clone(), target_validator_set: validator_consensus_info_move_structs.clone(), + block_dkg: false, + block_randomness: false, }); let epoch_state = Arc::new(EpochState { epoch, verifier }); let trx_agg_state = Arc::new(TranscriptAggregationState::::new( + duration_since_epoch(), + addrs[0], pub_params, epoch_state, )); @@ -73,6 +79,16 @@ fn test_transcript_aggregation_state() { }); assert!(result.is_err()); + // Node authored by non-active-validator should be rejected. + let result = trx_agg_state.add(vfn_addr, DKGTranscript { + metadata: DKGTranscriptMetadata { + epoch: 999, + author: vfn_addr, + }, + transcript_bytes: good_trx_bytes.clone(), + }); + assert!(result.is_err()); + // Node with invalid transcript should be rejected. let mut bad_trx_bytes = good_trx_bytes.clone(); bad_trx_bytes[0] = 0xAB; diff --git a/dkg/src/types.rs b/dkg/src/types.rs index 29172e48e05ad..928b659027278 100644 --- a/dkg/src/types.rs +++ b/dkg/src/types.rs @@ -24,22 +24,22 @@ impl DKGTranscriptRequest { /// The DKG network message. 
#[derive(Clone, Serialize, Deserialize, Debug, EnumConversion, PartialEq)] pub enum DKGMessage { - NodeRequest(DKGTranscriptRequest), - NodeResponse(DKGTranscript), + TranscriptRequest(DKGTranscriptRequest), + TranscriptResponse(DKGTranscript), } impl DKGMessage { pub fn epoch(&self) -> u64 { match self { - DKGMessage::NodeRequest(request) => request.dealer_epoch, - DKGMessage::NodeResponse(response) => response.metadata.epoch, + DKGMessage::TranscriptRequest(request) => request.dealer_epoch, + DKGMessage::TranscriptResponse(response) => response.metadata.epoch, } } pub fn name(&self) -> &str { match self { - DKGMessage::NodeRequest(_) => "DKGTranscriptRequest", - DKGMessage::NodeResponse(_) => "DKGTranscriptResponse", + DKGMessage::TranscriptRequest(_) => "DKGTranscriptRequest", + DKGMessage::TranscriptResponse(_) => "DKGTranscriptResponse", } } } diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs index dc710b437b232..d5a95af6e196b 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs @@ -363,7 +363,13 @@ impl IndexerStreamCoordinator { // Do not update block_height if first block is block metadata if ind > 0 { // Update the timestamp if the next block occurs - if let Some(txn) = raw_txn.transaction.try_as_block_metadata() { + if let Some(txn) = raw_txn.transaction.try_as_block_metadata_ext() { + timestamp = txn.timestamp_usecs(); + epoch = txn.epoch(); + epoch_bcs = aptos_api_types::U64::from(epoch); + block_height += 1; + block_height_bcs = aptos_api_types::U64::from(block_height); + } else if let Some(txn) = raw_txn.transaction.try_as_block_metadata() { timestamp = txn.timestamp_usecs(); epoch = txn.epoch(); epoch_bcs = aptos_api_types::U64::from(epoch); diff --git a/storage/aptosdb/src/fake_aptosdb.rs b/storage/aptosdb/src/fake_aptosdb.rs index 
fdc0609424d8f..3cd3ad65ad995 100644 --- a/storage/aptosdb/src/fake_aptosdb.rs +++ b/storage/aptosdb/src/fake_aptosdb.rs @@ -338,7 +338,10 @@ impl FakeAptosDB { .or_insert(user_txn.sequence_number()); } - if let Some(txn) = txn_to_commit.transaction().try_as_block_metadata() { + if let Some(txn) = txn_to_commit.transaction().try_as_block_metadata_ext() { + self.latest_block_timestamp + .fetch_max(txn.timestamp_usecs(), Ordering::Relaxed); + } else if let Some(txn) = txn_to_commit.transaction().try_as_block_metadata() { self.latest_block_timestamp .fetch_max(txn.timestamp_usecs(), Ordering::Relaxed); } diff --git a/testsuite/smoke-test/Cargo.toml b/testsuite/smoke-test/Cargo.toml index acbb13ca84256..501e639fa553b 100644 --- a/testsuite/smoke-test/Cargo.toml +++ b/testsuite/smoke-test/Cargo.toml @@ -21,6 +21,8 @@ aptos-config = { workspace = true } aptos-consensus = { workspace = true } aptos-crypto = { workspace = true } aptos-db = { workspace = true } +aptos-debugger = { workspace = true } +aptos-dkg = { workspace = true } aptos-faucet-core = { workspace = true } aptos-forge = { workspace = true } aptos-framework = { workspace = true } @@ -49,11 +51,14 @@ diesel = { workspace = true, features = [ "numeric", "serde_json", ] } +digest = { workspace = true } hex = { workspace = true } hyper = { workspace = true } move-core-types = { workspace = true } +num-traits = { workspace = true } proptest = { workspace = true } reqwest = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } url = { workspace = true } diff --git a/testsuite/smoke-test/src/genesis.rs b/testsuite/smoke-test/src/genesis.rs index c482921626c7b..c7ae8c1e6b09f 100644 --- a/testsuite/smoke-test/src/genesis.rs +++ b/testsuite/smoke-test/src/genesis.rs @@ -456,7 +456,7 @@ fn generate_genesis_transaction( fun main(vm_signer: &signer, framework_signer: &signer) {{ stake::remove_validators(framework_signer, &vector[@0x{}]); 
block::emit_writeset_block_event(vm_signer, @0x1); - aptos_governance::reconfigure(framework_signer); + aptos_governance::force_end_epoch(framework_signer); }} }} "#, diff --git a/testsuite/smoke-test/src/jwks/mod.rs b/testsuite/smoke-test/src/jwks/mod.rs index 2740ef1fc7df9..065585d404707 100644 --- a/testsuite/smoke-test/src/jwks/mod.rs +++ b/testsuite/smoke-test/src/jwks/mod.rs @@ -31,7 +31,7 @@ pub async fn put_provider_on_chain( r#" let issuer = b"{}"; let config_url = b"{}"; - jwks::upsert_oidc_provider(&framework_signer, issuer, config_url); + jwks::upsert_oidc_provider_for_next_epoch(&framework_signer, issuer, config_url); "#, String::from_utf8(name).unwrap(), String::from_utf8(config_url).unwrap(), diff --git a/testsuite/smoke-test/src/lib.rs b/testsuite/smoke-test/src/lib.rs index b72bdf5134026..dfc612e30e062 100644 --- a/testsuite/smoke-test/src/lib.rs +++ b/testsuite/smoke-test/src/lib.rs @@ -31,6 +31,8 @@ mod network; #[cfg(test)] mod oidb; #[cfg(test)] +mod randomness; +#[cfg(test)] mod rest_api; #[cfg(test)] mod rosetta; diff --git a/testsuite/smoke-test/src/randomness/disable_feature_0.rs b/testsuite/smoke-test/src/randomness/disable_feature_0.rs new file mode 100644 index 0000000000000..949c20e654b6e --- /dev/null +++ b/testsuite/smoke-test/src/randomness/disable_feature_0.rs @@ -0,0 +1,101 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, get_on_chain_resource, verify_dkg_transcript}, + smoke_test_environment::SwarmBuilder, +}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, + randomness::PerBlockRandomness, +}; +use std::{sync::Arc, time::Duration}; + +/// Disable on-chain randomness by only disabling feature `RECONFIGURE_WITH_DKG`. 
+#[tokio::test] +async fn disable_feature_0() { + let epoch_duration_secs = 20; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let decrypt_key_map = decrypt_key_map(&swarm); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. 
Disabling feature RECONFIGURE_WITH_DKG."); + let disable_dkg_script = r#" +script { + use aptos_framework::aptos_governance; + fun main(core_resources: &signer) { + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let dkg_feature_id: u64 = std::features::get_reconfigure_with_dkg_feature(); + aptos_governance::toggle_features(&framework_signer, vector[], vector[dkg_feature_id]); + } +} +"#; + + let txn_summary = cli + .run_script(root_idx, disable_dkg_script) + .await + .expect("Txn execution error."); + debug!("disabling_dkg_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(4, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 4."); + + info!("Now in epoch 4. DKG transcript should still be available. Randomness seed should be unavailable."); + let dkg_session = get_on_chain_resource::(&client) + .await + .last_completed + .expect("dkg result for epoch 4 should be present"); + assert_eq!(4, dkg_session.target_epoch()); + assert!(verify_dkg_transcript(&dkg_session, &decrypt_key_map).is_ok()); + + let randomness_seed = get_on_chain_resource::(&client).await; + assert!(randomness_seed.seed.is_none()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(5, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 5."); + + info!("Now in epoch 5. DKG transcript should be unavailable. 
Randomness seed should be unavailable."); + let maybe_last_complete = get_on_chain_resource::(&client) + .await + .last_completed; + assert!( + maybe_last_complete.is_none() || maybe_last_complete.as_ref().unwrap().target_epoch() != 5 + ); + + let randomness_seed = get_on_chain_resource::(&client).await; + assert!(randomness_seed.seed.is_none()); +} diff --git a/testsuite/smoke-test/src/randomness/disable_feature_1.rs b/testsuite/smoke-test/src/randomness/disable_feature_1.rs new file mode 100644 index 0000000000000..07a281aff5071 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/disable_feature_1.rs @@ -0,0 +1,111 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, get_on_chain_resource, verify_dkg_transcript}, + smoke_test_environment::SwarmBuilder, + utils::get_current_consensus_config, +}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, + randomness::PerBlockRandomness, +}; +use std::{sync::Arc, time::Duration}; + +/// Disable on-chain randomness by only disabling validator transactions. +#[tokio::test] +async fn disable_feature_1() { + let epoch_duration_secs = 20; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. 
+ let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let decrypt_key_map = decrypt_key_map(&swarm); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. Disabling validator transactions."); + let mut config = get_current_consensus_config(&client).await; + assert!(config.is_vtxn_enabled()); + config.disable_validator_txns(); + let config_bytes = bcs::to_bytes(&config).unwrap(); + let disable_vtxn_script = format!( + r#" +script {{ + use aptos_framework::aptos_governance; + use aptos_framework::consensus_config; + fun main(core_resources: &signer) {{ + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let config_bytes = vector{:?}; + consensus_config::set_for_next_epoch(&framework_signer, config_bytes); + aptos_governance::reconfigure(&framework_signer); + }} +}} +"#, + config_bytes + ); + debug!("disable_vtxn_script={}", disable_vtxn_script); + let txn_summary = cli + .run_script(root_idx, disable_vtxn_script.as_str()) + .await + .expect("Txn execution error."); + debug!("disabling_vtxn_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(4, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 4."); + + info!("Now in epoch 4. DKG transcript should still be available. 
Randomness seed should be unavailable."); + let dkg_session = get_on_chain_resource::(&client) + .await + .last_completed + .expect("dkg result for epoch 4 should be present"); + assert_eq!(4, dkg_session.target_epoch()); + assert!(verify_dkg_transcript(&dkg_session, &decrypt_key_map).is_ok()); + + let randomness_seed = get_on_chain_resource::(&client).await; + assert!(randomness_seed.seed.is_none()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(5, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 5."); + + info!("Now in epoch 5. DKG transcript should be unavailable. Randomness seed should be unavailable."); + let maybe_last_complete = get_on_chain_resource::(&client) + .await + .last_completed; + assert!( + maybe_last_complete.is_none() || maybe_last_complete.as_ref().unwrap().target_epoch() != 5 + ); + + let randomness_seed = get_on_chain_resource::(&client).await; + assert!(randomness_seed.seed.is_none()); +} diff --git a/testsuite/smoke-test/src/randomness/dkg_with_validator_down.rs b/testsuite/smoke-test/src/randomness/dkg_with_validator_down.rs new file mode 100644 index 0000000000000..dd1e2b3212593 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/dkg_with_validator_down.rs @@ -0,0 +1,59 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, verify_dkg_transcript, wait_for_dkg_finish}, + smoke_test_environment::SwarmBuilder, +}; +use aptos_forge::NodeExt; +use aptos_types::on_chain_config::{FeatureFlag, Features}; +use std::sync::Arc; + +#[tokio::test] +async fn dkg_with_validator_down() { + let epoch_duration_secs = 10; + let estimated_dkg_latency_secs = 20; + let time_limit_secs = epoch_duration_secs + estimated_dkg_latency_secs; + + let mut swarm = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(|conf| { + conf.epoch_duration_secs = 10; + + // Ensure vtxn is enabled. 
+ conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build() + .await; + let decrypt_key_map = decrypt_key_map(&swarm); + + let client = swarm.validators().last().unwrap().rest_client(); + println!("Wait for an epoch start."); + let dkg_session_1 = wait_for_dkg_finish(&client, None, time_limit_secs).await; + + println!("Current epoch is {}.", dkg_session_1.target_epoch()); + + println!("Take one validator down."); + swarm.validators_mut().take(1).for_each(|v| { + v.stop(); + }); + + println!( + "Wait until we fully entered epoch {}.", + dkg_session_1.target_epoch() + 1 + ); + + let dkg_session_2 = wait_for_dkg_finish( + &client, + Some(dkg_session_1.target_epoch() + 1), + time_limit_secs, + ) + .await; + + assert!(verify_dkg_transcript(&dkg_session_2, &decrypt_key_map).is_ok()); +} diff --git a/testsuite/smoke-test/src/randomness/dkg_with_validator_join_leave.rs b/testsuite/smoke-test/src/randomness/dkg_with_validator_join_leave.rs new file mode 100644 index 0000000000000..3ba10aae34db3 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/dkg_with_validator_join_leave.rs @@ -0,0 +1,144 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, num_validators, verify_dkg_transcript, wait_for_dkg_finish}, + smoke_test_environment::SwarmBuilder, +}; +use aptos::test::CliTestFramework; +use aptos_forge::{Node, Swarm}; +use aptos_types::on_chain_config::{FeatureFlag, Features}; +use std::sync::Arc; + +#[tokio::test] +async fn dkg_with_validator_join_leave() { + let epoch_duration_secs = 40; + let estimated_dkg_latency_secs = 80; + let time_limit_secs = epoch_duration_secs + estimated_dkg_latency_secs; + + let mut swarm = SwarmBuilder::new_local(7) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + 
conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build() + .await; + + let decrypt_key_map = decrypt_key_map(&swarm); + + println!("Wait for a moment when DKG is not running."); + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + let dkg_session_1 = wait_for_dkg_finish(&client, None, time_limit_secs).await; + println!( + "Current epoch is {}. Number of validators: {}.", + dkg_session_1.target_epoch(), + num_validators(&dkg_session_1) + ); + + println!( + "Wait until we fully entered epoch {}.", + dkg_session_1.target_epoch() + 1 + ); + let dkg_session_2 = wait_for_dkg_finish( + &client, + Some(dkg_session_1.target_epoch() + 1), + time_limit_secs, + ) + .await; + + println!( + "Current epoch is {}. 
Number of validators: {}.", + dkg_session_2.target_epoch(), + num_validators(&dkg_session_2) + ); + + println!("Letting one of the validators leave."); + let (victim_validator_sk, victim_validator_addr) = { + let victim_validator = swarm.validators().next().unwrap(); + let sk = victim_validator + .account_private_key() + .clone() + .unwrap() + .private_key(); + let addr = victim_validator.peer_id(); + (sk, addr) + }; + + println!("Give the victim some money so it can first send transactions."); + let mut public_info = swarm.chain_info().into_aptos_public_info(); + public_info + .mint(victim_validator_addr, 100000000000000) + .await + .unwrap(); + + println!("Send the txn to request leave."); + let faucet_endpoint: reqwest::Url = "http://localhost:8081".parse().unwrap(); + let mut cli = CliTestFramework::new( + client_endpoint, + faucet_endpoint, + /*num_cli_accounts=*/ 0, + ) + .await; + let idx = cli.add_account_to_cli(victim_validator_sk); + let txn_result = cli.leave_validator_set(idx, None).await.unwrap(); + println!("Txn result: {:?}", txn_result); + + println!( + "Wait until we fully entered epoch {}.", + dkg_session_2.target_epoch() + 1 + ); + let dkg_session_3 = wait_for_dkg_finish( + &client, + Some(dkg_session_2.target_epoch() + 1), + time_limit_secs, + ) + .await; + + println!( + "Current epoch is {}. Number of validators: {}.", + dkg_session_3.target_epoch(), + num_validators(&dkg_session_3) + ); + + assert!(verify_dkg_transcript(&dkg_session_3, &decrypt_key_map).is_ok()); + assert_eq!( + num_validators(&dkg_session_3), + num_validators(&dkg_session_2) - 1 + ); + + println!("Now re-join."); + let txn_result = cli.join_validator_set(idx, None).await; + println!("Txn result: {:?}", txn_result); + println!( + "Wait until we fully entered epoch {}.", + dkg_session_3.target_epoch() + 1 + ); + let dkg_session_4 = wait_for_dkg_finish( + &client, + Some(dkg_session_3.target_epoch() + 1), + time_limit_secs, + ) + .await; + + println!( + "Current epoch is {}. 
Number of validators: {}.", + dkg_session_4.target_epoch(), + num_validators(&dkg_session_4) + ); + + assert!(verify_dkg_transcript(&dkg_session_4, &decrypt_key_map).is_ok()); + assert_eq!( + num_validators(&dkg_session_4), + num_validators(&dkg_session_3) + 1 + ); +} diff --git a/testsuite/smoke-test/src/randomness/e2e_basic_consumption.rs b/testsuite/smoke-test/src/randomness/e2e_basic_consumption.rs new file mode 100644 index 0000000000000..f11d02e255eb6 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/e2e_basic_consumption.rs @@ -0,0 +1,107 @@ +// Copyright © Aptos Foundation + +use crate::smoke_test_environment::SwarmBuilder; +use aptos::{move_tool::MemberId, test::CliTestFramework}; +use aptos_forge::{NodeExt, Swarm, SwarmExt}; +use aptos_logger::info; +use aptos_types::on_chain_config::{FeatureFlag, Features}; +use serde::{Deserialize, Serialize}; +use std::{collections::BTreeMap, str::FromStr, sync::Arc, time::Duration}; + +/// Publish the `on-chain-dice` example module, +/// run its function that consume on-chain randomness, and +/// print out the random results. +#[tokio::test] +async fn e2e_basic_consumption() { + let epoch_duration_secs = 20; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let rest_client = swarm.validators().next().unwrap().rest_client(); + + info!("Wait for epoch 2. 
Epoch 1 does not have randomness."); + swarm + .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Epoch 2 taking too long to arrive!"); + + let root_address = swarm.chain_info().root_account().address(); + info!("Root account: {}", root_address); + let _root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_address); + + info!("Publishing OnChainDice module."); + publish_on_chain_dice_module(&mut cli, 0).await; + + info!("Rolling the dice."); + let account = cli.account_id(0).to_hex_literal(); + let roll_func_id = MemberId::from_str(&format!("{}::dice::roll", account)).unwrap(); + for _ in 0..10 { + let txn_summary = cli + .run_function(0, None, roll_func_id.clone(), vec![], vec![]) + .await + .unwrap(); + info!("Roll txn summary: {:?}", txn_summary); + } + + info!("Collecting roll history."); + let dice_roll_history = rest_client + .get_account_resource_bcs::( + root_address, + format!("{}::dice::DiceRollHistory", account).as_str(), + ) + .await + .unwrap() + .into_inner(); + + info!("Roll history: {:?}", dice_roll_history.rolls); +} + +#[derive(Deserialize, Serialize)] +struct DiceRollHistory { + rolls: Vec, +} + +async fn publish_on_chain_dice_module(cli: &mut CliTestFramework, publisher_account_idx: usize) { + cli.init_move_dir(); + let mut package_addresses = BTreeMap::new(); + package_addresses.insert("module_owner", "_"); + + cli.init_package( + "OnChainDice".to_string(), + package_addresses, + Some(CliTestFramework::aptos_framework_dir()), + ) + .await + .unwrap(); + + let content = + include_str!("../../../../aptos-move/move-examples/on_chain_dice/sources/dice.move") + .to_string(); + cli.add_file_in_package("sources/dice.move", content); + + cli.wait_for_account(publisher_account_idx).await.unwrap(); + + info!("Move package dir: {}", cli.move_dir().display()); + + let mut named_addresses = BTreeMap::new(); + let account_str = cli.account_id(publisher_account_idx).to_string(); + 
named_addresses.insert("module_owner", account_str.as_str()); + cli.publish_package(0, None, named_addresses, None) + .await + .unwrap(); +} diff --git a/testsuite/smoke-test/src/randomness/e2e_correctness.rs b/testsuite/smoke-test/src/randomness/e2e_correctness.rs new file mode 100644 index 0000000000000..56a96cf239735 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/e2e_correctness.rs @@ -0,0 +1,82 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{ + decrypt_key_map, get_current_version, get_on_chain_resource, verify_dkg_transcript, + verify_randomness, + }, + smoke_test_environment::SwarmBuilder, +}; +use aptos_forge::{NodeExt, SwarmExt}; +use aptos_logger::info; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, +}; +use std::{sync::Arc, time::Duration}; + +/// Verify the correctness of DKG transcript and block-level randomness seed. +#[tokio::test] +async fn randomness_correctness() { + let epoch_duration_secs = 20; + + let (swarm, _cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let decrypt_key_map = decrypt_key_map(&swarm); + let rest_client = swarm.validators().next().unwrap().rest_client(); + + info!("Wait for epoch 2. 
Epoch 1 does not have randomness."); + swarm + .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Epoch 2 taking too long to arrive!"); + + info!("Verify DKG correctness for epoch 2."); + let dkg_session = get_on_chain_resource::(&rest_client).await; + assert!(verify_dkg_transcript(dkg_session.last_complete(), &decrypt_key_map).is_ok()); + + // Verify the randomness in 5 versions. + for _ in 0..5 { + let cur_txn_version = get_current_version(&rest_client).await; + info!("Verifying WVUF output for version {}.", cur_txn_version); + let wvuf_verify_result = + verify_randomness(&decrypt_key_map, &rest_client, cur_txn_version).await; + println!("wvuf_verify_result={:?}", wvuf_verify_result); + assert!(wvuf_verify_result.is_ok()); + } + + info!("Wait for epoch 3."); + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Epoch 3 taking too long to arrive!"); + + info!("Verify DKG correctness for epoch 3."); + let dkg_session = get_on_chain_resource::(&rest_client).await; + assert!(verify_dkg_transcript(dkg_session.last_complete(), &decrypt_key_map).is_ok()); + + // Again, verify the randomness in 5 versions. 
+ for _ in 0..5 { + let cur_txn_version = get_current_version(&rest_client).await; + info!("Verifying WVUF output for version {}.", cur_txn_version); + let wvuf_verify_result = + verify_randomness(&decrypt_key_map, &rest_client, cur_txn_version).await; + println!("wvuf_verify_result={:?}", wvuf_verify_result); + assert!(wvuf_verify_result.is_ok()); + } +} diff --git a/testsuite/smoke-test/src/randomness/enable_feature_0.rs b/testsuite/smoke-test/src/randomness/enable_feature_0.rs new file mode 100644 index 0000000000000..54659dedf62ff --- /dev/null +++ b/testsuite/smoke-test/src/randomness/enable_feature_0.rs @@ -0,0 +1,132 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, get_on_chain_resource, verify_dkg_transcript}, + smoke_test_environment::SwarmBuilder, + utils::get_current_consensus_config, +}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, +}; +use std::{sync::Arc, time::Duration}; + +/// Enable on-chain randomness in the following steps. +/// - Enable feature `RECONFIGURE_WITH_DKG` in epoch `e`. +/// - Enable validator transactions in consensus config in epoch `e + 1`. +#[tokio::test] +async fn enable_feature_0() { + let epoch_duration_secs = 20; + let estimated_dkg_latency_secs = 40; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // start with vtxn disabled. + conf.consensus_config.disable_validator_txns(); + + // start with dkg disabled. 
+ let mut features = Features::default(); + features.disable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let decrypt_key_map = decrypt_key_map(&swarm); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. Enabling feature RECONFIGURE_WITH_DKG."); + let enable_dkg_script = r#" +script { + use aptos_framework::aptos_governance; + fun main(core_resources: &signer) { + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let dkg_feature_id: u64 = std::features::get_reconfigure_with_dkg_feature(); + aptos_governance::toggle_features(&framework_signer, vector[dkg_feature_id], vector[]); + } +} +"#; + + let txn_summary = cli + .run_script(root_idx, enable_dkg_script) + .await + .expect("Txn execution error."); + debug!("enabling_dkg_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(4, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 4."); + + info!("Now in epoch 4. 
Enabling validator transactions."); + let mut config = get_current_consensus_config(&client).await; + config.enable_validator_txns(); + let config_bytes = bcs::to_bytes(&config).unwrap(); + let enable_vtxn_script = format!( + r#" +script {{ + use aptos_framework::aptos_governance; + use aptos_framework::consensus_config; + fun main(core_resources: &signer) {{ + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let config_bytes = vector{:?}; + consensus_config::set_for_next_epoch(&framework_signer, config_bytes); + aptos_governance::reconfigure(&framework_signer); + }} +}} +"#, + config_bytes + ); + debug!("enable_vtxn_script={}", enable_vtxn_script); + let txn_summary = cli + .run_script(root_idx, enable_vtxn_script.as_str()) + .await + .expect("Txn execution error."); + debug!("enabling_vtxn_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(5, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 5."); + + info!("Now in epoch 5. Both DKG and vtxn are enabled. 
There should be no randomness since DKG did not happen at the end of last epoch."); + let maybe_last_complete = get_on_chain_resource::(&client) + .await + .last_completed; + assert!( + maybe_last_complete.is_none() || maybe_last_complete.as_ref().unwrap().target_epoch() != 5 + ); + + info!("Waiting for epoch 6."); + swarm + .wait_for_all_nodes_to_catchup_to_epoch( + 6, + Duration::from_secs(epoch_duration_secs + estimated_dkg_latency_secs), + ) + .await + .expect("Waited too long for epoch 6."); + + let dkg_session = get_on_chain_resource::(&client) + .await + .last_completed + .expect("dkg result for epoch 6 should be present"); + assert_eq!(6, dkg_session.target_epoch()); + assert!(verify_dkg_transcript(&dkg_session, &decrypt_key_map).is_ok()); +} diff --git a/testsuite/smoke-test/src/randomness/enable_feature_1.rs b/testsuite/smoke-test/src/randomness/enable_feature_1.rs new file mode 100644 index 0000000000000..e4b124dae90f1 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/enable_feature_1.rs @@ -0,0 +1,133 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, get_on_chain_resource, verify_dkg_transcript}, + smoke_test_environment::SwarmBuilder, + utils::get_current_consensus_config, +}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, +}; +use std::{sync::Arc, time::Duration}; + +/// Enable on-chain randomness in the following steps. +/// - Enable validator transactions in consensus config in epoch `e`. +/// - Enable feature `RECONFIGURE_WITH_DKG` in epoch `e + 1`. 
+#[tokio::test] +async fn enable_feature_1() { + let epoch_duration_secs = 20; + let estimated_dkg_latency_secs = 40; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // start with vtxn disabled. + conf.consensus_config.disable_validator_txns(); + + // start with dkg disabled. + let mut features = Features::default(); + features.disable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let decrypt_key_map = decrypt_key_map(&swarm); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. 
Enabling validator transactions."); + let mut config = get_current_consensus_config(&client).await; + config.enable_validator_txns(); + let config_bytes = bcs::to_bytes(&config).unwrap(); + let enable_vtxn_script = format!( + r#" +script {{ + use aptos_framework::aptos_governance; + use aptos_framework::consensus_config; + fun main(core_resources: &signer) {{ + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let config_bytes = vector{:?}; + consensus_config::set_for_next_epoch(&framework_signer, config_bytes); + aptos_governance::reconfigure(&framework_signer); + }} +}} +"#, + config_bytes + ); + + debug!("enable_vtxn_script={}", enable_vtxn_script); + let txn_summary = cli + .run_script(root_idx, enable_vtxn_script.as_str()) + .await + .expect("Txn execution error."); + debug!("enabling_vtxn_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(4, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 4."); + + info!("Now in epoch 4. Enabling feature RECONFIGURE_WITH_DKG."); + let enable_dkg_script = r#" +script { + use aptos_framework::aptos_governance; + fun main(core_resources: &signer) { + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let dkg_feature_id: u64 = std::features::get_reconfigure_with_dkg_feature(); + aptos_governance::toggle_features(&framework_signer, vector[dkg_feature_id], vector[]); + } +} +"#; + + let txn_summary = cli + .run_script(root_idx, enable_dkg_script) + .await + .expect("Txn execution error."); + debug!("enabling_dkg_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(5, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 5."); + + info!("Now in epoch 5. Both DKG and vtxn are enabled. 
There should be no randomness since DKG did not happen at the end of last epoch."); + let maybe_last_complete = get_on_chain_resource::(&client) + .await + .last_completed; + assert!( + maybe_last_complete.is_none() || maybe_last_complete.as_ref().unwrap().target_epoch() != 5 + ); + + info!("Waiting for epoch 6."); + swarm + .wait_for_all_nodes_to_catchup_to_epoch( + 6, + Duration::from_secs(epoch_duration_secs + estimated_dkg_latency_secs), + ) + .await + .expect("Waited too long for epoch 6."); + + let dkg_session = get_on_chain_resource::(&client) + .await + .last_completed + .expect("dkg result for epoch 6 should be present"); + assert_eq!(6, dkg_session.target_epoch()); + assert!(verify_dkg_transcript(&dkg_session, &decrypt_key_map).is_ok()); +} diff --git a/testsuite/smoke-test/src/randomness/enable_feature_2.rs b/testsuite/smoke-test/src/randomness/enable_feature_2.rs new file mode 100644 index 0000000000000..a291258d2ad7c --- /dev/null +++ b/testsuite/smoke-test/src/randomness/enable_feature_2.rs @@ -0,0 +1,111 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{decrypt_key_map, get_on_chain_resource, verify_dkg_transcript}, + smoke_test_environment::SwarmBuilder, + utils::get_current_consensus_config, +}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, +}; +use std::{sync::Arc, time::Duration}; + +/// Enable on-chain randomness by enabling validator transactions and feature `RECONFIGURE_WITH_DKG` simultaneously. +#[tokio::test] +async fn enable_feature_2() { + let epoch_duration_secs = 20; + let estimated_dkg_latency_secs = 40; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // start with vtxn disabled. 
+ conf.consensus_config.disable_validator_txns(); + + // start with dkg disabled. + let mut features = Features::default(); + features.disable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let decrypt_key_map = decrypt_key_map(&swarm); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. Enabling features."); + let mut config = get_current_consensus_config(&client).await; + config.enable_validator_txns(); + let config_bytes = bcs::to_bytes(&config).unwrap(); + let script = format!( + r#" +script {{ + use aptos_framework::aptos_governance; + use aptos_framework::consensus_config; + use std::features; + fun main(core_resources: &signer) {{ + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let config_bytes = vector{:?}; + consensus_config::set_for_next_epoch(&framework_signer, config_bytes); + let dkg_feature_id: u64 = features::get_reconfigure_with_dkg_feature(); + features::change_feature_flags_for_next_epoch(&framework_signer, vector[dkg_feature_id], vector[]); + aptos_governance::reconfigure(&framework_signer); + }} +}} +"#, + config_bytes + ); + + debug!("script={}", script); + let txn_summary = cli + .run_script(root_idx, script.as_str()) + .await + .expect("Txn execution error."); + debug!("txn_summary={:?}", txn_summary); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(4, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long 
for epoch 4."); + + info!("Now in epoch 4. Both DKG and vtxn are enabled. There should be no randomness since DKG did not happen at the end of last epoch."); + let maybe_last_complete = get_on_chain_resource::(&client) + .await + .last_completed; + assert!( + maybe_last_complete.is_none() || maybe_last_complete.as_ref().unwrap().target_epoch() != 4 + ); + + info!("Waiting for epoch 5."); + swarm + .wait_for_all_nodes_to_catchup_to_epoch( + 5, + Duration::from_secs(epoch_duration_secs + estimated_dkg_latency_secs), + ) + .await + .expect("Waited too long for epoch 5."); + + let dkg_session = get_on_chain_resource::(&client) + .await + .last_completed + .expect("dkg result for epoch 6 should be present"); + assert_eq!(5, dkg_session.target_epoch()); + assert!(verify_dkg_transcript(&dkg_session, &decrypt_key_map).is_ok()); +} diff --git a/testsuite/smoke-test/src/randomness/failure_indicator_block_dkg.rs b/testsuite/smoke-test/src/randomness/failure_indicator_block_dkg.rs new file mode 100644 index 0000000000000..c37f976a7397f --- /dev/null +++ b/testsuite/smoke-test/src/randomness/failure_indicator_block_dkg.rs @@ -0,0 +1,77 @@ +// Copyright © Aptos Foundation + +use crate::{randomness::get_on_chain_resource, smoke_test_environment::SwarmBuilder}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{ConfigurationResource, FeatureFlag, Features}, +}; +use std::{sync::Arc, time::Duration}; + +/// The presence of `FailureInjectionBlockDKG` should block DKG. +#[tokio::test] +async fn failure_indicator_block_dkg() { + let epoch_duration_secs = 20; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // Ensure vtxn is enabled. 
+ conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. Set flag to block randomness."); + let script = r#" +script { + use aptos_framework::aptos_governance; + use aptos_framework::dkg; + fun main(core_resources: &signer) { + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + dkg::block_dkg(&framework_signer); + } +} +"#; + let txn_summary = cli + .run_script(root_idx, script) + .await + .expect("Txn execution error."); + debug!("txn_summary={:?}", txn_summary); + + info!("Chain should be stuck at the end of epoch 3."); + tokio::time::sleep(Duration::from_secs(60)).await; + let config_resource = get_on_chain_resource::(&client).await; + assert_eq!(3, config_resource.epoch()); + let dkg_state = get_on_chain_resource::(&client).await; + assert_eq!( + 3, + dkg_state + .in_progress + .as_ref() + .unwrap() + .metadata + .dealer_epoch + ); +} diff --git a/testsuite/smoke-test/src/randomness/failure_indicator_block_randomness.rs b/testsuite/smoke-test/src/randomness/failure_indicator_block_randomness.rs new file mode 100644 index 0000000000000..7202b8ea7d929 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/failure_indicator_block_randomness.rs @@ -0,0 +1,69 @@ +// Copyright © Aptos 
Foundation + +use crate::{randomness::get_on_chain_resource, smoke_test_environment::SwarmBuilder}; +use aptos_forge::{Node, Swarm, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{ConfigurationResource, FeatureFlag, Features}, +}; +use std::{sync::Arc, time::Duration}; + +/// The presence of `FailureInjectionBlockRandomness` should block execution. +#[tokio::test] +async fn failure_indicator_block_randomness() { + let epoch_duration_secs = 20; + + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_num_fullnodes(1) + .with_aptos() + .with_init_genesis_config(Arc::new(move |conf| { + conf.epoch_duration_secs = epoch_duration_secs; + conf.allow_new_validators = true; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build_with_cli(0) + .await; + + let root_addr = swarm.chain_info().root_account().address(); + let root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); + + let client_endpoint = swarm.validators().nth(1).unwrap().rest_api_endpoint(); + let client = aptos_rest_client::Client::new(client_endpoint.clone()); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(3, Duration::from_secs(epoch_duration_secs * 2)) + .await + .expect("Waited too long for epoch 3."); + + info!("Now in epoch 3. 
Set flag to block randomness."); + let script = r#" +script { + use aptos_framework::aptos_governance; + use aptos_framework::dkg; + fun main(core_resources: &signer) { + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + dkg::block_randomness(&framework_signer); + } +} +"#; + let txn_summary = cli + .run_script(root_idx, script) + .await + .expect("Txn execution error."); + debug!("txn_summary={:?}", txn_summary); + + info!("Chain should be stuck at the beginning of epoch 4."); + tokio::time::sleep(Duration::from_secs(60)).await; + let config_resource = get_on_chain_resource::(&client).await; + assert_eq!(4, config_resource.epoch()); + let dkg_state = get_on_chain_resource::(&client).await; + assert!(dkg_state.in_progress.is_none()); +} diff --git a/testsuite/smoke-test/src/randomness/mod.rs b/testsuite/smoke-test/src/randomness/mod.rs new file mode 100644 index 0000000000000..72e99c79845f6 --- /dev/null +++ b/testsuite/smoke-test/src/randomness/mod.rs @@ -0,0 +1,262 @@ +// Copyright © Aptos Foundation + +use anyhow::{anyhow, ensure, Result}; +use aptos_crypto::{compat::Sha3_256, Uniform}; +use aptos_dkg::weighted_vuf::traits::WeightedVUF; +use aptos_forge::LocalSwarm; +use aptos_logger::info; +use aptos_rest_client::Client; +use aptos_types::{ + dkg::{DKGSessionState, DKGState, DKGTrait, DefaultDKG}, + on_chain_config::OnChainConfig, + randomness::{PerBlockRandomness, RandMetadataToSign, WVUF}, + validator_verifier::ValidatorConsensusInfo, +}; +use digest::Digest; +use move_core_types::{account_address::AccountAddress, language_storage::CORE_CODE_ADDRESS}; +use rand::{prelude::StdRng, SeedableRng}; +use std::{collections::HashMap, time::Duration}; +use tokio::time::Instant; + +mod disable_feature_0; +mod disable_feature_1; +mod dkg_with_validator_down; +mod dkg_with_validator_join_leave; +mod e2e_basic_consumption; +mod e2e_correctness; +mod enable_feature_0; +mod 
enable_feature_1; +mod enable_feature_2; +mod failure_indicator_block_dkg; +mod failure_indicator_block_randomness; +mod validator_restart_during_dkg; + +#[allow(dead_code)] +async fn get_current_version(rest_client: &Client) -> u64 { + rest_client + .get_ledger_information() + .await + .unwrap() + .inner() + .version +} + +async fn get_on_chain_resource<T: OnChainConfig>(rest_client: &Client) -> T { + let maybe_response = rest_client + .get_account_resource_bcs::<T>(CORE_CODE_ADDRESS, T::struct_tag().to_string().as_str()) + .await; + let response = maybe_response.unwrap(); + response.into_inner() +} + +#[allow(dead_code)] +async fn get_on_chain_resource_at_version<T: OnChainConfig>( + rest_client: &Client, + version: u64, +) -> T { + let maybe_response = rest_client + .get_account_resource_at_version_bcs::<T>( + CORE_CODE_ADDRESS, + T::struct_tag().to_string().as_str(), + version, + ) + .await; + let response = maybe_response.unwrap(); + response.into_inner() +} + +/// Poll the on-chain state until we see a DKG session finish. +/// Return a `DKGSessionState` of the DKG session seen. +#[allow(dead_code)] +async fn wait_for_dkg_finish( + client: &Client, + target_epoch: Option<u64>, + time_limit_secs: u64, +) -> DKGSessionState { + let mut dkg_state = get_on_chain_resource::<DKGState>(client).await; + let timer = Instant::now(); + while timer.elapsed().as_secs() < time_limit_secs + && !(dkg_state.in_progress.is_none() + && dkg_state.last_completed.is_some() + && (target_epoch.is_none() + || dkg_state + .last_completed + .as_ref() + .map(|session| session.metadata.dealer_epoch + 1) + == target_epoch)) + { + tokio::time::sleep(Duration::from_secs(1)).await; + dkg_state = get_on_chain_resource::<DKGState>(client).await; + } + assert!(timer.elapsed().as_secs() < time_limit_secs); + dkg_state.last_complete().clone() +} + +/// Verify that the DKG transcript of epoch i (stored in `dkg_session`) was correctly generated +/// by the validator set of epoch i-1 (the dealers recorded in `dkg_session.metadata`). 
+fn verify_dkg_transcript( + dkg_session: &DKGSessionState, + decrypt_key_map: &HashMap::NewValidatorDecryptKey>, +) -> Result<()> { + info!( + "Verifying the transcript generated in epoch {}.", + dkg_session.metadata.dealer_epoch, + ); + let pub_params = DefaultDKG::new_public_params(&dkg_session.metadata); + let transcript = bcs::from_bytes(dkg_session.transcript.as_slice()).map_err(|e| { + anyhow!("DKG transcript verification failed with transcript deserialization error: {e}") + })?; + println!("transcript={:?}", transcript); + DefaultDKG::verify_transcript(&pub_params, &transcript)?; + + info!("Double-verifying by reconstructing the dealt secret."); + let dealt_secret_from_shares = dealt_secret_from_shares( + dkg_session + .metadata + .target_validator_consensus_infos_cloned(), + decrypt_key_map, + &pub_params, + &transcript, + ); + + println!("dealt_secret_from_shares={:?}", dealt_secret_from_shares); + + let dealt_secret_from_inputs = dealt_secret_from_input( + &transcript, + &pub_params, + &pub_params.session_metadata.dealer_consensus_infos_cloned(), + ); + println!("dealt_secret_from_inputs={:?}", dealt_secret_from_inputs); + + ensure!( + dealt_secret_from_shares == dealt_secret_from_inputs, + "dkg transcript verification failed with final check failure" + ); + Ok(()) +} + +fn dealt_secret_from_shares( + target_validator_set: Vec, + decrypt_key_map: &HashMap::NewValidatorDecryptKey>, + pub_params: &::PublicParams, + transcript: &::Transcript, +) -> ::DealtSecret { + let player_share_pairs = target_validator_set + .iter() + .enumerate() + .map(|(idx, validator_info)| { + let dk = decrypt_key_map.get(&validator_info.address).unwrap(); + let (secret_share, _pub_key_share) = DefaultDKG::decrypt_secret_share_from_transcript( + pub_params, transcript, idx as u64, dk, + ) + .unwrap(); + (idx as u64, secret_share) + }) + .collect(); + + DefaultDKG::reconstruct_secret_from_shares(pub_params, player_share_pairs).unwrap() +} + +fn dealt_secret_from_input( + trx: 
&::Transcript, + pub_params: &::PublicParams, + dealer_validator_infos: &[ValidatorConsensusInfo], +) -> ::DealtSecret { + let dealers = DefaultDKG::get_dealers(trx); + println!("dealers={:?}", dealers); + let input_secrets = dealers + .into_iter() + .map(|dealer_idx| { + let cur_addr = dealer_validator_infos[dealer_idx as usize].address; + // Same seed is used in `DKGManager::setup_deal_broadcast` for smoke tests. + let mut rng = StdRng::from_seed(cur_addr.into_bytes()); + ::InputSecret::generate(&mut rng) + }) + .collect(); + + let aggregated_input_secret = DefaultDKG::aggregate_input_secret(input_secrets); + DefaultDKG::dealt_secret_from_input(pub_params, &aggregated_input_secret) +} + +#[allow(dead_code)] +fn num_validators(dkg_state: &DKGSessionState) -> usize { + dkg_state.metadata.target_validator_set.len() +} + +fn decrypt_key_map( + swarm: &LocalSwarm, +) -> HashMap::NewValidatorDecryptKey> { + swarm + .validators() + .map(|validator| { + let dk = validator + .config() + .consensus + .safety_rules + .initial_safety_rules_config + .identity_blob() + .unwrap() + .try_into_dkg_new_validator_decrypt_key() + .unwrap(); + (validator.peer_id(), dk) + }) + .collect::>() +} + +/// Fetch the DKG result and the block randomness (from aggregation) for a specific version. +/// Derive the distributed secret from DKG result. +/// Verify that the randomness from aggregation (the actual one store on chain) equals to +/// the randomness from direct evaluation using the distributed secret (the expected one). +async fn verify_randomness( + decrypt_key_map: &HashMap::NewValidatorDecryptKey>, + rest_client: &Client, + version: u64, +) -> Result<()> { + // Fetch resources. 
+ let (dkg_state, on_chain_block_randomness) = tokio::join!( + get_on_chain_resource_at_version::(rest_client, version), + get_on_chain_resource_at_version::(rest_client, version) + ); + + ensure!( + on_chain_block_randomness.seed.is_some(), + "randomness verification failed with seed missing" + ); + + // Derive the shared secret. + let dkg_session = dkg_state + .last_completed + .ok_or_else(|| anyhow!("randomness verification failed with missing dkg result"))?; + let dkg_pub_params = DefaultDKG::new_public_params(&dkg_session.metadata); + let transcript = + bcs::from_bytes::<::Transcript>(dkg_session.transcript.as_slice()) + .map_err(|_| { + anyhow!( + "randomness verification failed with on-chain dkg transcript deserialization error" + ) + })?; + let dealt_secret = dealt_secret_from_shares( + dkg_session + .metadata + .target_validator_consensus_infos_cloned(), + decrypt_key_map, + &dkg_pub_params, + &transcript, + ); + + // Compare the outputs from 2 paths. + let rand_metadata = RandMetadataToSign { + epoch: on_chain_block_randomness.epoch, + round: on_chain_block_randomness.round, + }; + let input = bcs::to_bytes(&rand_metadata).unwrap(); + let output = WVUF::eval(&dealt_secret, input.as_slice()); + let output_serialized = bcs::to_bytes(&output).unwrap(); + let expected_randomness_seed = Sha3_256::digest(output_serialized.as_slice()).to_vec(); + + ensure!( + expected_randomness_seed == on_chain_block_randomness.seed.clone().unwrap(), + "randomness verification failed with final check failure" + ); + Ok(()) +} diff --git a/testsuite/smoke-test/src/randomness/validator_restart_during_dkg.rs b/testsuite/smoke-test/src/randomness/validator_restart_during_dkg.rs new file mode 100644 index 0000000000000..4c1fbd748a9eb --- /dev/null +++ b/testsuite/smoke-test/src/randomness/validator_restart_during_dkg.rs @@ -0,0 +1,109 @@ +// Copyright © Aptos Foundation + +use crate::{ + randomness::{ + decrypt_key_map, get_on_chain_resource, verify_dkg_transcript, 
wait_for_dkg_finish, + }, + smoke_test_environment::SwarmBuilder, +}; +use aptos_forge::{NodeExt, SwarmExt}; +use aptos_logger::{debug, info}; +use aptos_rest_client::Client; +use aptos_types::{ + dkg::DKGState, + on_chain_config::{FeatureFlag, Features}, +}; +use futures::future::join_all; +use std::{sync::Arc, time::Duration}; + +#[tokio::test] +async fn validator_restart_during_dkg() { + let epoch_duration_secs = 30; + let estimated_dkg_latency_secs = 30; + let time_limit_secs = epoch_duration_secs + estimated_dkg_latency_secs; + let num_validators = 4; + let num_validators_to_restart = 3; + let mut swarm = SwarmBuilder::new_local(num_validators) + .with_num_fullnodes(1) + .with_aptos() + .with_init_config(Arc::new(|_, conf, _| { + conf.api.failpoints_enabled = true; + })) + .with_init_genesis_config(Arc::new(|conf| { + conf.epoch_duration_secs = 30; + + // Ensure vtxn is enabled. + conf.consensus_config.enable_validator_txns(); + + // Ensure randomness flag is set. + let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) + .build() + .await; + + swarm + .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(epoch_duration_secs * 10)) + .await + .unwrap(); + + let decrypt_key_map = decrypt_key_map(&swarm); + + info!("Wait for an epoch start."); + let validator_clients: Vec = + swarm.validators().map(|node| node.rest_client()).collect(); + let dkg_session_1 = wait_for_dkg_finish(&validator_clients[3], None, time_limit_secs).await; + + info!( + "Current epoch is {}.", + dkg_session_1.metadata.dealer_epoch + 1 + ); + + info!("Inject fault to all validators so they get stuck upon the first DKG message received."); + let tasks = validator_clients + .iter() + .take(num_validators_to_restart) + .map(|client| { + client.set_failpoint( + "dkg::process_dkg_start_event".to_string(), + "panic".to_string(), + ) + }) + .collect::>(); + let aptos_results = 
join_all(tasks).await; + debug!("aptos_results={:?}", aptos_results); + + info!("Restart nodes after they panic."); + for (node_idx, node) in swarm + .validators_mut() + .enumerate() + .take(num_validators_to_restart) + { + while node.health_check().await.is_ok() { + tokio::time::sleep(Duration::from_secs(1)).await; + } + info!("node {} panicked", node_idx); + node.restart().await.unwrap(); + info!("node {} restarted", node_idx); + } + + info!( + "DKG should be able to continue. Wait until we fully entered epoch {}.", + dkg_session_1.target_epoch() + 1 + ); + + swarm + .wait_for_all_nodes_to_catchup_to_epoch( + dkg_session_1.target_epoch() + 1, + Duration::from_secs(time_limit_secs), + ) + .await + .unwrap(); + let dkg_session_2 = get_on_chain_resource::(&validator_clients[3]) + .await + .last_completed + .clone() + .unwrap(); + assert!(verify_dkg_transcript(&dkg_session_2, &decrypt_key_map).is_ok()); +} diff --git a/testsuite/smoke-test/src/validator_txns.rs b/testsuite/smoke-test/src/validator_txns.rs index ad0473b1aa6b2..12bc8277635cc 100644 --- a/testsuite/smoke-test/src/validator_txns.rs +++ b/testsuite/smoke-test/src/validator_txns.rs @@ -7,15 +7,26 @@ use crate::{ use aptos_forge::{NodeExt, SwarmExt}; use aptos_logger::{debug, info}; use aptos_rest_client::Client; +use aptos_types::on_chain_config::{FeatureFlag, Features}; use futures::future::join_all; use std::{sync::Arc, time::Duration}; +/// Chain should not be blocked by failing validator txns. #[tokio::test] async fn dummy_validator_txns() { let swarm = SwarmBuilder::new_local(4) .with_init_config(Arc::new(|_, config, _| { config.api.failpoints_enabled = true; })) + .with_init_genesis_config(Arc::new(move |conf| { + // start with vtxn disabled. + conf.consensus_config.disable_validator_txns(); + + // start with dkg enabled. 
+ let mut features = Features::default(); + features.enable(FeatureFlag::RECONFIGURE_WITH_DKG); + conf.initial_features_override = Some(features); + })) .with_aptos() .build() .await; diff --git a/third_party/move/move-stdlib/docs/hash.md b/third_party/move/move-stdlib/docs/hash.md index 7197a1f714bf8..312e5651e0504 100644 --- a/third_party/move/move-stdlib/docs/hash.md +++ b/third_party/move/move-stdlib/docs/hash.md @@ -59,6 +59,19 @@ as in the Move prover's prelude. + + +

+Specification + + + +
aborts_if [abstract] false;
+ensures [abstract] len(result) == 32;
+
+ + +
diff --git a/third_party/move/move-stdlib/sources/hash.move b/third_party/move/move-stdlib/sources/hash.move index daadc4e815770..ad1d0ae794afa 100644 --- a/third_party/move/move-stdlib/sources/hash.move +++ b/third_party/move/move-stdlib/sources/hash.move @@ -5,4 +5,9 @@ module std::hash { native public fun sha2_256(data: vector): vector; native public fun sha3_256(data: vector): vector; + + spec sha3_256(data: vector): vector { + aborts_if [abstract] false; + ensures [abstract] len(result) == 32; + } } diff --git a/third_party/move/move-vm/runtime/src/session.rs b/third_party/move/move-vm/runtime/src/session.rs index 312dc5d8048a9..50aaf71376fc8 100644 --- a/third_party/move/move-vm/runtime/src/session.rs +++ b/third_party/move/move-vm/runtime/src/session.rs @@ -349,6 +349,24 @@ impl<'r, 'l> Session<'r, 'l> { Ok(instantiation) } + /// Note: Cannot return a `Function` struct here due to its `pub(crate)` visibility. + pub fn load_function_def_is_friend_or_private( + &self, + module_id: &ModuleId, + function_name: &IdentStr, + type_arguments: &[TypeTag], + ) -> VMResult { + let (_, func, _) = self.move_vm.runtime.loader().load_function( + module_id, + function_name, + type_arguments, + &self.data_cache, + &self.module_store, + )?; + + Ok(func.is_friend_or_private()) + } + /// Load a module, a function, and all of its types into cache pub fn load_function_with_type_arg_inference( &self, diff --git a/types/src/block_metadata_ext.rs b/types/src/block_metadata_ext.rs index 5abf5dfd1ffaa..10fa5efbfd94c 100644 --- a/types/src/block_metadata_ext.rs +++ b/types/src/block_metadata_ext.rs @@ -103,13 +103,6 @@ impl BlockMetadataExt { BlockMetadataExt::V1(obj) => obj.round, } } - - pub fn randomness(&self) -> &Option { - match self { - BlockMetadataExt::V0(_) => unreachable!(), - BlockMetadataExt::V1(obj) => &obj.randomness, - } - } } impl From for BlockMetadataExt { diff --git a/types/src/dkg/dummy_dkg/tests.rs b/types/src/dkg/dummy_dkg/tests.rs index 
527d3d81c8207..ada04cb081a9a 100644 --- a/types/src/dkg/dummy_dkg/tests.rs +++ b/types/src/dkg/dummy_dkg/tests.rs @@ -97,6 +97,8 @@ fn test_dummy_dkg_correctness() { dealer_epoch: 999, dealer_validator_set: dealer_infos.clone(), target_validator_set: new_validator_infos.clone(), + block_dkg: false, + block_randomness: false, }; let pub_params = DummyDKG::new_public_params(&dkg_session_metadata); diff --git a/types/src/dkg/mod.rs b/types/src/dkg/mod.rs index 36cba59e72087..3eb89998bdc78 100644 --- a/types/src/dkg/mod.rs +++ b/types/src/dkg/mod.rs @@ -82,6 +82,8 @@ pub struct DKGSessionMetadata { pub dealer_epoch: u64, pub dealer_validator_set: Vec, pub target_validator_set: Vec, + pub block_dkg: bool, + pub block_randomness: bool, } impl DKGSessionMetadata { diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs index d2a642d644cb9..3ea9bfa322bd4 100644 --- a/types/src/on_chain_config/aptos_features.rs +++ b/types/src/on_chain_config/aptos_features.rs @@ -193,6 +193,10 @@ impl Features { self.is_enabled(FeatureFlag::OIDB_ZKLESS_SIGNATURE) } + pub fn is_reconfigure_with_dkg_enabled(&self) -> bool { + self.is_enabled(FeatureFlag::RECONFIGURE_WITH_DKG) + } + pub fn is_remove_detailed_error_from_hash_enabled(&self) -> bool { self.is_enabled(FeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH) } @@ -201,3 +205,18 @@ impl Features { self.is_enabled(FeatureFlag::REFUNDABLE_BYTES) } } + +#[test] +fn test_features_into_flag_vec() { + let mut features = Features { features: vec![] }; + features.enable(FeatureFlag::BLS12_381_STRUCTURES); + features.enable(FeatureFlag::BN254_STRUCTURES); + let flag_vec = features.into_flag_vec(); + assert_eq!( + vec![ + FeatureFlag::BLS12_381_STRUCTURES, + FeatureFlag::BN254_STRUCTURES + ], + flag_vec + ); +} diff --git a/types/src/on_chain_config/consensus_config.rs b/types/src/on_chain_config/consensus_config.rs index b62ea5358ac05..ef5c76e684ad5 100644 --- 
a/types/src/on_chain_config/consensus_config.rs +++ b/types/src/on_chain_config/consensus_config.rs @@ -2,7 +2,10 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{block_info::Round, on_chain_config::OnChainConfig}; +use crate::{ + block_info::Round, + on_chain_config::{ConsensusAlgorithmConfig::Jolteon, OnChainConfig}, +}; use anyhow::{format_err, Result}; use move_core_types::account_address::AccountAddress; use serde::{Deserialize, Serialize}; @@ -241,6 +244,48 @@ impl OnChainConsensusConfig { pub fn is_vtxn_enabled(&self) -> bool { self.effective_validator_txn_config().enabled() } + + pub fn disable_validator_txns(&mut self) { + match self { + OnChainConsensusConfig::V1(_) | OnChainConsensusConfig::V2(_) => { + // vtxn not supported. No-op. + }, + OnChainConsensusConfig::V3 { vtxn, .. } => { + *vtxn = ValidatorTxnConfig::V0; + }, + } + } + + pub fn enable_validator_txns(&mut self) { + let new_self = match std::mem::take(self) { + OnChainConsensusConfig::V1(config) => OnChainConsensusConfig::V3 { + alg: Jolteon { + main: config, + quorum_store_enabled: false, + }, + vtxn: ValidatorTxnConfig::default_enabled(), + }, + OnChainConsensusConfig::V2(config) => OnChainConsensusConfig::V3 { + alg: Jolteon { + main: config, + quorum_store_enabled: true, + }, + vtxn: ValidatorTxnConfig::default_enabled(), + }, + OnChainConsensusConfig::V3 { + vtxn: ValidatorTxnConfig::V0, + alg, + } => OnChainConsensusConfig::V3 { + alg, + vtxn: ValidatorTxnConfig::default_enabled(), + }, + item @ OnChainConsensusConfig::V3 { + vtxn: ValidatorTxnConfig::V1 { .. }, + .. + } => item, + }; + *self = new_self; + } } /// This is used when on-chain config is not initialized. 
diff --git a/types/src/on_chain_config/mod.rs b/types/src/on_chain_config/mod.rs index 2af3047334161..163f340c34054 100644 --- a/types/src/on_chain_config/mod.rs +++ b/types/src/on_chain_config/mod.rs @@ -45,8 +45,8 @@ pub use self::{ ValidatorTxnConfig, }, execution_config::{ - BlockGasLimitType, ExecutionConfigV1, ExecutionConfigV2, OnChainExecutionConfig, - TransactionDeduperType, TransactionShufflerType, + BlockGasLimitType, ExecutionConfigV1, ExecutionConfigV2, ExecutionConfigV4, + OnChainExecutionConfig, TransactionDeduperType, TransactionShufflerType, }, gas_schedule::{GasSchedule, GasScheduleV2, StorageGasSchedule}, timed_features::{TimedFeatureFlag, TimedFeatureOverride, TimedFeatures, TimedFeaturesBuilder},