From f28fc55b652ff984a44bda56ce57f9f81650d10b Mon Sep 17 00:00:00 2001
From: igor-aptos <110557261+igor-aptos@users.noreply.github.com>
Date: Thu, 10 Oct 2024 11:59:22 -0700
Subject: [PATCH 01/22] [txn-emitter][simple] Reduce flakiness in txn emitter
 (#14299)

* fixes to txn emitter

* [txn-emitter][simple] Reduce flakiness in txn emitter

* increase price for account create

* use view balance instead of get balance

---------
---
 Cargo.lock                                    |  8 +-
 .../src/tests/coin_transfer.rs               | 12 +--
 crates/aptos-api-tester/src/utils.rs          |  6 +-
 .../aptos-faucet/core/src/funder/transfer.rs  |  4 +-
 crates/aptos-faucet/core/src/server/run.rs    | 22 ++++-
 crates/aptos-rest-client/src/lib.rs           | 74 +++++++++----------
 crates/aptos/src/common/types.rs              |  4 +-
 crates/transaction-emitter-lib/src/cluster.rs | 26 ++++---
 .../src/emitter/account_minter.rs             | 39 ++++++----
 .../src/emitter/local_account_generator.rs    | 20 ++++-
 .../src/emitter/mod.rs                        |  4 +-
 .../src/emitter/submission_worker.rs          |  4 +-
 .../src/emitter/transaction_executor.rs       | 33 ++++++---
 sdk/src/coin_client.rs                        |  4 +-
 testsuite/forge-cli/src/main.rs               |  4 +-
 .../smoke-test/src/aptos/mint_transfer.rs     |  5 +-
 testsuite/smoke-test/src/fullnode.rs          |  4 +-
 testsuite/smoke-test/src/rosetta.rs           | 21 ++---
 testsuite/smoke-test/src/utils.rs             |  4 +-
 19 files changed, 169 insertions(+), 129 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 734e4e9ecc6b7..73f7e4ecc3e55 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -12234,9 +12234,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5"

 [[package]]
 name = "openssl"
-version = "0.10.62"
+version = "0.10.66"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cde4d2d9200ad5909f8dac647e29482e07c3a35de8a13fce7c9c7747ad9f671"
+checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1"
 dependencies = [
  "bitflags 2.6.0",
  "cfg-if",
@@ -12266,9 +12266,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"

 [[package]]
 name = "openssl-sys"
-version = "0.9.98"
+version = "0.9.103"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1665caf8ab2dc9aef43d1c0023bd904633a6a05cb30b0ad59bec2ae986e57a7"
+checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6"
 dependencies = [
  "cc",
  "libc",
diff --git a/crates/aptos-api-tester/src/tests/coin_transfer.rs b/crates/aptos-api-tester/src/tests/coin_transfer.rs
index 18b8b8006ff23..1f07e317997cf 100644
--- a/crates/aptos-api-tester/src/tests/coin_transfer.rs
+++ b/crates/aptos-api-tester/src/tests/coin_transfer.rs
@@ -204,11 +204,11 @@ async fn check_account_balance(
     address: AccountAddress,
 ) -> Result<(), TestFailure> {
     // expected
-    let expected = U64(TRANSFER_AMOUNT);
+    let expected = TRANSFER_AMOUNT;

     // actual
-    let actual = match client.get_account_balance(address).await {
-        Ok(response) => response.into_inner().coin.value,
+    let actual = match client.view_apt_account_balance(address).await {
+        Ok(response) => response.into_inner(),
         Err(e) => {
             error!(
                 "test: coin_transfer part: check_account_balance ERROR: {}, with error {:?}",
@@ -236,14 +236,14 @@ async fn check_account_balance_at_version(
     transaction_version: u64,
 ) -> Result<(), TestFailure> {
     // expected
-    let expected = U64(0);
+    let expected = 0;

     // actual
     let actual = match client
-        .get_account_balance_at_version(address, transaction_version - 1)
+        .view_apt_account_balance_at_version(address, transaction_version - 1)
         .await
     {
-        Ok(response) => response.into_inner().coin.value,
+        Ok(response) => response.into_inner(),
         Err(e) => {
             error!(
                 "test: coin_transfer part: check_account_balance_at_version ERROR: {}, with error {:?}",
diff --git a/crates/aptos-api-tester/src/utils.rs b/crates/aptos-api-tester/src/utils.rs
index fb1ad99d72ae0..44c08a0b4cb6c 100644
--- a/crates/aptos-api-tester/src/utils.rs
+++ b/crates/aptos-api-tester/src/utils.rs
@@ -181,8 +181,8 @@ pub async fn check_balance(
     expected: U64,
 ) -> Result<(), TestFailure> {
     // actual
-    let actual = match client.get_account_balance(address).await {
-        Ok(response) => response.into_inner().coin.value,
+    let actual = match client.view_apt_account_balance(address).await {
+        Ok(response) => response.into_inner(),
         Err(e) => {
             error!(
                 "test: {} part: check_account_data ERROR: {}, with error {:?}",
@@ -195,7 +195,7 @@ pub async fn check_balance(
     };

     // compare
-    if expected != actual {
+    if expected.0 != actual {
         error!(
             "test: {} part: check_account_data FAIL: {}, expected {:?}, got {:?}",
             &test_name.to_string(),
diff --git a/crates/aptos-faucet/core/src/funder/transfer.rs b/crates/aptos-faucet/core/src/funder/transfer.rs
index c47fdad80fe28..5b3ac87206527 100644
--- a/crates/aptos-faucet/core/src/funder/transfer.rs
+++ b/crates/aptos-faucet/core/src/funder/transfer.rs
@@ -22,7 +22,7 @@ use aptos_sdk::{
         account_address::AccountAddress,
         chain_id::ChainId,
         transaction::{authenticator::AuthenticationKey, SignedTransaction, TransactionPayload},
-        AptosCoinType, LocalAccount,
+        LocalAccount,
     },
 };
 use async_trait::async_trait;
@@ -314,7 +314,7 @@ impl FunderTrait for TransferFunder {
         let account_address = self.faucet_account.read().await.address();
         let funder_balance = match self
             .get_api_client()
-            .get_account_balance_bcs::<AptosCoinType>(account_address)
+            .view_apt_account_balance(account_address)
             .await
         {
             Ok(response) => response.into_inner(),
diff --git a/crates/aptos-faucet/core/src/server/run.rs b/crates/aptos-faucet/core/src/server/run.rs
index 4717f2afc4be0..b5b80e6440377 100644
--- a/crates/aptos-faucet/core/src/server/run.rs
+++ b/crates/aptos-faucet/core/src/server/run.rs
@@ -836,10 +836,10 @@ mod test {

         // Assert that the account exists now with the expected balance.
         let response = aptos_node_api_client
-            .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap())
+            .view_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap())
             .await?;

-        assert_eq!(response.into_inner().get(), 10);
+        assert_eq!(response.into_inner(), 10);

         Ok(())
     }
@@ -894,10 +894,12 @@ mod test {

         // Assert that the account exists now with the expected balance.
         let response = aptos_node_api_client
-            .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap())
+            .view_apt_account_balance(
+                AccountAddress::from_str(&fund_request.address.unwrap()).unwrap(),
+            )
             .await?;

-        assert_eq!(response.into_inner().get(), 10);
+        assert_eq!(response.into_inner(), 10);

         Ok(())
     }
@@ -942,10 +944,12 @@ mod test {

         // Confirm that the account was given the full 1000 OCTA as requested.
         let response = aptos_node_api_client
-            .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap())
+            .view_apt_account_balance(
+                AccountAddress::from_str(&fund_request.address.unwrap()).unwrap(),
+            )
            .await?;

-        assert_eq!(response.into_inner().get(), 1000);
+        assert_eq!(response.into_inner(), 1000);

         // This time, don't include the auth token. We request more than maximum_amount,
         // but later we'll see that the faucet will only give us maximum_amount, not
@@ -960,10 +964,12 @@ mod test {

         // Confirm that the account was only given 100 OCTA (maximum_amount), not 1000.
         let response = aptos_node_api_client
-            .get_account_balance(AccountAddress::from_str(&fund_request.address.unwrap()).unwrap())
+            .view_apt_account_balance(
+                AccountAddress::from_str(&fund_request.address.unwrap()).unwrap(),
+            )
             .await?;

-        assert_eq!(response.into_inner().get(), 100);
+        assert_eq!(response.into_inner(), 100);

         Ok(())
     }
diff --git a/crates/aptos-rest-client/src/lib.rs b/crates/aptos-rest-client/src/lib.rs
index ef4ca6d8ff602..30b8542c70d05 100644
--- a/crates/aptos-rest-client/src/lib.rs
+++ b/crates/aptos-rest-client/src/lib.rs
@@ -15,10 +15,7 @@ pub mod state;
 pub mod types;

 pub use crate::client_builder::{AptosBaseUrl, ClientBuilder};
-use crate::{
-    aptos::{AptosVersion, Balance},
-    error::RestError,
-};
+use crate::{aptos::AptosVersion, error::RestError};
 use anyhow::{anyhow, Result};
 pub use aptos_api_types::{
     self, IndexResponseBcs, MoveModuleBytecode, PendingTransaction, Transaction,
@@ -34,13 +31,15 @@ use aptos_crypto::HashValue;
 use aptos_logger::{debug, info, sample, sample::SampleRate};
 use aptos_types::{
     account_address::AccountAddress,
-    account_config::{AccountResource, CoinStoreResource, NewBlockEvent, CORE_CODE_ADDRESS},
+    account_config::{AccountResource, NewBlockEvent, CORE_CODE_ADDRESS},
     contract_event::EventWithVersion,
     state_store::state_key::StateKey,
     transaction::SignedTransaction,
-    CoinType,
 };
-use move_core_types::language_storage::StructTag;
+use move_core_types::{
+    ident_str,
+    language_storage::{ModuleId, StructTag, TypeTag},
+};
 use reqwest::{
     header::{ACCEPT, CONTENT_TYPE},
     Client as ReqwestClient, StatusCode,
 };
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
 use serde_json::{json, Value};
 pub use state::State;
-use std::{collections::BTreeMap, future::Future, time::Duration};
+use std::{collections::BTreeMap, future::Future, str::FromStr, time::Duration};
 use tokio::time::Instant;
 pub use types::{deserialize_from_prefixed_hex_string, Account, Resource};
 use url::Url;
@@ -205,51 +204,50 @@ impl Client {
         Ok(response.and_then(|inner| bcs::from_bytes(&inner))?)
     }

-    pub async fn get_account_balance(
+    async fn view_account_balance_bcs_impl(
         &self,
         address: AccountAddress,
-    ) -> AptosResult<Response<Balance>> {
-        let resp = self
-            .get_account_resource(address, "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>")
+        coin_type: &str,
+        version: Option<u64>,
+    ) -> AptosResult<Response<u64>> {
+        let resp: Response<Vec<u64>> = self
+            .view_bcs(
+                &ViewFunction {
+                    module: ModuleId::new(AccountAddress::ONE, ident_str!("coin").into()),
+                    function: ident_str!("balance").into(),
+                    ty_args: vec![TypeTag::Struct(Box::new(
+                        StructTag::from_str(coin_type).unwrap(),
+                    ))],
+                    args: vec![bcs::to_bytes(&address).unwrap()],
+                },
+                version,
+            )
             .await?;
-        resp.and_then(|resource| {
-            if let Some(res) = resource {
-                Ok(serde_json::from_value::<Balance>(res.data)?)
+
+        resp.and_then(|result| {
+            if result.len() != 1 {
+                Err(anyhow!("Wrong data size returned: {:?}", result).into())
             } else {
-                Err(anyhow!("No data returned").into())
+                Ok(result[0])
             }
         })
     }

-    pub async fn get_account_balance_bcs<C: CoinType>(
+    pub async fn view_apt_account_balance_at_version(
         &self,
         address: AccountAddress,
+        version: u64,
     ) -> AptosResult<Response<u64>> {
-        let resp = self
-            .get_account_resource_bcs::<CoinStoreResource<C>>(address, &C::type_tag().to_string())
-            .await?;
-        resp.and_then(|resource| Ok(resource.coin()))
+        self.view_account_balance_bcs_impl(address, "0x1::aptos_coin::AptosCoin", Some(version))
+            .await
     }

-    pub async fn get_account_balance_at_version(
+    pub async fn view_apt_account_balance(
         &self,
         address: AccountAddress,
-        version: u64,
-    ) -> AptosResult<Response<Balance>> {
-        let resp = self
-            .get_account_resource_at_version(
-                address,
-                "0x1::coin::CoinStore<0x1::aptos_coin::AptosCoin>",
-                version,
-            )
-            .await?;
-        resp.and_then(|resource| {
-            if let Some(res) = resource {
-                Ok(serde_json::from_value::<Balance>(res.data)?)
-            } else {
-                Err(anyhow!("No data returned").into())
-            }
-        })
+    ) -> AptosResult<Response<u64>> {
+        self.view_account_balance_bcs_impl(address, "0x1::aptos_coin::AptosCoin", None)
+            .await
     }

     pub async fn get_index(&self) -> AptosResult<Response<IndexResponse>> {
diff --git a/crates/aptos/src/common/types.rs b/crates/aptos/src/common/types.rs
index c9a6be08e4c23..779e1c26b163d 100644
--- a/crates/aptos/src/common/types.rs
+++ b/crates/aptos/src/common/types.rs
@@ -1899,7 +1899,7 @@ impl TransactionOptions {
         let sequence_number = account.sequence_number;

         let balance = client
-            .get_account_balance_at_version(sender_address, version)
+            .view_apt_account_balance_at_version(sender_address, version)
             .await
             .map_err(|err| CliError::ApiError(err.to_string()))?
             .into_inner();
@@ -1908,7 +1908,7 @@ impl TransactionOptions {
             if gas_unit_price == 0 {
                 DEFAULT_MAX_GAS
             } else {
-                std::cmp::min(balance.coin.value.0 / gas_unit_price, DEFAULT_MAX_GAS)
+                std::cmp::min(balance / gas_unit_price, DEFAULT_MAX_GAS)
             }
         });
diff --git a/crates/transaction-emitter-lib/src/cluster.rs b/crates/transaction-emitter-lib/src/cluster.rs
index a1584b3ef992f..72686d672cb9c 100644
--- a/crates/transaction-emitter-lib/src/cluster.rs
+++ b/crates/transaction-emitter-lib/src/cluster.rs
@@ -12,7 +12,7 @@ use aptos_rest_client::{Client as RestClient, State};
 use aptos_sdk::types::{chain_id::ChainId, AccountKey, LocalAccount};
 use futures::{stream::FuturesUnordered, StreamExt};
 use rand::seq::SliceRandom;
-use std::{convert::TryFrom, time::Instant};
+use std::convert::TryFrom;
 use url::Url;

 #[derive(Debug)]
@@ -42,7 +42,7 @@ impl Cluster {
         let mut instance_states = Vec::new();
         let mut errors = Vec::new();

-        let start = Instant::now();
+        let fetch_timestamp = aptos_infallible::duration_since_epoch().as_secs();
         let futures = FuturesUnordered::new();
         for url in &peers {
             let instance = Instance::new(
@@ -62,7 +62,7 @@ impl Cluster {
         }

         let results: Vec<_> = futures.collect().await;
-        let fetch_time_s = start.elapsed().as_secs();
+
         for (instance, result) in results {
             match result {
                 Ok(v) => instance_states.push((instance, v.into_inner())),
@@ -89,6 +89,13 @@ impl Cluster {
                 .map(|(_, s)| s.timestamp_usecs / 1000000)
                 .max()
                 .unwrap();
+            if max_timestamp + 10 < fetch_timestamp {
+                return Err(anyhow!(
+                    "None of the rest endpoints provided have chain timestamp within 10s of local time: {} < {}",
+                    max_timestamp,
+                    fetch_timestamp,
+                ));
+            }

             let chain_id_from_instances = get_chain_id_from_instances(instance_states.clone())?;
             let chain_id: ChainId = match maybe_chain_id {
@@ -111,18 +118,19 @@ impl Cluster {
                        state.chain_id,
                        chain_id.id(),
                    );
-                } else if state_timestamp + 20 + fetch_time_s < max_timestamp {
+                } else if state_timestamp + 10 < fetch_timestamp {
                    warn!(
-                        "Excluding Client {} too stale, {}, while chain at {} (delta of {}s)",
+                        "Excluding Client {} too stale, {}, while current time when fetching is {} (delta of {}s)",
                        instance.peer_name(),
                        state_timestamp,
-                        max_timestamp,
-                        max_timestamp - state_timestamp,
+                        fetch_timestamp,
+                        fetch_timestamp - state_timestamp,
                    );
                } else {
                    info!(
-                        "Client {} is healthy, adding to the list of end points for load testing",
-                        instance.peer_name()
+                        "Client {} is healthy ({}s delay), adding to the list of end points for load testing",
+                        instance.peer_name(),
+                        fetch_timestamp.saturating_sub(state_timestamp),
                    );
                    instances.push(instance);
                }
diff --git a/crates/transaction-emitter-lib/src/emitter/account_minter.rs b/crates/transaction-emitter-lib/src/emitter/account_minter.rs
index 51eed9afa8de1..274f340a4bd70 100644
--- a/crates/transaction-emitter-lib/src/emitter/account_minter.rs
+++ b/crates/transaction-emitter-lib/src/emitter/account_minter.rs
@@ -349,9 +349,7 @@ impl<'t> AccountMinter<'t> {
             .await
             .into_iter()
             .collect::<Result<Vec<_>>>()
-            .map_err(|e| format_err!("Failed to create accounts: {:?}", e))?
-            .into_iter()
-            .collect();
+            .map_err(|e| format_err!("Failed to create accounts: {:?}", e))?;

         info!(
             "Successfully completed creating {} accounts in {}s, request stats: {}",
@@ -493,7 +491,7 @@ async fn create_and_fund_new_accounts(
         .map(|chunk| chunk.to_vec())
         .collect::<Vec<_>>();
     let source_address = source_account.address();
-    for batch in accounts_by_batch {
+    for (batch_index, batch) in accounts_by_batch.into_iter().enumerate() {
         let creation_requests: Vec<_> = batch
             .iter()
             .map(|account| {
@@ -509,7 +507,12 @@ async fn create_and_fund_new_accounts(
         txn_executor
             .execute_transactions_with_counter(&creation_requests, counters)
             .await
-            .with_context(|| format!("Account {} couldn't mint", source_address))?;
+            .with_context(|| {
+                format!(
+                    "Account {} couldn't mint batch {}",
+                    source_address, batch_index
+                )
+            })?;
     }
     Ok(())
 }
@@ -698,18 +701,24 @@ pub async fn bulk_create_accounts(
             .iter()
             .map(|account| txn_executor.get_account_balance(account.address()));
         let balances: Vec<_> = try_join_all(balance_futures).await?;
-        accounts
+        let underfunded = accounts
             .iter()
             .zip(balances)
-            .for_each(|(account, balance)| {
-                assert!(
-                    balance >= coins_per_account,
-                    "Account {} has balance {} < needed_min_balance {}",
-                    account.address(),
-                    balance,
-                    coins_per_account
-                );
-            });
+            .enumerate()
+            .filter(|(_idx, (_account, balance))| *balance < coins_per_account)
+            .collect::<Vec<_>>();
+
+        let first = underfunded.first();
+        assert!(
+            underfunded.is_empty(),
+            "{} out of {} accounts are underfunded. For example Account[{}] {} has balance {} < needed_min_balance {}",
+            underfunded.len(),
+            accounts.len(),
+            first.unwrap().0,             // idx
+            first.unwrap().1.0.address(), // account
+            first.unwrap().1.1,           // balance
+            coins_per_account,
+        );

         info!("Skipping funding accounts");
         Ok(accounts)
diff --git a/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs b/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs
index 21504db166fd7..6033d7a49856e 100644
--- a/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs
+++ b/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs
@@ -6,13 +6,15 @@ use aptos_sdk::types::{AccountKey, EphemeralKeyPair, KeylessAccount, LocalAccoun
 use aptos_transaction_generator_lib::ReliableTransactionSubmitter;
 use aptos_types::keyless::{Claims, OpenIdSig, Pepper, ZeroKnowledgeSig};
 use async_trait::async_trait;
-use futures::future::try_join_all;
+use futures::StreamExt;
 use rand::rngs::StdRng;
 use std::{
     fs::File,
     io::{self, BufRead},
 };

+const QUERY_PARALLELISM: usize = 300;
+
 #[async_trait]
 pub trait LocalAccountGenerator: Send + Sync {
     async fn gen_local_accounts(
@@ -73,7 +75,13 @@ impl LocalAccountGenerator for PrivateKeyAccountGenerator {
             .iter()
             .map(|address| txn_executor.query_sequence_number(*address))
             .collect::<Vec<_>>();
-        let seq_nums: Vec<_> = try_join_all(result_futures).await?.into_iter().collect();
+
+        let seq_nums = futures::stream::iter(result_futures)
+            .buffered(QUERY_PARALLELISM)
+            .collect::<Vec<_>>()
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>, _>>()?;

         let accounts = account_keys
             .into_iter()
@@ -166,7 +174,13 @@ impl LocalAccountGenerator for KeylessAccountGenerator {
             .iter()
             .map(|address| txn_executor.query_sequence_number(*address))
             .collect::<Vec<_>>();
-        let seq_nums: Vec<_> = try_join_all(result_futures).await?.into_iter().collect();
+
+        let seq_nums = futures::stream::iter(result_futures)
+            .buffered(QUERY_PARALLELISM)
+            .collect::<Vec<_>>()
+            .await
+            .into_iter()
+            .collect::<Result<Vec<_>, _>>()?;

         let accounts = keyless_accounts
             .into_iter()
diff --git a/crates/transaction-emitter-lib/src/emitter/mod.rs b/crates/transaction-emitter-lib/src/emitter/mod.rs
index 39ff2c284d4d6..78a140cc4f997 100644
--- a/crates/transaction-emitter-lib/src/emitter/mod.rs
+++ b/crates/transaction-emitter-lib/src/emitter/mod.rs
@@ -55,8 +55,8 @@ use tokio::{runtime::Handle, task::JoinHandle, time};

 const MAX_TXNS: u64 = 1_000_000_000;

 // TODO Transfer cost increases during Coin => FA migration, we can reduce back later.
-pub const EXPECTED_GAS_PER_TRANSFER: u64 = 10;
-pub const EXPECTED_GAS_PER_ACCOUNT_CREATE: u64 = 2000 + 8;
+pub const EXPECTED_GAS_PER_TRANSFER: u64 = 22;
+pub const EXPECTED_GAS_PER_ACCOUNT_CREATE: u64 = 1100 + 20;

 const MAX_RETRIES: usize = 12;
diff --git a/crates/transaction-emitter-lib/src/emitter/submission_worker.rs b/crates/transaction-emitter-lib/src/emitter/submission_worker.rs
index b51b6cba1d0c0..be3dafccaeaf2 100644
--- a/crates/transaction-emitter-lib/src/emitter/submission_worker.rs
+++ b/crates/transaction-emitter-lib/src/emitter/submission_worker.rs
@@ -469,9 +469,9 @@ pub async fn submit_transactions(
             None
         };
         let balance = client
-            .get_account_balance(sender)
+            .view_apt_account_balance(sender)
             .await
-            .map_or(-1, |v| v.into_inner().get() as i64);
+            .map_or(-1, |v| v.into_inner() as i64);

         warn!(
             "[{:?}] Failed to submit {} txns in a batch, first failure due to {:?}, for account {}, chain id: {:?}, first asked: {}, failed seq nums: {:?}, failed error codes: {:?}, balance of {} and last transaction for account: {:?}",
diff --git a/crates/transaction-emitter-lib/src/emitter/transaction_executor.rs b/crates/transaction-emitter-lib/src/emitter/transaction_executor.rs
index 63d72060dfb29..55713307dd45c 100644
--- a/crates/transaction-emitter-lib/src/emitter/transaction_executor.rs
+++ b/crates/transaction-emitter-lib/src/emitter/transaction_executor.rs
@@ -82,6 +82,7 @@ impl RestApiReliableTransactionSubmitter {
                     rest_client,
                     txn,
                     self.retry_after,
+                    i == 0,
                     &mut failed_submit,
                     &mut failed_wait,
                 )
@@ -188,9 +189,9 @@ async fn warn_detailed_error(
         (None, None)
     };
     let balance = rest_client
-        .get_account_balance(sender)
+        .view_apt_account_balance(sender)
         .await
-        .map_or(-1, |v| v.into_inner().get() as i128);
+        .map_or(-1, |v| v.into_inner() as i128);

     warn!(
         "[{:?}] Failed {} transaction: {:?}, seq num: {}, gas: unit {} and max {}, for account {}, last seq_num {:?}, balance of {} and last transaction for account: {:?}",
@@ -211,6 +212,7 @@ async fn submit_and_check(
     rest_client: &RestClient,
     txn: &SignedTransaction,
     wait_duration: Duration,
+    first_try: bool,
     failed_submit: &mut bool,
     failed_wait: &mut bool,
 ) -> Result<()> {
@@ -221,7 +223,11 @@ async fn submit_and_check(
             warn_detailed_error("submitting", rest_client, txn, Err(&err)).await
         );
         *failed_submit = true;
-        if format!("{}", err).contains("SEQUENCE_NUMBER_TOO_OLD") {
+        if first_try && format!("{}", err).contains("SEQUENCE_NUMBER_TOO_OLD") {
+            sample!(
+                SampleRate::Duration(Duration::from_secs(2)),
+                warn_detailed_error("submitting on first try", rest_client, txn, Err(&err)).await
+            );
             // There's no point to wait or retry on this error.
             // TODO: find a better way to propogate this error to the caller.
             Err(err)?
@@ -295,13 +301,22 @@ fn is_account_not_found(error: &RestError) -> bool {
 impl ReliableTransactionSubmitter for RestApiReliableTransactionSubmitter {
     async fn get_account_balance(&self, account_address: AccountAddress) -> Result<u64> {
         Ok(FETCH_ACCOUNT_RETRY_POLICY
-            .retry(move || {
-                self.random_rest_client()
-                    .get_account_balance(account_address)
-            })
+            .retry_if(
+                move || {
+                    self.random_rest_client()
+                        .view_apt_account_balance(account_address)
+                },
+                |error: &RestError| match error {
+                    RestError::Api(error) => !matches!(
+                        error.error.error_code,
+                        AptosErrorCode::AccountNotFound | AptosErrorCode::InvalidInput
+                    ),
+                    RestError::Unknown(_) => false,
+                    _ => true,
+                },
+            )
            .await?
-            .into_inner()
-            .get())
+            .into_inner())
     }

     async fn query_sequence_number(&self, account_address: AccountAddress) -> Result<u64> {
diff --git a/sdk/src/coin_client.rs b/sdk/src/coin_client.rs
index a674789e42de4..6f136522e85e9 100644
--- a/sdk/src/coin_client.rs
+++ b/sdk/src/coin_client.rs
@@ -83,10 +83,10 @@ impl<'a> CoinClient<'a> {
     pub async fn get_account_balance(&self, account: &AccountAddress) -> Result<u64> {
         let response = self
             .api_client
-            .get_account_balance(*account)
+            .view_apt_account_balance(*account)
             .await
             .context("Failed to get account balance")?;
-        Ok(response.inner().get())
+        Ok(response.into_inner())
     }
 }
diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs
index 59c88f4101416..794e564efaba1 100644
--- a/testsuite/forge-cli/src/main.rs
+++ b/testsuite/forge-cli/src/main.rs
@@ -2629,10 +2629,10 @@ pub async fn check_account_balance(
     expected: u64,
 ) -> Result<()> {
     let balance = client
-        .get_account_balance(account_address)
+        .view_apt_account_balance(account_address)
         .await?
         .into_inner();
-    assert_eq!(balance.get(), expected);
+    assert_eq!(balance, expected);

     Ok(())
 }
diff --git a/testsuite/smoke-test/src/aptos/mint_transfer.rs b/testsuite/smoke-test/src/aptos/mint_transfer.rs
index 8be365164a269..cb8b02b9186b6 100644
--- a/testsuite/smoke-test/src/aptos/mint_transfer.rs
+++ b/testsuite/smoke-test/src/aptos/mint_transfer.rs
@@ -36,11 +36,10 @@ async fn test_mint_transfer() {
     info.client().submit_and_wait(&transfer_txn).await.unwrap();
     assert_eq!(
         info.client()
-            .get_account_balance(account2.address())
+            .view_apt_account_balance(account2.address())
             .await
             .unwrap()
-            .into_inner()
-            .get(),
+            .into_inner(),
         40000
     );

diff --git a/testsuite/smoke-test/src/fullnode.rs b/testsuite/smoke-test/src/fullnode.rs
index 0a5bbb139ae17..b0ff39eaf3401 100644
--- a/testsuite/smoke-test/src/fullnode.rs
+++ b/testsuite/smoke-test/src/fullnode.rs
@@ -80,12 +80,12 @@ async fn test_indexer() {
     client.submit_and_wait(&txn).await.unwrap();

     let balance = client
-        .get_account_balance(account2.address())
+        .view_apt_account_balance(account2.address())
         .await
         .unwrap()
         .into_inner();

-    assert_eq!(balance.get(), 10);
+    assert_eq!(balance, 10);
 }

 async fn wait_for_account(client: &RestClient, address: AccountAddress) -> Result<()> {
diff --git a/testsuite/smoke-test/src/rosetta.rs b/testsuite/smoke-test/src/rosetta.rs
index 8b7a36b4652e1..8d99e1199454e 100644
--- a/testsuite/smoke-test/src/rosetta.rs
+++ b/testsuite/smoke-test/src/rosetta.rs
@@ -593,13 +593,10 @@ async fn test_transfer() {
     let receiver = AccountAddress::from_hex_literal("0xBEEF").unwrap();
     let sender_private_key = cli.private_key(0);
     let sender_balance = client
-        .get_account_balance(sender)
+        .view_apt_account_balance(sender)
         .await
         .unwrap()
-        .into_inner()
-        .coin
-        .value
-        .0;
+        .into_inner();
     let network = NetworkIdentifier::from(chain_id);
     let node_clients = NodeClients {
         rosetta_client: &rosetta_client,
@@ -663,25 +660,19 @@ async fn test_transfer() {
     // Sender balance should be 0
     assert_eq!(
         client
-            .get_account_balance(sender)
+            .view_apt_account_balance(sender)
             .await
             .unwrap()
-            .into_inner()
-            .coin
-            .value
-            .0,
+            .into_inner(),
         0
     );
     // Receiver should be sent coins
     assert_eq!(
         client
-            .get_account_balance(receiver)
+            .view_apt_account_balance(receiver)
             .await
             .unwrap()
-            .into_inner()
-            .coin
-            .value
-            .0,
+            .into_inner(),
         max_sent
     );
 }
diff --git a/testsuite/smoke-test/src/utils.rs b/testsuite/smoke-test/src/utils.rs
index 05d0f5c9bbcd4..ca8f761d86071 100644
--- a/testsuite/smoke-test/src/utils.rs
+++ b/testsuite/smoke-test/src/utils.rs
@@ -189,12 +189,12 @@ pub async fn transfer_and_maybe_reconfig(

 pub async fn assert_balance(client: &RestClient, account: &LocalAccount, balance: u64) {
     let on_chain_balance = client
-        .get_account_balance(account.address())
+        .view_apt_account_balance(account.address())
         .await
         .unwrap()
         .into_inner();

-    assert_eq!(on_chain_balance.get(), balance);
+    assert_eq!(on_chain_balance, balance);
 }

 /// This helper function creates 3 new accounts, mints funds, transfers funds

From 543d75f4451f6dcfc6c1dfcb14fce67f027b9188 Mon Sep 17 00:00:00 2001
From: zhoujunma
Date: Thu, 10 Oct 2024 13:23:38 -0700
Subject: [PATCH 02/22] handle max_exp_data_secs overflow (#14931)

---
 keyless/pepper/service/src/lib.rs      | 12 ++++++-
 keyless/pepper/service/src/tests.rs    | 50 ++++++++++++++++++++++++++
 types/src/keyless/circuit_testcases.rs | 42 +++++++++++++++-------
 types/src/keyless/mod.rs               |  2 +-
 types/src/keyless/test_utils.rs        | 16 +++++----
 5 files changed, 101 insertions(+), 21 deletions(-)
 create mode 100644 keyless/pepper/service/src/tests.rs

diff --git a/keyless/pepper/service/src/lib.rs b/keyless/pepper/service/src/lib.rs
index 0c52182fd1945..f231a7fd4d8f7 100644
--- a/keyless/pepper/service/src/lib.rs
+++ b/keyless/pepper/service/src/lib.rs
@@ -320,7 +320,14 @@ async fn process_common(
         return Err(BadRequest("epk expired".to_string()));
     }

-    if exp_date_secs >= claims.claims.iat + config.max_exp_horizon_secs {
+    let (max_exp_data_secs, overflowed) = claims
+        .claims
+        .iat
+        .overflowing_add(config.max_exp_horizon_secs);
+    if overflowed {
+        return Err(BadRequest("max_exp_data_secs overflowed".to_string()));
+    }
+    if exp_date_secs >= max_exp_data_secs {
         return Err(BadRequest("epk expiry date too far".to_string()));
     }

@@ -538,3 +545,6 @@ async fn update_account_recovery_db(input: &PepperInput) -> Result<(), Processin
         },
     }
 }
+
+#[cfg(test)]
+mod tests;
diff --git a/keyless/pepper/service/src/tests.rs b/keyless/pepper/service/src/tests.rs
new file mode 100644
index 0000000000000..58b09c0bb79a6
--- /dev/null
+++ b/keyless/pepper/service/src/tests.rs
@@ -0,0 +1,50 @@
+// Copyright (c) Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{process_common, ProcessingFailure};
+use aptos_crypto::ed25519::Ed25519PublicKey;
+use aptos_types::{
+    keyless::{
+        circuit_testcases::{
+            sample_jwt_payload_json_overrides, SAMPLE_EXP_DATE, SAMPLE_JWT_EXTRA_FIELD,
+            SAMPLE_NONCE, SAMPLE_TEST_ISS_VALUE, SAMPLE_UID_VAL,
+        },
+        test_utils::{get_sample_epk_blinder, get_sample_esk, get_sample_jwt_token_from_payload},
+    },
+    transaction::authenticator::EphemeralPublicKey,
+};
+use uuid::Uuid;
+
+#[tokio::test]
+async fn process_common_should_fail_if_max_exp_data_secs_overflowed() {
+    let session_id = Uuid::new_v4();
+    let sk = get_sample_esk();
+    let pk = Ed25519PublicKey::from(&sk);
+
+    let jwt_payload = sample_jwt_payload_json_overrides(
+        SAMPLE_TEST_ISS_VALUE,
+        SAMPLE_UID_VAL,
+        SAMPLE_JWT_EXTRA_FIELD.as_str(),
+        u64::MAX - 1, // unusual iat
+        SAMPLE_NONCE.as_str(),
+    );
+
+    let jwt = get_sample_jwt_token_from_payload(&jwt_payload);
+
+    let process_result = process_common(
+        &session_id,
+        jwt,
+        EphemeralPublicKey::ed25519(pk),
+        SAMPLE_EXP_DATE,
+        get_sample_epk_blinder(),
+        None,
+        None,
+        false,
+        None,
+        false,
+    )
+    .await;
+    assert!(
+        matches!(process_result, Err(ProcessingFailure::BadRequest(e)) if e.as_str() == "max_exp_data_secs overflowed")
+    );
+}
diff --git a/types/src/keyless/circuit_testcases.rs b/types/src/keyless/circuit_testcases.rs
index f58ae07a08ed7..b542d26965232 100644
--- a/types/src/keyless/circuit_testcases.rs
+++ b/types/src/keyless/circuit_testcases.rs
@@ -38,7 +38,7 @@ pub(crate) static SAMPLE_JWT_HEADER_B64: Lazy<String> =

 /// The JWT payload, decoded as JSON
-static SAMPLE_NONCE: Lazy<String> = Lazy::new(|| {
+pub static SAMPLE_NONCE: Lazy<String> = Lazy::new(|| {
     let config = Configuration::new_for_testing();
     OpenIdSig::reconstruct_oauth_nonce(
         SAMPLE_EPK_BLINDER.as_slice(),
@@ -49,9 +49,25 @@ static SAMPLE_NONCE: Lazy<String> = Lazy::new(|| {
     .unwrap()
 });

-pub(crate) const SAMPLE_TEST_ISS_VALUE: &str = "test.oidc.provider";
+pub const SAMPLE_TEST_ISS_VALUE: &str = "test.oidc.provider";

-pub(crate) static SAMPLE_JWT_PAYLOAD_JSON: Lazy<String> = Lazy::new(|| {
+pub fn sample_jwt_payload_json() -> String {
+    sample_jwt_payload_json_overrides(
+        SAMPLE_TEST_ISS_VALUE,
+        SAMPLE_UID_VAL,
+        SAMPLE_JWT_EXTRA_FIELD.as_str(),
+        SAMPLE_JWT_IAT,
+        SAMPLE_NONCE.as_str(),
+    )
+}
+
+pub fn sample_jwt_payload_json_overrides(
+    iss: &str,
+    uid_val: &str,
+    extra_field: &str,
+    iat: u64,
+    nonce: &str,
+) -> String {
     format!(
         r#"{{
            "iss":"{}",
@@ -67,27 +83,27 @@
            "given_name":"Michael",
            {}
            "locale":"en",
-           "iat":1700255944,
+           "iat":{},
            "nonce":"{}",
            "exp":2700259544
        }}"#,
-        SAMPLE_TEST_ISS_VALUE,
-        SAMPLE_UID_VAL,
-        SAMPLE_JWT_EXTRA_FIELD.as_str(),
-        SAMPLE_NONCE.as_str()
+        iss, uid_val, extra_field, iat, nonce
     )
-});
+}
+
+/// An example IAT.
+pub const SAMPLE_JWT_IAT: u64 = 1700255944;

 /// Consistent with what is in `SAMPLE_JWT_PAYLOAD_JSON`
 pub(crate) const SAMPLE_JWT_EXTRA_FIELD_KEY: &str = "family_name";

 /// Consistent with what is in `SAMPLE_JWT_PAYLOAD_JSON`
-pub(crate) static SAMPLE_JWT_EXTRA_FIELD: Lazy<String> =
+pub static SAMPLE_JWT_EXTRA_FIELD: Lazy<String> =
     Lazy::new(|| format!(r#""{}":"Straka","#, SAMPLE_JWT_EXTRA_FIELD_KEY));

 /// The JWT parsed as a struct
 pub(crate) static SAMPLE_JWT_PARSED: Lazy<Claims> =
-    Lazy::new(|| serde_json::from_str(SAMPLE_JWT_PAYLOAD_JSON.as_str()).unwrap());
+    Lazy::new(|| serde_json::from_str(sample_jwt_payload_json().as_str()).unwrap());

 pub(crate) static SAMPLE_JWK: Lazy<RSA_JWK> = Lazy::new(insecure_test_rsa_jwk);
@@ -97,10 +113,10 @@ pub(crate) static SAMPLE_JWK_SK: Lazy<&RsaKeyPair> = Lazy::new(|| &*INSECURE_TES

 pub(crate) const SAMPLE_UID_KEY: &str = "sub";

-pub(crate) const SAMPLE_UID_VAL: &str = "113990307082899718775";
+pub const SAMPLE_UID_VAL: &str = "113990307082899718775";

 /// The nonce-committed expiration date (not the JWT `exp`), 12/21/5490
-pub(crate) const SAMPLE_EXP_DATE: u64 = 111_111_111_111;
+pub const SAMPLE_EXP_DATE: u64 = 111_111_111_111;

 /// ~31,710 years
 pub(crate) const SAMPLE_EXP_HORIZON_SECS: u64 = 999_999_999_999;
diff --git a/types/src/keyless/mod.rs b/types/src/keyless/mod.rs
index c4209cfaada15..78e5b5365b7d4 100644
--- a/types/src/keyless/mod.rs
+++ b/types/src/keyless/mod.rs
@@ -23,7 +23,7 @@ use std::{

 mod bn254_circom;
 mod circuit_constants;
-mod circuit_testcases;
+pub mod circuit_testcases;
 mod configuration;
 mod groth16_sig;
 mod groth16_vk;
diff --git a/types/src/keyless/test_utils.rs b/types/src/keyless/test_utils.rs
index a35e9602173a5..250175d9428b2 100644
--- a/types/src/keyless/test_utils.rs
+++ b/types/src/keyless/test_utils.rs
@@ -7,9 +7,9 @@ use crate::{
     keyless::{
         base64url_encode_str,
         circuit_testcases::{
-            SAMPLE_EPK, SAMPLE_EPK_BLINDER, SAMPLE_ESK, SAMPLE_EXP_DATE, SAMPLE_EXP_HORIZON_SECS,
-            SAMPLE_JWK, SAMPLE_JWK_SK, SAMPLE_JWT_EXTRA_FIELD, SAMPLE_JWT_HEADER_B64,
-            SAMPLE_JWT_HEADER_JSON, SAMPLE_JWT_PARSED, SAMPLE_JWT_PAYLOAD_JSON, SAMPLE_PEPPER,
+            sample_jwt_payload_json, SAMPLE_EPK, SAMPLE_EPK_BLINDER, SAMPLE_ESK, SAMPLE_EXP_DATE,
+            SAMPLE_EXP_HORIZON_SECS, SAMPLE_JWK, SAMPLE_JWK_SK, SAMPLE_JWT_EXTRA_FIELD,
+            SAMPLE_JWT_HEADER_B64, SAMPLE_JWT_HEADER_JSON, SAMPLE_JWT_PARSED, SAMPLE_PEPPER,
             SAMPLE_PK, SAMPLE_PROOF, SAMPLE_PROOF_FOR_UPGRADED_VK, SAMPLE_PROOF_NO_EXTRA_FIELD,
             SAMPLE_UID_KEY, SAMPLE_UID_VAL, SAMPLE_UPGRADED_VK,
         },
@@ -272,8 +272,12 @@ pub fn get_sample_groth16_sig_and_pk_no_extra_field() -> (KeylessSignature, Keyl
 }

 pub fn get_sample_jwt_token() -> String {
+    get_sample_jwt_token_from_payload(sample_jwt_payload_json().as_str())
+}
+
+pub fn get_sample_jwt_token_from_payload(payload: &str) -> String {
     let jwt_header_b64 = SAMPLE_JWT_HEADER_B64.to_string();
-    let jwt_payload_b64 = base64url_encode_str(SAMPLE_JWT_PAYLOAD_JSON.as_str());
+    let jwt_payload_b64 = base64url_encode_str(payload);
     let msg = jwt_header_b64.clone() + "." + jwt_payload_b64.as_str();
     let rng = ring::rand::SystemRandom::new();
     let sk = &*SAMPLE_JWK_SK;
@@ -296,7 +300,7 @@ pub fn get_sample_jwt_token() -> String {
 /// desired TXN.
 pub fn get_sample_openid_sig_and_pk() -> (KeylessSignature, KeylessPublicKey) {
     let jwt_header_b64 = SAMPLE_JWT_HEADER_B64.to_string();
-    let jwt_payload_b64 = base64url_encode_str(SAMPLE_JWT_PAYLOAD_JSON.as_str());
+    let jwt_payload_b64 = base64url_encode_str(sample_jwt_payload_json().as_str());
     let msg = jwt_header_b64.clone() + "." + jwt_payload_b64.as_str();
     let rng = ring::rand::SystemRandom::new();
     let sk = *SAMPLE_JWK_SK;
@@ -312,7 +316,7 @@ pub fn get_sample_openid_sig_and_pk() -> (KeylessSignature, KeylessPublicKey) {

     let openid_sig = OpenIdSig {
         jwt_sig,
-        jwt_payload_json: SAMPLE_JWT_PAYLOAD_JSON.to_string(),
+        jwt_payload_json: sample_jwt_payload_json().to_string(),
         uid_key: SAMPLE_UID_KEY.to_owned(),
         epk_blinder: SAMPLE_EPK_BLINDER.clone(),
         pepper: SAMPLE_PEPPER.clone(),

From 136058aaf64bf1b47b1102790cc7ad1f3cc10b4e Mon Sep 17 00:00:00 2001
From: Alden Hu
Date: Thu, 10 Oct 2024 14:01:59 -0700
Subject: [PATCH 03/22] introduce ChunkResultVerifier (#14912)

---
 .../src/ledger_update_output.rs               |  61 ------
 execution/executor/src/chunk_executor.rs      | 174 +++++++++---------
 .../src/components/chunk_commit_queue.rs      |  14 +-
 .../executor/src/components/chunk_output.rs   |   6 +-
 .../src/components/chunk_result_verifier.rs   | 135 ++++++++++++++
 execution/executor/src/components/mod.rs      |   1 +
 .../src/components/transaction_chunk.rs       |  97 ++++------
 execution/executor/src/tests/mod.rs           |   7 +-
 8 files changed, 271 insertions(+), 224 deletions(-)
 create mode 100644 execution/executor/src/components/chunk_result_verifier.rs

diff --git a/execution/executor-types/src/ledger_update_output.rs b/execution/executor-types/src/ledger_update_output.rs
index b2226a2321a45..9bb7f58685447 100644
--- a/execution/executor-types/src/ledger_update_output.rs
+++ b/execution/executor-types/src/ledger_update_output.rs
@@ -10,7 +10,6 @@ use aptos_storage_interface::cached_state_view::ShardedStateCache;
 use aptos_types::{
     contract_event::ContractEvent,
     epoch_state::EpochState,
-    ledger_info::LedgerInfoWithSignatures,
     proof::accumulator::InMemoryTransactionAccumulator,
     state_store::{combine_or_add_sharded_state_updates, ShardedStateUpdates},
     transaction::{
@@ -72,66 +71,6 @@ impl LedgerUpdateOutput {
         Ok(())
     }

-    pub fn maybe_select_chunk_ending_ledger_info(
-        &self,
-        verified_target_li: &LedgerInfoWithSignatures,
-        epoch_change_li: Option<&LedgerInfoWithSignatures>,
-        next_epoch_state: Option<&EpochState>,
-    ) -> Result<Option<LedgerInfoWithSignatures>> {
-        if verified_target_li.ledger_info().version() + 1
-            == self.transaction_accumulator.num_leaves()
-        {
-            // If the chunk corresponds to the target LI, the target LI can be added to storage.
-            ensure!(
-                verified_target_li
-                    .ledger_info()
-                    .transaction_accumulator_hash()
-                    == self.transaction_accumulator.root_hash(),
-                "Root hash in target ledger info does not match local computation. {:?} != {:?}",
-                verified_target_li,
-                self.transaction_accumulator,
-            );
-            Ok(Some(verified_target_li.clone()))
-        } else if let Some(epoch_change_li) = epoch_change_li {
-            // If the epoch change LI is present, it must match the version of the chunk:
-
-            // Verify that the given ledger info corresponds to the new accumulator.
-            ensure!(
-                epoch_change_li.ledger_info().transaction_accumulator_hash()
-                    == self.transaction_accumulator.root_hash(),
-                "Root hash of a given epoch LI does not match local computation. {:?} vs {:?}",
-                epoch_change_li,
-                self.transaction_accumulator,
-            );
-            ensure!(
-                epoch_change_li.ledger_info().version() + 1
-                    == self.transaction_accumulator.num_leaves(),
-                "Version of a given epoch LI does not match local computation. {:?} vs {:?}",
-                epoch_change_li,
-                self.transaction_accumulator,
-            );
-            ensure!(
-                epoch_change_li.ledger_info().ends_epoch(),
-                "Epoch change LI does not carry validator set. version:{}",
-                epoch_change_li.ledger_info().version(),
-            );
-            ensure!(
-                epoch_change_li.ledger_info().next_epoch_state() == next_epoch_state,
-                "New validator set of a given epoch LI does not match local computation. {:?} vs {:?}",
-                epoch_change_li.ledger_info().next_epoch_state(),
-                next_epoch_state,
-            );
-            Ok(Some(epoch_change_li.clone()))
-        } else {
-            ensure!(
-                next_epoch_state.is_none(),
version: {:?}", - self.transaction_accumulator.num_leaves().checked_sub(1), - ); - Ok(None) - } - } - pub fn ensure_transaction_infos_match( &self, transaction_infos: &[TransactionInfo], diff --git a/execution/executor/src/chunk_executor.rs b/execution/executor/src/chunk_executor.rs index 134ed64f6cf32..fbe4d1f8d1b32 100644 --- a/execution/executor/src/chunk_executor.rs +++ b/execution/executor/src/chunk_executor.rs @@ -9,14 +9,14 @@ use crate::{ apply_chunk_output::{ensure_no_discard, ensure_no_retry, ApplyChunkOutput}, chunk_commit_queue::{ChunkCommitQueue, ChunkToUpdateLedger}, chunk_output::ChunkOutput, + chunk_result_verifier::{ChunkResultVerifier, StateSyncChunkVerifier}, executed_chunk::ExecutedChunk, - transaction_chunk::TransactionChunkWithProof, + transaction_chunk::{ChunkToApply, ChunkToExecute, TransactionChunk}, }, logging::{LogEntry, LogSchema}, metrics::{APPLY_CHUNK, CHUNK_OTHER_TIMERS, COMMIT_CHUNK, CONCURRENCY_GAUGE, EXECUTE_CHUNK}, }; -use anyhow::{ensure, Result}; -use aptos_crypto::HashValue; +use anyhow::{anyhow, ensure, Result}; use aptos_drop_helper::DEFAULT_DROPPER; use aptos_executor_types::{ ChunkCommitNotification, ChunkExecutorTrait, ParsedTransactionOutput, TransactionReplayer, @@ -33,8 +33,7 @@ use aptos_storage_interface::{ use aptos_types::{ block_executor::config::BlockExecutorConfigFromOnchain, contract_event::ContractEvent, - ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, - proof::TransactionInfoListWithProof, + ledger_info::LedgerInfoWithSignatures, state_store::StateViewId, transaction::{ signature_verified_transaction::SignatureVerifiedTransaction, Transaction, @@ -107,14 +106,36 @@ impl ChunkExecutorTrait for ChunkExecutor { let _timer = EXECUTE_CHUNK.start_timer(); self.maybe_initialize()?; - self.with_inner(|inner| { - inner.enqueue_chunk( - txn_list_with_proof, - verified_target_li, - epoch_change_li, - "execute", - ) - }) + + // Verify input data. + // In consensus-only mode, txn_list_with_proof is fake. + if !cfg!(feature = "consensus-only-perf-test") { + txn_list_with_proof.verify( + verified_target_li.ledger_info(), + txn_list_with_proof.first_transaction_version, + )?; + } + + // Compose enqueue_chunk parameters. + let TransactionListWithProof { + transactions, + events: _, + first_transaction_version: v, + proof: txn_infos_with_proof, + } = txn_list_with_proof; + + let chunk = ChunkToExecute { + transactions, + first_version: v.ok_or_else(|| anyhow!("first version is None"))?, + }; + let chunk_verifier = Arc::new(StateSyncChunkVerifier { + txn_infos_with_proof, + verified_target_li: verified_target_li.clone(), + epoch_change_li: epoch_change_li.cloned(), + }); + + // Call the shared implementation. + self.with_inner(|inner| inner.enqueue_chunk(chunk, chunk_verifier, "execute")) } fn enqueue_chunk_by_transaction_outputs( @@ -126,14 +147,36 @@ impl ChunkExecutorTrait for ChunkExecutor { let _guard = CONCURRENCY_GAUGE.concurrency_with(&["chunk", "enqueue_by_outputs"]); let _timer = APPLY_CHUNK.start_timer(); - self.with_inner(|inner| { - inner.enqueue_chunk( - txn_output_list_with_proof, - verified_target_li, - epoch_change_li, - "apply", + // Verify input data. + THREAD_MANAGER.get_exe_cpu_pool().install(|| { + let _timer = CHUNK_OTHER_TIMERS.timer_with(&["apply_chunk__verify"]); + txn_output_list_with_proof.verify( + verified_target_li.ledger_info(), + txn_output_list_with_proof.first_transaction_output_version, ) - }) + })?; + + // Compose enqueue_chunk parameters. 
+ let TransactionOutputListWithProof { + transactions_and_outputs, + first_transaction_output_version: v, + proof: txn_infos_with_proof, + } = txn_output_list_with_proof; + let (transactions, transaction_outputs) = transactions_and_outputs.into_iter().unzip(); + + let chunk = ChunkToApply { + transactions, + transaction_outputs, + first_version: v.ok_or_else(|| anyhow!("first version is None"))?, + }; + let chunk_verifier = Arc::new(StateSyncChunkVerifier { + txn_infos_with_proof, + verified_target_li: verified_target_li.clone(), + epoch_change_li: epoch_change_li.cloned(), + }); + + // Call the shared implementation. + self.with_inner(|inner| inner.enqueue_chunk(chunk, chunk_verifier, "apply")) } fn update_ledger(&self) -> Result<()> { @@ -197,25 +240,6 @@ impl ChunkExecutorInner { )?) } - fn verify_extends_ledger( - &self, - proof: &TransactionInfoListWithProof, - first_version: Version, - my_root_hash: HashValue, - ) -> Result<()> { - // In consensus-only mode, we cannot verify the proof against the executed output, - // because the proof returned by the remote peer is an empty one. - if cfg!(feature = "consensus-only-perf-test") { - return Ok(()); - } - - let num_overlap = - proof.verify_extends_ledger(first_version, my_root_hash, Some(first_version))?; - assert_eq!(num_overlap, 0, "overlapped chunks"); - - Ok(()) - } - fn commit_chunk_impl(&self) -> Result { let _timer = CHUNK_OTHER_TIMERS.timer_with(&["commit_chunk_impl__total"]); let (persisted_state, chunk) = { @@ -255,42 +279,27 @@ impl ChunkExecutorInner { Ok(chunk) } - fn verify_chunk( - chunk: &Chunk, - ledger_info: &LedgerInfo, - first_version: Version, - ) -> Result<()> { - // In consensus-only mode, the [TransactionListWithProof](transaction list) is *not* - // verified against the proof and the [LedgerInfoWithSignatures](ledger info). - // This is because the [FakeAptosDB] from where these transactions come from - // returns an empty proof and not an actual proof, so proof verification will - // fail regardless. This function does not skip any transactions that may be - // already in the ledger, because it is not necessary as execution is disabled. - if cfg!(feature = "consensus-only-perf-test") { - return Ok(()); - } - - THREAD_MANAGER - .get_exe_cpu_pool() - .install(|| chunk.verify_chunk(ledger_info, first_version)) - } - // ************************* Chunk Executor Implementation ************************* - fn enqueue_chunk( + fn enqueue_chunk( &self, chunk: Chunk, - verified_target_li: &LedgerInfoWithSignatures, - epoch_change_li: Option<&LedgerInfoWithSignatures>, + chunk_verifier: Arc, mode_for_log: &'static str, ) -> Result<()> { let parent_state = self.commit_queue.lock().latest_state(); let first_version = parent_state.next_version(); - Self::verify_chunk(&chunk, verified_target_li.ledger_info(), first_version)?; + ensure!( + chunk.first_version() == parent_state.next_version(), + "Chunk carries unexpected first version. 
Expected: {}, got: {}", + parent_state.next_version(), + chunk.first_version(), + ); + let num_txns = chunk.len(); let state_view = self.latest_state_view(&parent_state)?; - let (chunk_output, txn_infos_with_proof) = chunk.into_chunk_output::(state_view)?; + let chunk_output = chunk.into_output::(state_view)?; // Calculate state snapshot let (result_state, next_epoch_state, state_checkpoint_output) = @@ -298,7 +307,7 @@ impl ChunkExecutorInner { chunk_output, &self.commit_queue.lock().latest_state(), None, // append_state_checkpoint_to_block - Some(txn_infos_with_proof.state_checkpoint_hashes()), + Some(chunk_verifier.state_checkpoint_hashes()), false, // is_block )?; @@ -309,9 +318,7 @@ impl ChunkExecutorInner { result_state, state_checkpoint_output, next_epoch_state, - verified_target_li: verified_target_li.clone(), - epoch_change_li: epoch_change_li.cloned(), - txn_infos_with_proof, + chunk_verifier, })?; info!( @@ -336,29 +343,24 @@ impl ChunkExecutorInner { result_state, state_checkpoint_output, next_epoch_state, - verified_target_li, - epoch_change_li, - txn_infos_with_proof, + chunk_verifier, } = chunk; let first_version = parent_accumulator.num_leaves(); - self.verify_extends_ledger( - &txn_infos_with_proof, - first_version, - parent_accumulator.root_hash(), - )?; - let (ledger_update_output, to_discard, to_retry) = { let _timer = CHUNK_OTHER_TIMERS.timer_with(&["chunk_update_ledger__calculate"]); - ApplyChunkOutput::calculate_ledger_update(state_checkpoint_output, parent_accumulator)? + ApplyChunkOutput::calculate_ledger_update( + state_checkpoint_output, + parent_accumulator.clone(), + )? }; + ensure!(to_discard.is_empty(), "Unexpected discard."); ensure!(to_retry.is_empty(), "Unexpected retry."); - ledger_update_output - .ensure_transaction_infos_match(&txn_infos_with_proof.transaction_infos)?; - let ledger_info_opt = ledger_update_output.maybe_select_chunk_ending_ledger_info( - &verified_target_li, - epoch_change_li.as_ref(), + chunk_verifier.verify_chunk_result(&parent_accumulator, &ledger_update_output)?; + + let ledger_info_opt = chunk_verifier.maybe_select_chunk_ending_ledger_info( + &ledger_update_output, next_epoch_state.as_ref(), )?; @@ -648,7 +650,7 @@ impl ChunkExecutorInner { ) -> Result<()> { let num_txns = (end_version - begin_version) as usize; let txn_infos: Vec<_> = transaction_infos.drain(..num_txns).collect(); - let txns_and_outputs = multizip(( + let (txns, txn_outs) = multizip(( transactions.drain(..num_txns), txn_infos.iter(), write_sets.drain(..num_txns), @@ -666,10 +668,10 @@ impl ChunkExecutorInner { ), ) }) - .collect(); + .unzip(); let state_view = self.latest_state_view(latest_view.state())?; - let chunk_output = ChunkOutput::by_transaction_output(txns_and_outputs, state_view)?; + let chunk_output = ChunkOutput::by_transaction_output(txns, txn_outs, state_view)?; let (executed_batch, to_discard, to_retry) = chunk_output.apply_to_ledger( latest_view, Some( diff --git a/execution/executor/src/components/chunk_commit_queue.rs b/execution/executor/src/components/chunk_commit_queue.rs index 244e24712ccc9..54c69a2f61afe 100644 --- a/execution/executor/src/components/chunk_commit_queue.rs +++ b/execution/executor/src/components/chunk_commit_queue.rs @@ -4,14 +4,14 @@ #![forbid(unsafe_code)] -use crate::components::executed_chunk::ExecutedChunk; +use crate::components::{ + chunk_result_verifier::ChunkResultVerifier, executed_chunk::ExecutedChunk, +}; use anyhow::{anyhow, ensure, Result}; use aptos_executor_types::state_checkpoint_output::StateCheckpointOutput; 
 use aptos_storage_interface::{state_delta::StateDelta, DbReader, ExecutedTrees};
 use aptos_types::{
-    epoch_state::EpochState,
-    ledger_info::LedgerInfoWithSignatures,
-    proof::{accumulator::InMemoryTransactionAccumulator, TransactionInfoListWithProof},
+    epoch_state::EpochState, proof::accumulator::InMemoryTransactionAccumulator,
     transaction::Version,
 };
 use std::{collections::VecDeque, sync::Arc};
@@ -23,11 +23,9 @@ pub(crate) struct ChunkToUpdateLedger {
     /// If set, this is the new epoch info that should be changed to if this is committed.
     pub next_epoch_state: Option<EpochState>,

-    /// the below are from the input -- can be checked / used only after the transaction accumulator
+    /// from the input -- can be checked / used only after the transaction accumulator
     /// is updated.
-    pub verified_target_li: LedgerInfoWithSignatures,
-    pub epoch_change_li: Option<LedgerInfoWithSignatures>,
-    pub txn_infos_with_proof: TransactionInfoListWithProof,
+    pub chunk_verifier: Arc<dyn ChunkResultVerifier + Send + Sync>,
 }

 /// It's a two stage pipeline:
diff --git a/execution/executor/src/components/chunk_output.rs b/execution/executor/src/components/chunk_output.rs
index 65777ef2e0d61..9c5019dd8db64 100644
--- a/execution/executor/src/components/chunk_output.rs
+++ b/execution/executor/src/components/chunk_output.rs
@@ -116,12 +116,10 @@ impl ChunkOutput {
     }

     pub fn by_transaction_output(
-        transactions_and_outputs: Vec<(Transaction, TransactionOutput)>,
+        transactions: Vec<Transaction>,
+        transaction_outputs: Vec<TransactionOutput>,
         state_view: CachedStateView,
     ) -> Result<Self> {
-        let (transactions, transaction_outputs): (Vec<_>, Vec<_>) =
-            transactions_and_outputs.into_iter().unzip();
-
         update_counters_for_processed_chunk(&transactions, &transaction_outputs, "output");

         // collect all accounts touched and dedup
diff --git a/execution/executor/src/components/chunk_result_verifier.rs b/execution/executor/src/components/chunk_result_verifier.rs
new file mode 100644
index 0000000000000..d9723dc0c57b5
--- /dev/null
+++ b/execution/executor/src/components/chunk_result_verifier.rs
@@ -0,0 +1,135 @@
+// Copyright (c) Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::{ensure, Result};
+use aptos_crypto::HashValue;
+use aptos_executor_types::LedgerUpdateOutput;
+use aptos_experimental_runtimes::thread_manager::THREAD_MANAGER;
+use aptos_types::{
+    epoch_state::EpochState,
+    ledger_info::LedgerInfoWithSignatures,
+    proof::{accumulator::InMemoryTransactionAccumulator, TransactionInfoListWithProof},
+    transaction::TransactionInfo,
+};
+
+pub trait ChunkResultVerifier {
+    fn verify_chunk_result(
+        &self,
+        parent_accumulator: &InMemoryTransactionAccumulator,
+        ledger_update_output: &LedgerUpdateOutput,
+    ) -> Result<()>;
+
+    fn transaction_infos(&self) -> &[TransactionInfo];
+
+    fn state_checkpoint_hashes(&self) -> Vec<Option<HashValue>> {
+        self.transaction_infos()
+            .iter()
+            .map(|t| t.state_checkpoint_hash())
+            .collect()
+    }
+
+    fn maybe_select_chunk_ending_ledger_info(
+        &self,
+        ledger_update_output: &LedgerUpdateOutput,
+        next_epoch_state: Option<&EpochState>,
+    ) -> Result<Option<LedgerInfoWithSignatures>>;
+}
+
+pub struct StateSyncChunkVerifier {
+    pub txn_infos_with_proof: TransactionInfoListWithProof,
+    pub verified_target_li: LedgerInfoWithSignatures,
+    pub epoch_change_li: Option<LedgerInfoWithSignatures>,
+}
+
+impl ChunkResultVerifier for StateSyncChunkVerifier {
+    fn verify_chunk_result(
+        &self,
+        parent_accumulator: &InMemoryTransactionAccumulator,
+        ledger_update_output: &LedgerUpdateOutput,
+    ) -> Result<()> {
+        // In consensus-only mode, we cannot verify the proof against the executed output,
+        // because the proof returned by the remote peer is an empty one.
+        if cfg!(feature = "consensus-only-perf-test") {
+            return Ok(());
+        }
+
+        THREAD_MANAGER.get_exe_cpu_pool().install(|| {
+            let first_version = parent_accumulator.num_leaves();
+
+            // Verify the chunk extends the parent accumulator.
+            let parent_root_hash = parent_accumulator.root_hash();
+            let num_overlap = self.txn_infos_with_proof.verify_extends_ledger(
+                first_version,
+                parent_root_hash,
+                Some(first_version),
+            )?;
+            assert_eq!(num_overlap, 0, "overlapped chunks");
+
+            // Verify transaction infos match
+            ledger_update_output
+                .ensure_transaction_infos_match(&self.txn_infos_with_proof.transaction_infos)?;
+
+            Ok(())
+        })
+    }
+
+    fn transaction_infos(&self) -> &[TransactionInfo] {
+        &self.txn_infos_with_proof.transaction_infos
+    }
+
+    fn maybe_select_chunk_ending_ledger_info(
+        &self,
+        ledger_update_output: &LedgerUpdateOutput,
+        next_epoch_state: Option<&EpochState>,
+    ) -> Result<Option<LedgerInfoWithSignatures>> {
+        let li = self.verified_target_li.ledger_info();
+        let txn_accumulator = &ledger_update_output.transaction_accumulator;
+
+        if li.version() + 1 == txn_accumulator.num_leaves() {
+            // If the chunk corresponds to the target LI, the target LI can be added to storage.
+            ensure!(
+                li.transaction_accumulator_hash() == txn_accumulator.root_hash(),
+                "Root hash in target ledger info does not match local computation. {:?} != {:?}",
+                li,
+                txn_accumulator,
+            );
+            Ok(Some(self.verified_target_li.clone()))
+        } else if let Some(epoch_change_li) = &self.epoch_change_li {
+            // If the epoch change LI is present, it must match the version of the chunk:
+            let li = epoch_change_li.ledger_info();
+
+            // Verify that the given ledger info corresponds to the new accumulator.
+            ensure!(
+                li.transaction_accumulator_hash() == txn_accumulator.root_hash(),
+                "Root hash of a given epoch LI does not match local computation. {:?} vs {:?}",
+                li,
+                txn_accumulator,
+            );
+            ensure!(
+                li.version() + 1 == txn_accumulator.num_leaves(),
+                "Version of a given epoch LI does not match local computation. {:?} vs {:?}",
+                li,
+                txn_accumulator,
+            );
+            ensure!(
+                li.ends_epoch(),
+                "Epoch change LI does not carry validator set. version:{}",
+                li.version(),
+            );
+            ensure!(
+                li.next_epoch_state() == next_epoch_state,
+                "New validator set of a given epoch LI does not match local computation. {:?} vs {:?}",
+                li.next_epoch_state(),
+                next_epoch_state,
+            );
+            Ok(Some(epoch_change_li.clone()))
+        } else {
+            ensure!(
+                next_epoch_state.is_none(),
+                "End of epoch chunk based on local computation but no EoE LedgerInfo provided. version: {:?}",
+                txn_accumulator.num_leaves().checked_sub(1),
+            );
+            Ok(None)
+        }
+    }
+}
diff --git a/execution/executor/src/components/mod.rs b/execution/executor/src/components/mod.rs
index ae25d94c4933e..e4a96049b339c 100644
--- a/execution/executor/src/components/mod.rs
+++ b/execution/executor/src/components/mod.rs
@@ -10,5 +10,6 @@ pub mod chunk_commit_queue;
 pub mod chunk_output;
 pub mod in_memory_state_calculator_v2;

+pub mod chunk_result_verifier;
 pub mod executed_chunk;
 pub mod transaction_chunk;
diff --git a/execution/executor/src/components/transaction_chunk.rs b/execution/executor/src/components/transaction_chunk.rs
index 2346194ea3df4..41b995cfea428 100644
--- a/execution/executor/src/components/transaction_chunk.rs
+++ b/execution/executor/src/components/transaction_chunk.rs
@@ -11,9 +11,7 @@ use aptos_metrics_core::TimerHelper;
 use aptos_storage_interface::cached_state_view::CachedStateView;
 use aptos_types::{
     block_executor::config::BlockExecutorConfigFromOnchain,
-    ledger_info::LedgerInfo,
-    proof::TransactionInfoListWithProof,
-    transaction::{TransactionListWithProof, TransactionOutputListWithProof, Version},
+    transaction::{Transaction, TransactionOutput, Version},
 };
 use aptos_vm::VMExecutor;
 use once_cell::sync::Lazy;
@@ -30,12 +28,8 @@ pub static SIG_VERIFY_POOL: Lazy<Arc<rayon::ThreadPool>> = Lazy::new(|| {
     )
 });

-pub trait TransactionChunkWithProof {
-    fn verify_chunk(
-        &self,
-        ledger_info: &LedgerInfo,
-        first_transaction_version: Version,
-    ) -> Result<()>;
+pub trait TransactionChunk {
+    fn first_version(&self) -> Version;

     fn len(&self) -> usize;

@@ -43,37 +37,27 @@ pub trait TransactionChunkWithProof {
         self.len() == 0
     }

-    fn into_chunk_output(
-        self,
-        state_view: CachedStateView,
-    ) -> Result<(ChunkOutput, TransactionInfoListWithProof)>;
+    fn into_output(self, state_view: CachedStateView) -> Result<ChunkOutput>;
 }

-impl TransactionChunkWithProof for TransactionListWithProof {
-    fn verify_chunk(
-        &self,
-        ledger_info: &LedgerInfo,
-        first_transaction_version: Version,
-    ) -> Result<()> {
-        let _timer = CHUNK_OTHER_TIMERS.timer_with(&["verify_txn_chunk"]);
+pub struct ChunkToExecute {
+    pub transactions: Vec<Transaction>,
+    pub first_version: Version,
+}

-        self.proof
-            .verify(ledger_info, Some(first_transaction_version))
+impl TransactionChunk for ChunkToExecute {
+    fn first_version(&self) -> Version {
+        self.first_version
     }

     fn len(&self) -> usize {
         self.transactions.len()
     }

-    fn into_chunk_output(
-        self,
-        state_view: CachedStateView,
-    ) -> Result<(ChunkOutput, TransactionInfoListWithProof)> {
-        let TransactionListWithProof {
+    fn into_output(self, state_view: CachedStateView) -> Result<ChunkOutput> {
+        let ChunkToExecute {
             transactions,
-            events: _,
-            first_transaction_version: _,
-            proof: txn_infos_with_proof,
+            first_version: _,
         } = self;

         // TODO(skedia) In the chunk executor path, we ideally don't need to verify the signature
@@ -91,46 +75,37 @@ impl TransactionChunkWithProof for TransactionListWithProof {
             })
         };

-        let chunk_out = {
-            let _timer = VM_EXECUTE_CHUNK.start_timer();
-
-            ChunkOutput::by_transaction_execution::<V>(
-                sig_verified_txns.into(),
-                state_view,
-                BlockExecutorConfigFromOnchain::new_no_block_limit(),
-            )?
- }; - - Ok((chunk_out, txn_infos_with_proof)) + let _timer = VM_EXECUTE_CHUNK.start_timer(); + ChunkOutput::by_transaction_execution::( + sig_verified_txns.into(), + state_view, + BlockExecutorConfigFromOnchain::new_no_block_limit(), + ) } } -impl TransactionChunkWithProof for TransactionOutputListWithProof { - fn verify_chunk( - &self, - ledger_info: &LedgerInfo, - first_transaction_version: Version, - ) -> Result<()> { - self.proof - .verify(ledger_info, Some(first_transaction_version)) +pub struct ChunkToApply { + pub transactions: Vec, + pub transaction_outputs: Vec, + pub first_version: Version, +} + +impl TransactionChunk for ChunkToApply { + fn first_version(&self) -> Version { + self.first_version } fn len(&self) -> usize { - self.transactions_and_outputs.len() + self.transactions.len() } - fn into_chunk_output( - self, - state_view: CachedStateView, - ) -> Result<(ChunkOutput, TransactionInfoListWithProof)> { - let TransactionOutputListWithProof { - transactions_and_outputs, - first_transaction_output_version: _, - proof: txn_infos_with_proof, + fn into_output(self, state_view: CachedStateView) -> Result { + let Self { + transactions, + transaction_outputs, + first_version: _, } = self; - let chunk_out = ChunkOutput::by_transaction_output(transactions_and_outputs, state_view)?; - - Ok((chunk_out, txn_infos_with_proof)) + ChunkOutput::by_transaction_output(transactions, transaction_outputs, state_view) } } diff --git a/execution/executor/src/tests/mod.rs b/execution/executor/src/tests/mod.rs index 153427baa1987..d7c65868196a3 100644 --- a/execution/executor/src/tests/mod.rs +++ b/execution/executor/src/tests/mod.rs @@ -450,7 +450,7 @@ fn apply_transaction_by_writeset( ) { let ledger_view: ExecutedTrees = db.reader.get_latest_executed_trees().unwrap(); - let transactions_and_outputs = transactions_and_writesets + let (txns, txn_outs) = transactions_and_writesets .iter() .map(|(txn, write_set)| { ( @@ -474,7 +474,7 @@ fn apply_transaction_by_writeset( TransactionAuxiliaryData::default(), ), ))) - .collect(); + .unzip(); let state_view = ledger_view .verified_state_view( @@ -484,8 +484,7 @@ fn apply_transaction_by_writeset( ) .unwrap(); - let chunk_output = - ChunkOutput::by_transaction_output(transactions_and_outputs, state_view).unwrap(); + let chunk_output = ChunkOutput::by_transaction_output(txns, txn_outs, state_view).unwrap(); let (executed, _, _) = chunk_output.apply_to_ledger(&ledger_view, None).unwrap(); let ExecutedChunk { From 85bba157c95a7851637237c91b295f3a3b74fa0d Mon Sep 17 00:00:00 2001 From: Alan Luong Date: Thu, 10 Oct 2024 17:08:04 -0400 Subject: [PATCH 04/22] use runs-on runner for release images workflow (#14907) --- .github/workflows/copy-images-to-dockerhub.yaml | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/.github/workflows/copy-images-to-dockerhub.yaml b/.github/workflows/copy-images-to-dockerhub.yaml index a0063eef1c5bc..edb72cc8c715a 100644 --- a/.github/workflows/copy-images-to-dockerhub.yaml +++ b/.github/workflows/copy-images-to-dockerhub.yaml @@ -10,6 +10,11 @@ on: required: false type: string description: the git sha to use for the image tag. If not provided, the git sha of the triggering branch will be used + dry_run: + required: false + type: boolean + default: false + description: If true, run the workflow without actually pushing images workflow_dispatch: inputs: image_tag_prefix: @@ -21,6 +26,11 @@ on: required: false type: string description: the git sha to use for the image tag. 
If not provided, the git sha of the triggering branch will be used + dry_run: + required: false + type: boolean + default: false + description: If true, run the workflow without actually pushing images permissions: contents: read @@ -29,7 +39,7 @@ permissions: jobs: copy-images: # Run on a machine with more local storage for large docker images - runs-on: medium-perf-docker-with-local-ssd + runs-on: runs-on,cpu=16,family=m6id,hdd=500,image=aptos-ubuntu-x64,run-id=${{ github.run_id }} steps: - uses: actions/checkout@v4 @@ -61,4 +71,5 @@ jobs: AWS_ACCOUNT_ID: ${{ secrets.AWS_ECR_ACCOUNT_NUM }} GCP_DOCKER_ARTIFACT_REPO: ${{ vars.GCP_DOCKER_ARTIFACT_REPO }} IMAGE_TAG_PREFIX: ${{ inputs.image_tag_prefix }} - run: ./docker/release-images.mjs --wait-for-image-seconds=3600 + DRY_RUN: ${{ inputs.dry_run }} + run: ./docker/release-images.mjs --wait-for-image-seconds=3600 ${{ inputs.dry_run && '--dry-run' || '' }} From 5e5544186189473def1767c3251c6bafb4fe0beb Mon Sep 17 00:00:00 2001 From: igor-aptos <110557261+igor-aptos@users.noreply.github.com> Date: Thu, 10 Oct 2024 16:24:35 -0700 Subject: [PATCH 05/22] [single-node-performance] Add runner information in the output (#14932) * [single-node-performance] Add runner information in the output * adding skip move e2e * recalibration --- .../workflow-run-execution-performance.yaml | 24 ++- testsuite/single_node_performance.py | 141 +++++++++--------- 2 files changed, 91 insertions(+), 74 deletions(-) diff --git a/.github/workflows/workflow-run-execution-performance.yaml b/.github/workflows/workflow-run-execution-performance.yaml index 7bc43b1e9b7de..d57b0ff678bd0 100644 --- a/.github/workflows/workflow-run-execution-performance.yaml +++ b/.github/workflows/workflow-run-execution-performance.yaml @@ -22,10 +22,19 @@ on: default: false type: boolean description: Ignore target determination and run the tests + SKIP_MOVE_E2E: + required: false + default: false + type: boolean + description: Whether to run or skip move-only e2e tests at the beginning. SOURCE: required: false default: CI type: string + NUMBER_OF_EXECUTION_THREADS: + required: false + default: "32" + type: string # This allows the workflow to be triggered manually from the Github UI or CLI # NOTE: because the "number" type is not supported, we default to 720 minute timeout workflow_dispatch: @@ -43,9 +52,17 @@ on: - benchmark-t2d-32 - benchmark-t2d-60 - benchmark-c3d-30 + - benchmark-c3d-60 + - benchmark-c3d-180 - benchmark-n4-32 - benchmark-c4-32 + - benchmark-c4-48 + - benchmark-c4-96 description: The name of the runner to use for the test. (which decides machine specs) + NUMBER_OF_EXECUTION_THREADS: + required: false + default: "32" + type: string FLOW: required: false default: LAND_BLOCKING @@ -56,6 +73,11 @@ on: - MAINNET_LARGE_DB type: choice description: Which set of tests to run. MAINNET/MAINNET_LARGE_DB are for performance validation of mainnet nodes. + SKIP_MOVE_E2E: + required: false + default: false + type: boolean + description: Whether to skip move-only e2e tests at the beginning. 
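+      # Editorial note, not part of the PR: like FLOW and SOURCE, this input is
+      # forwarded to testsuite/single_node_performance.py by the benchmark run
+      # step below as SKIP_MOVE_E2E="${{ inputs.SKIP_MOVE_E2E && '1' || '' }}",
+      # so the boolean is flattened to "1" or "" before the script reads it.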
IGNORE_TARGET_DETERMINATION: required: false default: true @@ -98,7 +120,7 @@ jobs: - name: Run single node execution benchmark in performance build mode shell: bash - run: TABULATE_INSTALL=lib-only pip install tabulate && FLOW=${{ inputs.FLOW }} SOURCE=${{ inputs.SOURCE }} testsuite/single_node_performance.py + run: TABULATE_INSTALL=lib-only pip install tabulate && FLOW="${{ inputs.FLOW }}" SOURCE="${{ inputs.SOURCE }}" RUNNER_NAME="${{ inputs.RUNNER_NAME }}" SKIP_MOVE_E2E="${{ inputs.SKIP_MOVE_E2E && '1' || '' }}" NUMBER_OF_EXECUTION_THREADS="${{ inputs.NUMBER_OF_EXECUTION_THREADS }}" testsuite/single_node_performance.py if: ${{ (inputs.IGNORE_TARGET_DETERMINATION || needs.test-target-determinator.outputs.run_execution_performance_test == 'true') }} - run: echo "Skipping single node execution performance! Unrelated changes detected." diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index 3914acfbce9f1..228d202b656b9 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -45,10 +45,12 @@ class Flow(Flag): print(f"Unrecogznied source {SOURCE}") exit(1) +RUNNER_NAME = os.environ.get("RUNNER_NAME", default="none") + DEFAULT_NUM_INIT_ACCOUNTS = ( "100000000" if SELECTED_FLOW == Flow.MAINNET_LARGE_DB else "2000000" ) -DEFAULT_MAX_BLOCK_SIZE = "25000" if IS_MAINNET else "10000" +DEFAULT_MAX_BLOCK_SIZE = "10000" MAX_BLOCK_SIZE = int(os.environ.get("MAX_BLOCK_SIZE", default=DEFAULT_MAX_BLOCK_SIZE)) NUM_BLOCKS = int(os.environ.get("NUM_BLOCKS_PER_TEST", default=15)) @@ -62,11 +64,14 @@ class Flow(Flag): MAIN_SIGNER_ACCOUNTS = 2 * MAX_BLOCK_SIZE NOISE_LOWER_LIMIT = 0.98 if IS_MAINNET else 0.8 -NOISE_LOWER_LIMIT_WARN = None if IS_MAINNET else 0.9 +NOISE_LOWER_LIMIT_WARN = 0.9 # If you want to calibrate the upper limit for perf improvement, you can # increase this value temporarily (i.e. 
to 1.3) and readjust back after a day or two of runs -NOISE_UPPER_LIMIT = 5 if IS_MAINNET else 1.15 -NOISE_UPPER_LIMIT_WARN = None if IS_MAINNET else 1.05 +NOISE_UPPER_LIMIT = 1.15 +NOISE_UPPER_LIMIT_WARN = 1.05 + +SKIP_WARNS = IS_MAINNET +SKIP_PERF_IMPROVEMENT_NOTICE = IS_MAINNET # bump after a perf improvement, so you can easily distinguish runs # that are on top of this commit @@ -123,8 +128,6 @@ class RunGroupKeyExtra: transaction_weights_override: Optional[str] = field(default=None) sharding_traffic_flags: Optional[str] = field(default=None) - smaller_working_set: bool = field(default=False) - @dataclass class RunGroupConfig: @@ -156,46 +159,46 @@ class RunGroupConfig: # transaction_type module_working_set_size executor_type count min_ratio max_ratio median CALIBRATION = """ -no-op 1 VM 36 0.827 1.118 36723.0 -no-op 1000 VM 36 0.803 1.030 22352.6 -apt-fa-transfer 1 VM 36 0.858 1.080 28198.5 -account-generation 1 VM 36 0.863 1.046 22960.6 -account-resource32-b 1 VM 36 0.852 1.087 34327.5 -modify-global-resource 1 VM 36 0.890 1.023 2799.1 -modify-global-resource 100 VM 36 0.871 1.019 34327.5 -publish-package 1 VM 36 0.967 1.074 142.9 -mix_publish_transfer 1 VM 36 0.957 1.134 2145.5 -batch100-transfer 1 VM 36 0.862 1.024 743.6 -vector-picture30k 1 VM 36 0.973 1.018 112.2 -vector-picture30k 100 VM 36 0.826 1.026 1862.3 -smart-table-picture30-k-with200-change 1 VM 36 0.972 1.078 21.5 -smart-table-picture30-k-with200-change 100 VM 36 0.955 1.064 368.5 -modify-global-resource-agg-v2 1 VM 36 0.906 1.107 35479.7 -modify-global-flag-agg-v2 1 VM 36 0.969 1.023 5508.5 -modify-global-bounded-agg-v2 1 VM 36 0.909 1.085 9876.8 -modify-global-milestone-agg-v2 1 VM 36 0.872 1.037 28612.4 -resource-groups-global-write-tag1-kb 1 VM 36 0.889 1.044 9215.7 -resource-groups-global-write-and-read-tag1-kb 1 VM 36 0.917 1.018 6196.8 -resource-groups-sender-write-tag1-kb 1 VM 36 0.898 1.118 19644.1 -resource-groups-sender-multi-change1-kb 1 VM 36 0.912 1.083 16047.2 -token-v1ft-mint-and-transfer 1 VM 36 0.888 1.040 1264.5 -token-v1ft-mint-and-transfer 100 VM 36 0.897 1.024 17774 -token-v1nft-mint-and-transfer-sequential 1 VM 36 0.893 1.019 798.4 -token-v1nft-mint-and-transfer-sequential 100 VM 36 0.885 1.022 12796.9 -coin-init-and-mint 1 VM 36 0.788 1.071 28664.7 -coin-init-and-mint 100 VM 36 0.787 1.094 24092 -fungible-asset-mint 1 VM 36 0.775 1.034 26523.6 -fungible-asset-mint 100 VM 36 0.780 1.063 21446.3 -no-op5-signers 1 VM 36 0.813 1.105 38063.3 -token-v2-ambassador-mint 1 VM 36 0.780 1.037 17637.4 -token-v2-ambassador-mint 100 VM 36 0.778 1.045 16466.1 -liquidity-pool-swap 1 VM 36 0.852 1.017 966.8 -liquidity-pool-swap 100 VM 36 0.874 1.021 10977.4 -liquidity-pool-swap-stable 1 VM 36 0.908 1.019 938.1 -liquidity-pool-swap-stable 100 VM 36 0.916 1.016 10761.9 -deserialize-u256 1 VM 36 0.842 1.081 37424.8 -no-op-fee-payer 1 VM 36 0.869 1.018 2116.2 -no-op-fee-payer 100 VM 36 0.824 1.026 27295.8 +no-op 1 VM 59 0.815 1.101 37283.8 +no-op 1000 VM 59 0.679 1.036 22232.7 +apt-fa-transfer 1 VM 59 0.779 1.064 28096.3 +account-generation 1 VM 59 0.763 1.046 22960.6 +account-resource32-b 1 VM 59 0.794 1.085 34394.7 +modify-global-resource 1 VM 59 0.849 1.029 2784.1 +modify-global-resource 100 VM 17 0.845 1.071 33592.9 +publish-package 1 VM 59 0.926 1.076 142.6 +mix_publish_transfer 1 VM 59 0.917 1.134 2145.5 +batch100-transfer 1 VM 59 0.695 1.028 740.9 +vector-picture30k 1 VM 59 0.891 1.027 111.2 +vector-picture30k 100 VM 17 0.593 1.042 1982.6 +smart-table-picture30-k-with200-change 1 VM 59 0.844 1.078 21.5 
+smart-table-picture30-k-with200-change 100 VM 17 0.786 1.018 405.6 +modify-global-resource-agg-v2 1 VM 59 0.706 1.113 35274.8 +modify-global-flag-agg-v2 1 VM 59 0.818 1.023 5508.5 +modify-global-bounded-agg-v2 1 VM 59 0.766 1.089 9840.3 +modify-global-milestone-agg-v2 1 VM 59 0.723 1.038 28560.2 +resource-groups-global-write-tag1-kb 1 VM 59 0.872 1.046 9198.2 +resource-groups-global-write-and-read-tag1-kb 1 VM 59 0.867 1.023 6174.8 +resource-groups-sender-write-tag1-kb 1 VM 59 0.843 1.129 19680.5 +resource-groups-sender-multi-change1-kb 1 VM 59 0.825 1.074 16174.0 +token-v1ft-mint-and-transfer 1 VM 59 0.811 1.045 1262.2 +token-v1ft-mint-and-transfer 100 VM 17 0.718 1.041 17535.3 +token-v1nft-mint-and-transfer-sequential 1 VM 59 0.820 1.032 795.5 +token-v1nft-mint-and-transfer-sequential 100 VM 17 0.586 1.035 12683.5 +coin-init-and-mint 1 VM 59 0.704 1.073 28612.4 +coin-init-and-mint 100 VM 17 0.716 1.087 23415.6 +fungible-asset-mint 1 VM 59 0.644 1.052 26193.9 +fungible-asset-mint 100 VM 17 0.698 1.070 20606.2 +no-op5-signers 1 VM 59 0.783 1.124 37424.8 +token-v2-ambassador-mint 1 VM 59 0.670 1.035 17671.5 +token-v2-ambassador-mint 100 VM 17 0.717 1.058 15617.8 +liquidity-pool-swap 1 VM 59 0.728 1.021 963.2 +liquidity-pool-swap 100 VM 17 0.717 1.019 11116.3 +liquidity-pool-swap-stable 1 VM 59 0.776 1.023 934.6 +liquidity-pool-swap-stable 100 VM 17 0.796 1.021 10839.9 +deserialize-u256 1 VM 59 0.817 1.093 37002.8 +no-op-fee-payer 1 VM 59 0.775 1.027 2103.7 +no-op-fee-payer 100 VM 17 0.585 1.021 27642.4 """ # when adding a new test, add estimated expected_tps to it, as well as waived=True. @@ -206,14 +209,14 @@ class RunGroupConfig: TESTS = [ RunGroupConfig(key=RunGroupKey("no-op"), included_in=LAND_BLOCKING_AND_C), RunGroupConfig(key=RunGroupKey("no-op", module_working_set_size=1000), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(key=RunGroupKey("apt-fa-transfer"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(key=RunGroupKey("apt-fa-transfer"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE | Flow.MAINNET), RunGroupConfig(key=RunGroupKey("apt-fa-transfer", executor_type="native"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(key=RunGroupKey("account-generation"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(key=RunGroupKey("account-generation"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE | Flow.MAINNET), RunGroupConfig(key=RunGroupKey("account-generation", executor_type="native"), included_in=Flow.CONTINUOUS), RunGroupConfig(key=RunGroupKey("account-resource32-b"), included_in=Flow.CONTINUOUS), RunGroupConfig(key=RunGroupKey("modify-global-resource"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), RunGroupConfig(key=RunGroupKey("modify-global-resource", module_working_set_size=DEFAULT_MODULE_WORKING_SET_SIZE), included_in=Flow.CONTINUOUS), - RunGroupConfig(key=RunGroupKey("publish-package"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(key=RunGroupKey("publish-package"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE | Flow.MAINNET), RunGroupConfig(key=RunGroupKey("mix_publish_transfer"), key_extra=RunGroupKeyExtra( transaction_type_override="publish-package apt-fa-transfer", transaction_weights_override="1 500", @@ -265,7 +268,7 @@ class RunGroupConfig: RunGroupConfig(key=RunGroupKey("no-op5-signers"), included_in=Flow.CONTINUOUS), - RunGroupConfig(key=RunGroupKey("token-v2-ambassador-mint"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + 
RunGroupConfig(key=RunGroupKey("token-v2-ambassador-mint"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE | Flow.MAINNET), RunGroupConfig(key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=DEFAULT_MODULE_WORKING_SET_SIZE), included_in=Flow.CONTINUOUS), RunGroupConfig(key=RunGroupKey("liquidity-pool-swap"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), @@ -284,10 +287,10 @@ class RunGroupConfig: RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded"), key_extra=RunGroupKeyExtra(sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.REPRESENTATIVE, waived=True), # setting separately for previewnet, as we run on a different number of cores. - RunGroupConfig(expected_tps=26000 if NUM_ACCOUNTS < 5000000 else 20000, key=RunGroupKey("apt-fa-transfer"), key_extra=RunGroupKeyExtra(smaller_working_set=True), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), - RunGroupConfig(expected_tps=20000 if NUM_ACCOUNTS < 5000000 else 15000, key=RunGroupKey("account-generation"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), - RunGroupConfig(expected_tps=140 if NUM_ACCOUNTS < 5000000 else 60, key=RunGroupKey("publish-package"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), - RunGroupConfig(expected_tps=15400 if NUM_ACCOUNTS < 5000000 else 6800, key=RunGroupKey("token-v2-ambassador-mint"), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB), + RunGroupConfig(expected_tps=20000, key=RunGroupKey("apt-fa-transfer"), included_in=Flow.MAINNET_LARGE_DB), + RunGroupConfig(expected_tps=15000, key=RunGroupKey("account-generation"), included_in=Flow.MAINNET_LARGE_DB), + RunGroupConfig(expected_tps=60, key=RunGroupKey("publish-package"), included_in=Flow.MAINNET_LARGE_DB), + RunGroupConfig(expected_tps=6800, key=RunGroupKey("token-v2-ambassador-mint"), included_in=Flow.MAINNET_LARGE_DB), # RunGroupConfig(expected_tps=17000 if NUM_ACCOUNTS < 5000000 else 28000, key=RunGroupKey("coin_transfer_connected_components", executor_type="sharded"), key_extra=RunGroupKeyExtra(sharding_traffic_flags="--connected-tx-grps 5000", transaction_type_override=""), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB, waived=True), # RunGroupConfig(expected_tps=27000 if NUM_ACCOUNTS < 5000000 else 23000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded"), key_extra=RunGroupKeyExtra(sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.MAINNET | Flow.MAINNET_LARGE_DB, waived=True), ] @@ -665,11 +668,7 @@ def print_table( raise Exception(f"executor type not supported {test.key.executor_type}") txn_emitter_prefix_str = "" if NUM_BLOCKS > 200 else " --generate-then-execute" - ADDITIONAL_DST_POOL_ACCOUNTS = ( - 2 - * MAX_BLOCK_SIZE - * (1 if test.key_extra.smaller_working_set else NUM_BLOCKS) - ) + ADDITIONAL_DST_POOL_ACCOUNTS = 2 * MAX_BLOCK_SIZE * NUM_BLOCKS common_command_suffix = f"{executor_type_str} {txn_emitter_prefix_str} --block-size {cur_block_size} {DB_CONFIG_FLAGS} {DB_PRUNER_FLAGS} run-executor {FEATURE_FLAGS} {workload_args_str} --module-working-set-size {test.key.module_working_set_size} --main-signer-accounts {MAIN_SIGNER_ACCOUNTS} --additional-dst-pool-accounts {ADDITIONAL_DST_POOL_ACCOUNTS} --data-dir {tmpdirname}/db --checkpoint-dir {tmpdirname}/cp" @@ -726,18 +725,15 @@ def print_table( print( json.dumps( { - "grep": "grep_json_single_node_perf" - if SOURCE == "CI" - else ( - "grep_json_single_node_perf_adhoc" - if SOURCE == "ADHOC" - else 
"grep_json_single_node_perf_local" - ), + "grep": "grep_json_single_node_perf", + "source": SOURCE, + "runner_name": RUNNER_NAME, "transaction_type": test.key.transaction_type, "module_working_set_size": test.key.module_working_set_size, "executor_type": test.key.executor_type, "block_size": cur_block_size, "execution_threads": NUMBER_OF_EXECUTION_THREADS, + "warmup_num_accounts": NUM_ACCOUNTS, "expected_tps": criteria.expected_tps, "expected_min_tps": criteria.min_tps, "expected_max_tps": criteria.max_tps, @@ -791,20 +787,18 @@ def print_table( ) print_table(results, by_levels=False, single_field=None) - if NOISE_LOWER_LIMIT is not None and single_node_result.tps < criteria.min_tps: + if single_node_result.tps < criteria.min_tps: text = f"regression detected {single_node_result.tps} < {criteria.min_tps} (expected median {criteria.expected_tps}), {test.key} didn't meet TPS requirements" if not test.waived: errors.append(text) else: warnings.append(text) - elif ( - NOISE_LOWER_LIMIT_WARN is not None - and single_node_result.tps < criteria.min_warn_tps - ): + elif single_node_result.tps < criteria.min_warn_tps: text = f"potential (but within normal noise) regression detected {single_node_result.tps} < {criteria.min_warn_tps} (expected median {criteria.expected_tps}), {test.key} didn't meet TPS requirements" warnings.append(text) elif ( - NOISE_UPPER_LIMIT is not None and single_node_result.tps > criteria.max_tps + not SKIP_PERF_IMPROVEMENT_NOTICE + and single_node_result.tps > criteria.max_tps ): text = f"perf improvement detected {single_node_result.tps} > {criteria.max_tps} (expected median {criteria.expected_tps}), {test.key} exceeded TPS requirements, increase TPS requirements to match new baseline" if not test.waived: @@ -812,7 +806,7 @@ def print_table( else: warnings.append(text) elif ( - NOISE_UPPER_LIMIT_WARN is not None + not SKIP_PERF_IMPROVEMENT_NOTICE and single_node_result.tps > criteria.max_warn_tps ): text = f"potential (but within normal noise) perf improvement detected {single_node_result.tps} > {criteria.max_warn_tps} (expected median {criteria.expected_tps}), {test.key} exceeded TPS requirements, increase TPS requirements to match new baseline" @@ -824,6 +818,7 @@ def print_table( if warnings: print("Warnings: ") print("\n".join(warnings)) + print("You can run again to see if it is noise, or consistent.") if errors: print("Errors: ") From a84e2beca7004640e6e70b95a4a8e220d6be5399 Mon Sep 17 00:00:00 2001 From: Oliver He Date: Thu, 10 Oct 2024 19:53:01 -0400 Subject: [PATCH 06/22] Add federated keyless to the rust sdk (#14905) * Add federated keyless support to rust sdk * update * update local account generator * fix --- Cargo.lock | 1 + .../src/emitter/local_account_generator.rs | 8 +- sdk/Cargo.toml | 1 + sdk/src/types.rs | 332 +++++++++++++++--- testsuite/smoke-test/src/keyless.rs | 103 ++++-- types/src/keyless/test_utils.rs | 52 +-- types/src/transaction/authenticator.rs | 8 + 7 files changed, 401 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 73f7e4ecc3e55..e6096f45511dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3704,6 +3704,7 @@ dependencies = [ "once_cell", "rand 0.7.3", "rand_core 0.5.1", + "serde", "serde_json", "tiny-bip39", "tokio", diff --git a/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs b/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs index 6033d7a49856e..db6c9383c023e 100644 --- a/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs +++ 
b/crates/transaction-emitter-lib/src/emitter/local_account_generator.rs @@ -2,7 +2,9 @@ use anyhow::bail; // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 use aptos_crypto::ed25519::Ed25519PrivateKey; -use aptos_sdk::types::{AccountKey, EphemeralKeyPair, KeylessAccount, LocalAccount}; +use aptos_sdk::types::{ + AccountKey, EphemeralKeyPair, EphemeralPrivateKey, KeylessAccount, LocalAccount, +}; use aptos_transaction_generator_lib::ReliableTransactionSubmitter; use aptos_types::keyless::{Claims, OpenIdSig, Pepper, ZeroKnowledgeSig}; use async_trait::async_trait; @@ -141,7 +143,9 @@ impl LocalAccountGenerator for KeylessAccountGenerator { // Cloning is disabled outside #[cfg(test)] let serialized: &[u8] = &(self.ephemeral_secret_key.to_bytes()); - let esk = Ed25519PrivateKey::try_from(serialized)?; + let esk = EphemeralPrivateKey::Ed25519 { + inner_private_key: Ed25519PrivateKey::try_from(serialized)?, + }; let keyless_account = KeylessAccount::new( &self.iss, diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index ee2105cbebd55..ed87ebb322a59 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -26,6 +26,7 @@ ed25519-dalek-bip32 = { workspace = true } hex = { workspace = true } move-core-types = { workspace = true } rand_core = { workspace = true } +serde = { workspace = true } serde_json = { workspace = true } tiny-bip39 = { workspace = true } diff --git a/sdk/src/types.rs b/sdk/src/types.rs index 9464ac593fff0..8bf98ac47f441 100644 --- a/sdk/src/types.rs +++ b/sdk/src/types.rs @@ -1,12 +1,13 @@ // Copyright © Aptos Foundation // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 - use crate::{ crypto::{ ed25519::{Ed25519PrivateKey, Ed25519PublicKey}, + hash::CryptoHash, signing_message, traits::Uniform, + CryptoMaterialError, }, transaction_builder::TransactionBuilder, types::{ @@ -15,7 +16,7 @@ use crate::{ }, }; use anyhow::{Context, Result}; -use aptos_crypto::{ed25519::Ed25519Signature, PrivateKey, SigningKey}; +use aptos_crypto::{ed25519::Ed25519Signature, secp256r1_ecdsa, PrivateKey, SigningKey}; use aptos_ledger::AptosLedgerError; pub use aptos_types::*; use aptos_types::{ @@ -28,6 +29,8 @@ use aptos_types::{ }; use bip39::{Language, Mnemonic, Seed}; use ed25519_dalek_bip32::{DerivationPath, ExtendedSecretKey}; +use keyless::FederatedKeylessPublicKey; +use serde::{Deserialize, Serialize}; use std::{ str::FromStr, sync::atomic::{AtomicU64, Ordering}, @@ -38,7 +41,7 @@ use std::{ enum LocalAccountAuthenticator { PrivateKey(AccountKey), Keyless(KeylessAccount), - // TODO: Add support for keyless authentication + FederatedKeyless(FederatedKeylessAccount), } impl LocalAccountAuthenticator { @@ -49,26 +52,40 @@ impl LocalAccountAuthenticator { .expect("Signing a txn can't fail") .into_inner(), LocalAccountAuthenticator::Keyless(keyless_account) => { - let proof = keyless_account.zk_sig.proof; - let txn_and_zkp = TransactionAndProof { - message: txn.clone(), - proof: Some(proof), - }; - - let esk = &keyless_account.ephemeral_key_pair.private_key; - let ephemeral_signature = - EphemeralSignature::ed25519(esk.sign(&txn_and_zkp).unwrap()); - - let sig = KeylessSignature { - cert: EphemeralCertificate::ZeroKnowledgeSig(keyless_account.zk_sig.clone()), - jwt_header_json: keyless_account.jwt_header_json.clone(), - exp_date_secs: keyless_account.ephemeral_key_pair.expiry_date_secs, - ephemeral_pubkey: keyless_account.ephemeral_key_pair.public_key.clone(), - ephemeral_signature, - }; - + let sig = 
self.build_keyless_signature(txn.clone(), &keyless_account); SignedTransaction::new_keyless(txn, keyless_account.public_key.clone(), sig) }, + LocalAccountAuthenticator::FederatedKeyless(federated_keyless_account) => { + let sig = self.build_keyless_signature(txn.clone(), &federated_keyless_account); + SignedTransaction::new_federated_keyless( + txn, + federated_keyless_account.public_key.clone(), + sig, + ) + }, + } + } + + fn build_keyless_signature( + &self, + txn: RawTransaction, + account: &impl CommonKeylessAccount, + ) -> KeylessSignature { + let proof = account.zk_sig().proof; + let txn_and_zkp = TransactionAndProof { + message: txn, + proof: Some(proof), + }; + + let esk = account.ephem_private_key(); + let ephemeral_signature = esk.sign(&txn_and_zkp).unwrap(); + + KeylessSignature { + cert: EphemeralCertificate::ZeroKnowledgeSig(account.zk_sig().clone()), + jwt_header_json: account.jwt_header_json().clone(), + exp_date_secs: account.expiry_date_secs(), + ephemeral_pubkey: account.ephem_public_key().clone(), + ephemeral_signature, } } } @@ -123,6 +140,18 @@ impl LocalAccount { } } + pub fn new_federated_keyless( + address: AccountAddress, + federated_keyless_account: FederatedKeylessAccount, + sequence_number: u64, + ) -> Self { + Self { + address, + auth: LocalAccountAuthenticator::FederatedKeyless(federated_keyless_account), + sequence_number: AtomicU64::new(sequence_number), + } + } + /// Recover an account from derive path (e.g. m/44'/637'/0'/0'/0') and mnemonic phrase, pub fn from_derive_path( derive_path: &str, @@ -242,6 +271,7 @@ impl LocalAccount { match &self.auth { LocalAccountAuthenticator::PrivateKey(key) => key.private_key(), LocalAccountAuthenticator::Keyless(_) => todo!(), + LocalAccountAuthenticator::FederatedKeyless(_) => todo!(), } } @@ -249,6 +279,7 @@ impl LocalAccount { match &self.auth { LocalAccountAuthenticator::PrivateKey(key) => key.public_key(), LocalAccountAuthenticator::Keyless(_) => todo!(), + LocalAccountAuthenticator::FederatedKeyless(_) => todo!(), } } @@ -258,6 +289,9 @@ impl LocalAccount { LocalAccountAuthenticator::Keyless(keyless_account) => { keyless_account.authentication_key() }, + LocalAccountAuthenticator::FederatedKeyless(federated_keyless_account) => { + federated_keyless_account.authentication_key() + }, } } @@ -282,6 +316,7 @@ impl LocalAccount { match &mut self.auth { LocalAccountAuthenticator::PrivateKey(key) => std::mem::replace(key, new_key.into()), LocalAccountAuthenticator::Keyless(_) => todo!(), + LocalAccountAuthenticator::FederatedKeyless(_) => todo!(), } } @@ -468,9 +503,56 @@ impl From for AccountKey { } } +#[derive(Debug, Eq, PartialEq, Deserialize)] +pub enum EphemeralPrivateKey { + Ed25519 { + inner_private_key: Ed25519PrivateKey, + }, + Secp256r1Ecdsa { + inner_private_key: secp256r1_ecdsa::PrivateKey, + }, +} + +impl EphemeralPrivateKey { + pub fn public_key(&self) -> EphemeralPublicKey { + match self { + EphemeralPrivateKey::Ed25519 { inner_private_key } => { + EphemeralPublicKey::ed25519(inner_private_key.public_key()) + }, + EphemeralPrivateKey::Secp256r1Ecdsa { inner_private_key } => { + EphemeralPublicKey::secp256r1_ecdsa(inner_private_key.public_key()) + }, + } + } +} + +impl TryFrom<&[u8]> for EphemeralPrivateKey { + type Error = CryptoMaterialError; + + fn try_from(bytes: &[u8]) -> Result { + bcs::from_bytes::(bytes) + .map_err(|_e| CryptoMaterialError::DeserializationError) + } +} + +impl EphemeralPrivateKey { + pub fn sign( + &self, + message: &T, + ) -> Result { + match self { + EphemeralPrivateKey::Ed25519 { 
inner_private_key } => Ok(EphemeralSignature::ed25519( + inner_private_key.sign(message)?, + )), + EphemeralPrivateKey::Secp256r1Ecdsa { + inner_private_key: _, + } => todo!(), + } + } +} #[derive(Debug)] pub struct EphemeralKeyPair { - private_key: Ed25519PrivateKey, + private_key: EphemeralPrivateKey, public_key: EphemeralPublicKey, #[allow(dead_code)] nonce: String, @@ -481,11 +563,11 @@ pub struct EphemeralKeyPair { impl EphemeralKeyPair { pub fn new( - private_key: Ed25519PrivateKey, + private_key: EphemeralPrivateKey, expiry_date_secs: u64, blinder: Vec, ) -> Result { - let epk = EphemeralPublicKey::ed25519(private_key.public_key()); + let epk = private_key.public_key(); let nonce = OpenIdSig::reconstruct_oauth_nonce( &blinder, expiry_date_secs, @@ -507,14 +589,15 @@ impl EphemeralKeyPair { pub struct KeylessAccount { public_key: KeylessPublicKey, ephemeral_key_pair: EphemeralKeyPair, - #[allow(dead_code)] - uid_key: String, - #[allow(dead_code)] - uid_val: String, - #[allow(dead_code)] - aud: String, - #[allow(dead_code)] - pepper: Pepper, + zk_sig: ZeroKnowledgeSig, + jwt_header_json: String, + jwt: Option, +} + +#[derive(Debug)] +pub struct FederatedKeylessAccount { + public_key: FederatedKeylessPublicKey, + ephemeral_key_pair: EphemeralKeyPair, zk_sig: ZeroKnowledgeSig, jwt_header_json: String, jwt: Option, @@ -531,18 +614,10 @@ impl KeylessAccount { pepper: Pepper, zk_sig: ZeroKnowledgeSig, ) -> Result { - let idc = IdCommitment::new_from_preimage(&pepper, aud, uid_key, uid_val)?; - let public_key = KeylessPublicKey { - iss_val: iss.to_owned(), - idc, - }; + let public_key = create_keyless_public_key(iss, aud, uid_key, uid_val, &pepper)?; Ok(Self { public_key, ephemeral_key_pair, - uid_key: uid_key.to_string(), - uid_val: uid_val.to_string(), - aud: aud.to_string(), - pepper, zk_sig, jwt_header_json: jwt_header_json.to_string(), jwt: None, @@ -556,28 +631,23 @@ impl KeylessAccount { pepper: Option, zk_sig: Option, ) -> Result { - let parts: Vec<&str> = jwt.split('.').collect(); - let header_bytes = base64::decode(parts.first().context("jwt malformed")?)?; - let jwt_header_json = String::from_utf8(header_bytes)?; - let jwt_payload_json = - base64::decode_config(parts.get(1).context("jwt malformed")?, base64::URL_SAFE)?; - let claims: Claims = serde_json::from_slice(&jwt_payload_json)?; - + let claims = extract_claims_from_jwt(jwt)?; let uid_key = uid_key.unwrap_or("sub").to_string(); let uid_val = claims.get_uid_val(&uid_key)?; let aud = claims.oidc_claims.aud; - let account = Self::new( + let mut account = Self::new( &claims.oidc_claims.iss, &aud, &uid_key, &uid_val, - &jwt_header_json, + &extract_header_json_from_jwt(jwt)?, ephemeral_key_pair, pepper.expect("pepper fetch not implemented"), zk_sig.expect("proof fetch not implemented"), )?; - Ok(account.set_jwt(jwt)) + account.jwt = Some(jwt.to_string()); + Ok(account) } pub fn authentication_key(&self) -> AuthenticationKey { @@ -587,10 +657,164 @@ impl KeylessAccount { pub fn public_key(&self) -> &KeylessPublicKey { &self.public_key } +} + +impl FederatedKeylessAccount { + pub fn new( + iss: &str, + aud: &str, + uid_key: &str, + uid_val: &str, + jwt_header_json: &str, + ephemeral_key_pair: EphemeralKeyPair, + pepper: Pepper, + zk_sig: ZeroKnowledgeSig, + jwk_addr: AccountAddress, + ) -> Result { + let public_key = + create_federated_public_key(iss, aud, uid_key, uid_val, &pepper, jwk_addr)?; + Ok(Self { + public_key, + ephemeral_key_pair, + zk_sig, + jwt_header_json: jwt_header_json.to_string(), + jwt: None, + }) + } + + pub fn 
new_from_jwt( + jwt: &str, + ephemeral_key_pair: EphemeralKeyPair, + jwk_addr: AccountAddress, + uid_key: Option<&str>, + pepper: Option, + zk_sig: Option, + ) -> Result { + let claims = extract_claims_from_jwt(jwt)?; + let uid_key = uid_key.unwrap_or("sub").to_string(); + let uid_val = claims.get_uid_val(&uid_key)?; + let aud = claims.oidc_claims.aud; + + let mut account = Self::new( + &claims.oidc_claims.iss, + &aud, + &uid_key, + &uid_val, + &extract_header_json_from_jwt(jwt)?, + ephemeral_key_pair, + pepper.expect("pepper fetch not implemented"), + zk_sig.expect("proof fetch not implemented"), + jwk_addr, + )?; + account.jwt = Some(jwt.to_string()); + Ok(account) + } + + pub fn authentication_key(&self) -> AuthenticationKey { + AuthenticationKey::any_key(AnyPublicKey::federated_keyless(self.public_key.clone())) + } + + pub fn public_key(&self) -> &FederatedKeylessPublicKey { + &self.public_key + } +} + +fn create_keyless_public_key( + iss: &str, + aud: &str, + uid_key: &str, + uid_val: &str, + pepper: &Pepper, +) -> Result { + let idc = IdCommitment::new_from_preimage(pepper, aud, uid_key, uid_val)?; + Ok(KeylessPublicKey { + iss_val: iss.to_owned(), + idc, + }) +} + +fn create_federated_public_key( + iss: &str, + aud: &str, + uid_key: &str, + uid_val: &str, + pepper: &Pepper, + jwk_addr: AccountAddress, +) -> Result { + let idc = IdCommitment::new_from_preimage(pepper, aud, uid_key, uid_val)?; + Ok(FederatedKeylessPublicKey { + pk: KeylessPublicKey { + iss_val: iss.to_owned(), + idc, + }, + jwk_addr, + }) +} + +pub fn extract_claims_from_jwt(jwt: &str) -> Result { + let parts: Vec<&str> = jwt.split('.').collect(); + let jwt_payload_json = + base64::decode_config(parts.get(1).context("jwt malformed")?, base64::URL_SAFE)?; + let claims: Claims = serde_json::from_slice(&jwt_payload_json)?; + Ok(claims) +} + +pub fn extract_header_json_from_jwt(jwt: &str) -> Result { + let parts: Vec<&str> = jwt.split('.').collect(); + let header_bytes = base64::decode(parts.first().context("jwt malformed")?)?; + + Ok(String::from_utf8(header_bytes)?) 
+} + +trait CommonKeylessAccount { + fn zk_sig(&self) -> &ZeroKnowledgeSig; + fn ephem_private_key(&self) -> &EphemeralPrivateKey; + fn ephem_public_key(&self) -> &EphemeralPublicKey; + fn jwt_header_json(&self) -> &String; + fn expiry_date_secs(&self) -> u64; +} + +impl CommonKeylessAccount for &KeylessAccount { + fn zk_sig(&self) -> &ZeroKnowledgeSig { + &self.zk_sig + } + + fn ephem_private_key(&self) -> &EphemeralPrivateKey { + &self.ephemeral_key_pair.private_key + } + + fn ephem_public_key(&self) -> &EphemeralPublicKey { + &self.ephemeral_key_pair.public_key + } + + fn jwt_header_json(&self) -> &String { + &self.jwt_header_json + } + + fn expiry_date_secs(&self) -> u64 { + self.ephemeral_key_pair.expiry_date_secs + } +} + +impl CommonKeylessAccount for &FederatedKeylessAccount { + fn zk_sig(&self) -> &ZeroKnowledgeSig { + &self.zk_sig + } + + fn ephem_private_key(&self) -> &EphemeralPrivateKey { + &self.ephemeral_key_pair.private_key + } + + fn ephem_public_key(&self) -> &EphemeralPublicKey { + &self.ephemeral_key_pair.public_key + } + + fn jwt_header_json(&self) -> &String { + &self.jwt_header_json + } - pub fn set_jwt(mut self, jwt: &str) -> Self { - self.jwt = Some(jwt.to_string()); - self + fn expiry_date_secs(&self) -> u64 { + self.ephemeral_key_pair.expiry_date_secs } } diff --git a/testsuite/smoke-test/src/keyless.rs b/testsuite/smoke-test/src/keyless.rs index d7af6aafb1175..6e847afbc5dc5 100644 --- a/testsuite/smoke-test/src/keyless.rs +++ b/testsuite/smoke-test/src/keyless.rs @@ -10,7 +10,9 @@ use aptos_crypto::{ use aptos_forge::{AptosPublicInfo, LocalSwarm, NodeExt, Swarm, SwarmExt}; use aptos_logger::{debug, info}; use aptos_rest_client::Client; -use aptos_sdk::types::{EphemeralKeyPair, KeylessAccount, LocalAccount}; +use aptos_sdk::types::{ + EphemeralKeyPair, EphemeralPrivateKey, FederatedKeylessAccount, KeylessAccount, LocalAccount, +}; use aptos_types::{ jwks::{ jwk::{JWKMoveStruct, JWK}, @@ -21,11 +23,11 @@ use aptos_types::{ get_public_inputs_hash, test_utils::{ self, get_groth16_sig_and_pk_for_upgraded_vk, get_sample_aud, get_sample_epk_blinder, - get_sample_esk, get_sample_exp_date, get_sample_groth16_sig_and_fed_pk, - get_sample_groth16_sig_and_pk, get_sample_groth16_sig_and_pk_no_extra_field, - get_sample_iss, get_sample_jwk, get_sample_jwt_header_json, get_sample_jwt_token, - get_sample_openid_sig_and_pk, get_sample_pepper, get_sample_tw_sk, get_sample_uid_key, - get_sample_uid_val, get_sample_zk_sig, get_upgraded_vk, + get_sample_esk, get_sample_exp_date, get_sample_groth16_sig_and_pk, + get_sample_groth16_sig_and_pk_no_extra_field, get_sample_iss, get_sample_jwk, + get_sample_jwt_header_json, get_sample_jwt_token, get_sample_openid_sig_and_pk, + get_sample_pepper, get_sample_tw_sk, get_sample_uid_key, get_sample_uid_val, + get_sample_zk_sig, get_upgraded_vk, }, AnyKeylessPublicKey, Configuration, EphemeralCertificate, Groth16ProofAndStatement, Groth16VerificationKey, KeylessPublicKey, KeylessSignature, TransactionAndProof, @@ -248,7 +250,7 @@ async fn federated_keyless_scenario( install_fed_jwk: bool, expect_txn_succeed: bool, ) { - let (tw_sk, config, jwk, swarm, mut cli, _) = setup_local_net_inner(set_feature_flag).await; + let (_tw_sk, _config, _jwk, swarm, mut cli, _) = setup_local_net_inner(set_feature_flag).await; let root_addr = swarm.chain_info().root_account().address(); let _root_idx = cli.add_account_with_address_to_cli(swarm.root_key(), root_addr); @@ -317,19 +319,63 @@ script {{ assert_eq!(Some(true), txn_result.unwrap().success); } - // For 
simplicity we use the root account as the jwk owner. - let (sig, pk) = get_sample_groth16_sig_and_fed_pk(root_addr); let mut info = swarm.aptos_public_info(); - let signed_txn = sign_transaction_any_keyless_pk( - &mut info, - sig.clone(), - AnyKeylessPublicKey::Federated(pk), - &jwk, - &config, - Some(&tw_sk), - 1, + + let esk = EphemeralPrivateKey::Ed25519 { + inner_private_key: get_sample_esk(), + }; + let ephemeral_key_pair = + EphemeralKeyPair::new(esk, get_sample_exp_date(), get_sample_epk_blinder()).unwrap(); + let federated_keyless_account = FederatedKeylessAccount::new_from_jwt( + &get_sample_jwt_token(), + ephemeral_key_pair, + root_addr, + None, + Some(get_sample_pepper()), + Some(get_sample_zk_sig()), ) - .await; + .unwrap(); + + let federated_keyless_public_key = federated_keyless_account.public_key().clone(); + + let local_account = LocalAccount::new_federated_keyless( + federated_keyless_account + .authentication_key() + .account_address(), + federated_keyless_account, + 0, + ); + + // If the account does not exist, create it. + if info.account_exists(local_account.address()).await.is_err() { + info!( + "{} account does not exist. Creating...", + local_account.address().to_hex_literal() + ); + info.sync_root_account_sequence_number().await; + info.create_user_account_with_any_key(&AnyPublicKey::FederatedKeyless { + public_key: federated_keyless_public_key, + }) + .await + .unwrap(); + info.mint(local_account.address(), 10_000_000_000) + .await + .unwrap(); + } + info.sync_root_account_sequence_number().await; + let recipient = info + .create_and_fund_user_account(20_000_000_000) + .await + .unwrap(); + + let txn_builder = info + .transaction_factory() + .payload(aptos_stdlib::aptos_coin_transfer( + recipient.address(), + 1_000_000, + )); + + let signed_txn = local_account.sign_with_transaction_builder(txn_builder); let result = swarm .aptos_public_info() @@ -384,10 +430,11 @@ async fn test_keyless_no_training_wheels_groth16_verifies() { async fn test_keyless_groth16_verifies_using_rust_sdk() { let (_tw_sk, _, _, swarm, mut cli, root_idx) = setup_local_net().await; - let blinder = get_sample_epk_blinder(); - let exp_date = get_sample_exp_date(); - let esk = get_sample_esk(); - let ephemeral_key_pair = EphemeralKeyPair::new(esk, exp_date, blinder).unwrap(); + let esk = EphemeralPrivateKey::Ed25519 { + inner_private_key: get_sample_esk(), + }; + let ephemeral_key_pair = + EphemeralKeyPair::new(esk, get_sample_exp_date(), get_sample_epk_blinder()).unwrap(); let mut info = swarm.aptos_public_info(); let keyless_account = KeylessAccount::new( @@ -443,15 +490,15 @@ async fn test_keyless_groth16_verifies_using_rust_sdk() { async fn test_keyless_groth16_verifies_using_rust_sdk_from_jwt() { let (_tw_sk, _, _, swarm, mut cli, root_idx) = setup_local_net().await; - let jwt = get_sample_jwt_token(); - let blinder = get_sample_epk_blinder(); - let exp_date = get_sample_exp_date(); - let esk = get_sample_esk(); - let ephemeral_key_pair = EphemeralKeyPair::new(esk, exp_date, blinder).unwrap(); + let esk = EphemeralPrivateKey::Ed25519 { + inner_private_key: get_sample_esk(), + }; + let ephemeral_key_pair = + EphemeralKeyPair::new(esk, get_sample_exp_date(), get_sample_epk_blinder()).unwrap(); let mut info = swarm.aptos_public_info(); let keyless_account = KeylessAccount::new_from_jwt( - &jwt, + &get_sample_jwt_token(), ephemeral_key_pair, None, Some(get_sample_pepper()), diff --git a/types/src/keyless/test_utils.rs b/types/src/keyless/test_utils.rs index 250175d9428b2..c8babbd5913af 100644 
--- a/types/src/keyless/test_utils.rs +++ b/types/src/keyless/test_utils.rs @@ -113,13 +113,41 @@ pub fn get_sample_groth16_zkp_and_statement() -> Groth16ProofAndStatement { pub fn get_sample_zk_sig() -> ZeroKnowledgeSig { let proof = *SAMPLE_PROOF; - ZeroKnowledgeSig { + let mut zks = ZeroKnowledgeSig { proof: proof.into(), extra_field: Some(SAMPLE_JWT_EXTRA_FIELD.to_string()), exp_horizon_secs: SAMPLE_EXP_HORIZON_SECS, override_aud_val: None, training_wheels_signature: None, - } + }; + + let sig = KeylessSignature { + cert: EphemeralCertificate::ZeroKnowledgeSig(zks.clone()), + jwt_header_json: SAMPLE_JWT_HEADER_JSON.to_string(), + exp_date_secs: SAMPLE_EXP_DATE, + ephemeral_pubkey: SAMPLE_EPK.clone(), + ephemeral_signature: DUMMY_EPHEMERAL_SIGNATURE.clone(), + }; + + let public_inputs_hash = fr_to_bytes_le( + &get_public_inputs_hash( + &sig, + &SAMPLE_PK.clone(), + &SAMPLE_JWK, + &Configuration::new_for_testing(), + ) + .unwrap(), + ); + + let proof_and_statement = Groth16ProofAndStatement { + proof, + public_inputs_hash, + }; + + zks.training_wheels_signature = Some(EphemeralSignature::ed25519( + get_sample_tw_sk().sign(&proof_and_statement).unwrap(), + )); + zks } /// Note: Does not have a valid ephemeral signature. Use the SAMPLE_ESK to compute one over the @@ -169,15 +197,7 @@ pub fn get_random_simulated_groth16_sig_and_pk() -> ( /// Note: Does not have a valid ephemeral signature. Use the SAMPLE_ESK to compute one over the /// desired TXN. pub fn get_sample_groth16_sig_and_pk() -> (KeylessSignature, KeylessPublicKey) { - let proof = *SAMPLE_PROOF; - - let zks = ZeroKnowledgeSig { - proof: proof.into(), - extra_field: Some(SAMPLE_JWT_EXTRA_FIELD.to_string()), - exp_horizon_secs: SAMPLE_EXP_HORIZON_SECS, - override_aud_val: None, - training_wheels_signature: None, - }; + let zks = get_sample_zk_sig(); let sig = KeylessSignature { cert: EphemeralCertificate::ZeroKnowledgeSig(zks.clone()), @@ -193,15 +213,7 @@ pub fn get_sample_groth16_sig_and_pk() -> (KeylessSignature, KeylessPublicKey) { pub fn get_sample_groth16_sig_and_fed_pk( jwk_addr: AccountAddress, ) -> (KeylessSignature, FederatedKeylessPublicKey) { - let proof = *SAMPLE_PROOF; - - let zks = ZeroKnowledgeSig { - proof: proof.into(), - extra_field: Some(SAMPLE_JWT_EXTRA_FIELD.to_string()), - exp_horizon_secs: SAMPLE_EXP_HORIZON_SECS, - override_aud_val: None, - training_wheels_signature: None, - }; + let zks = get_sample_zk_sig(); let sig = KeylessSignature { cert: EphemeralCertificate::ZeroKnowledgeSig(zks.clone()), diff --git a/types/src/transaction/authenticator.rs b/types/src/transaction/authenticator.rs index f61f698b1ef89..f5237eef056df 100644 --- a/types/src/transaction/authenticator.rs +++ b/types/src/transaction/authenticator.rs @@ -1164,6 +1164,10 @@ impl EphemeralSignature { Self::Ed25519 { signature } } + pub fn web_authn(signature: PartialAuthenticatorAssertionResponse) -> Self { + Self::WebAuthn { signature } + } + pub fn verify( &self, message: &T, @@ -1228,6 +1232,10 @@ impl EphemeralPublicKey { Self::Ed25519 { public_key } } + pub fn secp256r1_ecdsa(public_key: secp256r1_ecdsa::PublicKey) -> Self { + Self::Secp256r1Ecdsa { public_key } + } + pub fn to_bytes(&self) -> Vec { bcs::to_bytes(self).expect("Only unhandleable errors happen here.") } From b2781bff67c5842aed4d38ba3f5118f5f6565f7a Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Thu, 10 Oct 2024 21:36:13 -0700 Subject: [PATCH 07/22] [consensus] fallback heuristics for optimistic quorum store (#14346) * [consensus] Fallback heuristics for optimistic quorum 
store * [optqs] set minimum batch age for optimistic batch proposals * new unit tests and existing test fixes * allow handling OptQS payload by default --- consensus/consensus-types/src/common.rs | 4 +- consensus/consensus-types/src/lib.rs | 1 + .../src/payload_pull_params.rs | 91 ++++++++ .../consensus-types/src/request_response.rs | 5 +- .../consensus-types/src/round_timeout.rs | 9 +- consensus/src/block_storage/block_store.rs | 16 +- consensus/src/block_storage/block_tree.rs | 34 ++- consensus/src/dag/dag_driver.rs | 5 +- consensus/src/dag/tests/helpers.rs | 3 +- consensus/src/epoch_manager.rs | 12 + consensus/src/lib.rs | 2 + consensus/src/liveness/mod.rs | 1 + consensus/src/liveness/proposal_generator.rs | 13 +- .../src/liveness/proposal_generator_test.rs | 14 ++ .../src/liveness/proposal_status_tracker.rs | 210 ++++++++++++++++++ consensus/src/liveness/round_state.rs | 29 ++- consensus/src/liveness/round_state_test.rs | 21 +- consensus/src/payload_client/mixed.rs | 11 +- consensus/src/payload_client/mod.rs | 75 +------ consensus/src/payload_client/user/mod.rs | 3 +- .../user/quorum_store_client.rs | 13 +- consensus/src/payload_manager.rs | 39 ++-- consensus/src/pending_votes.rs | 160 +++++++++++-- consensus/src/pending_votes_test.rs | 161 ++++++++++++++ .../src/quorum_store/batch_proof_queue.rs | 40 +++- consensus/src/quorum_store/proof_manager.rs | 58 ++--- .../src/quorum_store/quorum_store_builder.rs | 3 +- .../tests/batch_proof_queue_test.rs | 12 +- .../tests/direct_mempool_quorum_store_test.rs | 2 +- .../quorum_store/tests/proof_manager_test.rs | 4 +- consensus/src/round_manager.rs | 84 ++++--- consensus/src/round_manager_fuzzing.rs | 6 +- consensus/src/round_manager_test.rs | 15 +- .../src/test_utils/mock_payload_manager.rs | 7 +- consensus/src/test_utils/mod.rs | 19 ++ .../src/bounded_vec_deque.rs | 8 + .../tests/staged/consensus.yaml | 7 + types/src/validator_verifier.rs | 21 +- 38 files changed, 980 insertions(+), 238 deletions(-) create mode 100644 consensus/consensus-types/src/payload_pull_params.rs create mode 100644 consensus/src/liveness/proposal_status_tracker.rs create mode 100644 consensus/src/pending_votes_test.rs diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index 7dbc1888b7203..db20a4fd9f3fd 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -6,7 +6,6 @@ use crate::{ payload::{OptQuorumStorePayload, PayloadExecutionLimit}, proof_of_store::{BatchInfo, ProofCache, ProofOfStore}, }; -use anyhow::bail; use aptos_crypto::{ hash::{CryptoHash, CryptoHasher}, HashValue, @@ -520,8 +519,7 @@ impl Payload { (true, Payload::OptQuorumStore(opt_quorum_store)) => { let proof_with_data = opt_quorum_store.proof_with_data(); Self::verify_with_cache(&proof_with_data.batch_summary, validator, proof_cache)?; - // TODO(ibalajiarun): Remove this log when OptQS is enabled. - bail!("OptQuorumStore Payload is not expected yet"); + Ok(()) }, (_, _) => Err(anyhow::anyhow!( "Wrong payload type. 
Expected Payload::InQuorumStore {} got {} ", diff --git a/consensus/consensus-types/src/lib.rs b/consensus/consensus-types/src/lib.rs index 5d351d279bf8e..27ca8b6f92874 100644 --- a/consensus/consensus-types/src/lib.rs +++ b/consensus/consensus-types/src/lib.rs @@ -13,6 +13,7 @@ pub mod order_vote; pub mod order_vote_msg; pub mod order_vote_proposal; pub mod payload; +pub mod payload_pull_params; pub mod pipeline; pub mod pipeline_execution_result; pub mod pipelined_block; diff --git a/consensus/consensus-types/src/payload_pull_params.rs b/consensus/consensus-types/src/payload_pull_params.rs new file mode 100644 index 0000000000000..682f9b2185194 --- /dev/null +++ b/consensus/consensus-types/src/payload_pull_params.rs @@ -0,0 +1,91 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::{ + common::{Author, PayloadFilter}, + utils::PayloadTxnsSize, +}; +use std::{collections::HashSet, time::Duration}; + +#[derive(Clone)] +pub struct OptQSPayloadPullParams { + pub exclude_authors: HashSet, + pub minimum_batch_age_usecs: u64, +} + +pub struct PayloadPullParameters { + pub max_poll_time: Duration, + pub max_txns: PayloadTxnsSize, + pub max_txns_after_filtering: u64, + pub soft_max_txns_after_filtering: u64, + pub max_inline_txns: PayloadTxnsSize, + pub user_txn_filter: PayloadFilter, + pub pending_ordering: bool, + pub pending_uncommitted_blocks: usize, + pub recent_max_fill_fraction: f32, + pub block_timestamp: Duration, + pub maybe_optqs_payload_pull_params: Option, +} + +impl std::fmt::Debug for OptQSPayloadPullParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("OptQSPayloadPullParams") + .field("exclude_authors", &self.exclude_authors) + .field("minimum_batch_age_useds", &self.minimum_batch_age_usecs) + .finish() + } +} + +impl PayloadPullParameters { + pub fn new_for_test( + max_poll_time: Duration, + max_txns: u64, + max_txns_bytes: u64, + max_txns_after_filtering: u64, + soft_max_txns_after_filtering: u64, + max_inline_txns: u64, + max_inline_txns_bytes: u64, + user_txn_filter: PayloadFilter, + pending_ordering: bool, + pending_uncommitted_blocks: usize, + recent_max_fill_fraction: f32, + block_timestamp: Duration, + ) -> Self { + Self { + max_poll_time, + max_txns: PayloadTxnsSize::new(max_txns, max_txns_bytes), + max_txns_after_filtering, + soft_max_txns_after_filtering, + max_inline_txns: PayloadTxnsSize::new(max_inline_txns, max_inline_txns_bytes), + user_txn_filter, + pending_ordering, + pending_uncommitted_blocks, + recent_max_fill_fraction, + block_timestamp, + maybe_optqs_payload_pull_params: None, + } + } +} + +impl std::fmt::Debug for PayloadPullParameters { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("PayloadPullParameters") + .field("max_poll_time", &self.max_poll_time) + .field("max_items", &self.max_txns) + .field("max_unique_items", &self.max_txns_after_filtering) + .field( + "soft_max_txns_after_filtering", + &self.soft_max_txns_after_filtering, + ) + .field("max_inline_items", &self.max_inline_txns) + .field("pending_ordering", &self.pending_ordering) + .field( + "pending_uncommitted_blocks", + &self.pending_uncommitted_blocks, + ) + .field("recent_max_fill_fraction", &self.recent_max_fill_fraction) + .field("block_timestamp", &self.block_timestamp) + .field("optqs_params", &self.maybe_optqs_payload_pull_params) + .finish() + } +} diff --git a/consensus/consensus-types/src/request_response.rs b/consensus/consensus-types/src/request_response.rs 
index c650141e7878a..f10e35285e532 100644 --- a/consensus/consensus-types/src/request_response.rs +++ b/consensus/consensus-types/src/request_response.rs @@ -3,6 +3,7 @@ use crate::{ common::{Payload, PayloadFilter}, + payload_pull_params::OptQSPayloadPullParams, utils::PayloadTxnsSize, }; use anyhow::Result; @@ -16,8 +17,8 @@ pub struct GetPayloadRequest { pub max_txns_after_filtering: u64, // soft max number of transactions after filtering in the block (i.e. include one that crosses it) pub soft_max_txns_after_filtering: u64, - // target txns with opt batches in max_txns as pct - pub opt_batch_txns_pct: u8, + // opt payload pull params + pub maybe_optqs_payload_pull_params: Option, // max number of inline transactions (transactions without a proof of store) pub max_inline_txns: PayloadTxnsSize, // return non full diff --git a/consensus/consensus-types/src/round_timeout.rs b/consensus/consensus-types/src/round_timeout.rs index c4596fc2a9d5b..e16d718f7dd38 100644 --- a/consensus/consensus-types/src/round_timeout.rs +++ b/consensus/consensus-types/src/round_timeout.rs @@ -7,15 +7,18 @@ use crate::{ timeout_2chain::TwoChainTimeout, }; use anyhow::{ensure, Context}; +use aptos_bitvec::BitVec; use aptos_crypto::bls12381; use aptos_short_hex_str::AsShortHexStr; use aptos_types::validator_verifier::ValidatorVerifier; use serde::{Deserialize, Serialize}; -#[derive(Deserialize, Serialize, Clone, PartialEq, Eq)] +#[derive(Deserialize, Serialize, Clone, PartialEq, Eq, Hash, Debug)] pub enum RoundTimeoutReason { Unknown, ProposalNotReceived, + PayloadUnavailable { missing_authors: BitVec }, + NoQC, } impl std::fmt::Display for RoundTimeoutReason { @@ -23,6 +26,10 @@ impl std::fmt::Display for RoundTimeoutReason { match self { RoundTimeoutReason::Unknown => write!(f, "Unknown"), RoundTimeoutReason::ProposalNotReceived => write!(f, "ProposalNotReceived"), + RoundTimeoutReason::PayloadUnavailable { .. 
} => { + write!(f, "PayloadUnavailable",) + }, + RoundTimeoutReason::NoQC => write!(f, "NoQC"), } } } diff --git a/consensus/src/block_storage/block_store.rs b/consensus/src/block_storage/block_store.rs index 8670e161602da..f56dfa80c23eb 100644 --- a/consensus/src/block_storage/block_store.rs +++ b/consensus/src/block_storage/block_store.rs @@ -18,6 +18,7 @@ use crate::{ util::time_service::TimeService, }; use anyhow::{bail, ensure, format_err, Context}; +use aptos_bitvec::BitVec; use aptos_consensus_types::{ block::Block, common::Round, @@ -472,18 +473,19 @@ impl BlockStore { self.pending_blocks.clone() } - pub async fn wait_for_payload(&self, block: &Block) -> anyhow::Result<()> { - tokio::time::timeout( - Duration::from_secs(1), - self.payload_manager.get_transactions(block), - ) - .await??; + pub async fn wait_for_payload(&self, block: &Block, deadline: Duration) -> anyhow::Result<()> { + let duration = deadline.saturating_sub(self.time_service.get_current_timestamp()); + tokio::time::timeout(duration, self.payload_manager.get_transactions(block)).await??; Ok(()) } - pub fn check_payload(&self, proposal: &Block) -> bool { + pub fn check_payload(&self, proposal: &Block) -> Result<(), BitVec> { self.payload_manager.check_payload_availability(proposal) } + + pub fn get_block_for_round(&self, round: Round) -> Option> { + self.inner.read().get_block_for_round(round) + } } impl BlockReader for BlockStore { diff --git a/consensus/src/block_storage/block_tree.rs b/consensus/src/block_storage/block_tree.rs index 0edb607579c72..5d1df54149cbf 100644 --- a/consensus/src/block_storage/block_tree.rs +++ b/consensus/src/block_storage/block_tree.rs @@ -15,10 +15,13 @@ use aptos_consensus_types::{ }; use aptos_crypto::HashValue; use aptos_logger::prelude::*; -use aptos_types::{block_info::BlockInfo, ledger_info::LedgerInfoWithSignatures}; +use aptos_types::{ + block_info::{BlockInfo, Round}, + ledger_info::LedgerInfoWithSignatures, +}; use mirai_annotations::{checked_verify_eq, precondition}; use std::{ - collections::{vec_deque::VecDeque, HashMap, HashSet}, + collections::{vec_deque::VecDeque, BTreeMap, HashMap, HashSet}, sync::Arc, }; @@ -89,6 +92,9 @@ pub struct BlockTree { pruned_block_ids: VecDeque, /// Num pruned blocks to keep in memory. max_pruned_blocks_in_mem: usize, + + /// Round to Block index. We expect only one block per round. 
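+    /// (Editorial note, not part of the PR: this relies on unequivocal
+    /// proposer election; insert_block below only logs a warning and keeps
+    /// the first mapping if a second block arrives for the same round.)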
+ round_to_ids: BTreeMap, } impl BlockTree { @@ -108,6 +114,8 @@ impl BlockTree { let root_id = root.id(); let mut id_to_block = HashMap::new(); + let mut round_to_ids = BTreeMap::new(); + round_to_ids.insert(root.round(), root_id); id_to_block.insert(root_id, LinkableBlock::new(root)); counters::NUM_BLOCKS_IN_TREE.set(1); @@ -132,6 +140,7 @@ impl BlockTree { pruned_block_ids, max_pruned_blocks_in_mem, highest_2chain_timeout_cert, + round_to_ids, } } @@ -165,7 +174,10 @@ impl BlockTree { fn remove_block(&mut self, block_id: HashValue) { // Remove the block from the store - self.id_to_block.remove(&block_id); + if let Some(block) = self.id_to_block.remove(&block_id) { + let round = block.executed_block().round(); + self.round_to_ids.remove(&round); + }; self.id_to_quorum_cert.remove(&block_id); } @@ -178,6 +190,12 @@ impl BlockTree { .map(|lb| lb.executed_block().clone()) } + pub(super) fn get_block_for_round(&self, round: Round) -> Option> { + self.round_to_ids + .get(&round) + .and_then(|block_id| self.get_block(block_id)) + } + pub(super) fn ordered_root(&self) -> Arc { self.get_block(&self.ordered_root_id) .expect("Root must exist") @@ -241,6 +259,16 @@ impl BlockTree { let linkable_block = LinkableBlock::new(block); let arc_block = Arc::clone(linkable_block.executed_block()); assert!(self.id_to_block.insert(block_id, linkable_block).is_none()); + // Note: the assumption is that we have/enforce unequivocal proposer election. + if let Some(old_block_id) = self.round_to_ids.get(&arc_block.round()) { + warn!( + "Multiple blocks received for round {}. Previous block id: {}", + arc_block.round(), + old_block_id + ); + } else { + self.round_to_ids.insert(arc_block.round(), block_id); + } counters::NUM_BLOCKS_IN_TREE.inc(); Ok(arc_block) } diff --git a/consensus/src/dag/dag_driver.rs b/consensus/src/dag/dag_driver.rs index 2395ba30ef264..fa0caee1faa8a 100644 --- a/consensus/src/dag/dag_driver.rs +++ b/consensus/src/dag/dag_driver.rs @@ -21,13 +21,14 @@ use crate::{ }, DAGRpcResult, RpcHandler, }, - payload_client::{PayloadClient, PayloadPullParameters}, + payload_client::PayloadClient, }; use anyhow::{bail, ensure}; use aptos_collections::BoundedVecDeque; use aptos_config::config::DagPayloadConfig; use aptos_consensus_types::{ common::{Author, Payload, PayloadFilter}, + payload_pull_params::PayloadPullParameters, utils::PayloadTxnsSize, }; use aptos_crypto::hash::CryptoHash; @@ -266,7 +267,7 @@ impl DagDriver { max_txns_after_filtering: max_txns, soft_max_txns_after_filtering: max_txns, max_inline_txns: PayloadTxnsSize::new(100, 100 * 1024), - opt_batch_txns_pct: 0, + maybe_optqs_payload_pull_params: None, user_txn_filter: payload_filter, pending_ordering: false, pending_uncommitted_blocks: 0, diff --git a/consensus/src/dag/tests/helpers.rs b/consensus/src/dag/tests/helpers.rs index ff19b6876e2db..dab407099c303 100644 --- a/consensus/src/dag/tests/helpers.rs +++ b/consensus/src/dag/tests/helpers.rs @@ -8,6 +8,7 @@ use crate::{ }, payload_manager::TPayloadManager, }; +use aptos_bitvec::BitVec; use aptos_consensus_types::{ block::Block, common::{Author, Payload, Round}, @@ -26,7 +27,7 @@ impl TPayloadManager for MockPayloadManager { fn notify_commit(&self, _block_timestamp: u64, _payloads: Vec) {} - fn check_payload_availability(&self, _block: &Block) -> bool { + fn check_payload_availability(&self, _block: &Block) -> Result<(), BitVec> { unimplemented!() } diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index c0e1ba3ac7ecb..9dea39ef66045 100644 --- 
a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -21,6 +21,7 @@ use crate::{ proposal_generator::{ ChainHealthBackoffConfig, PipelineBackpressureConfig, ProposalGenerator, }, + proposal_status_tracker::{ExponentialWindowFailureTracker, OptQSPullParamsProvider}, proposer_election::ProposerElection, rotating_proposer_election::{choose_leader, RotatingProposer}, round_proposer_election::RoundProposer, @@ -826,6 +827,15 @@ impl<P: OnChainConfigProvider> EpochManager<P> { self.pending_blocks.clone(), )); + let failures_tracker = Arc::new(Mutex::new(ExponentialWindowFailureTracker::new( + 100, + epoch_state.verifier.get_ordered_account_addresses(), + ))); + let opt_qs_payload_param_provider = Arc::new(OptQSPullParamsProvider::new( + self.config.quorum_store.enable_opt_quorum_store, + failures_tracker.clone(), + )); + info!(epoch = epoch, "Create ProposalGenerator"); // txn manager is required both by proposal generator (to pull the proposers) // and by event processor (to update their status). @@ -854,6 +864,7 @@ impl<P: OnChainConfigProvider> EpochManager<P> { self.config .quorum_store .allow_batches_without_pos_in_proposal, + opt_qs_payload_param_provider, ); let (round_manager_tx, round_manager_rx) = aptos_channel::new( QueueStyle::KLAST, @@ -887,6 +898,7 @@ impl<P: OnChainConfigProvider> EpochManager<P> { onchain_randomness_config, onchain_jwk_consensus_config, fast_rand_config, + failures_tracker, ); round_manager.init(last_vote).await; diff --git a/consensus/src/lib.rs b/consensus/src/lib.rs index f8545073966bd..3660afb3b49f5 100644 --- a/consensus/src/lib.rs +++ b/consensus/src/lib.rs @@ -31,6 +31,8 @@ mod network_tests; mod payload_client; mod pending_order_votes; mod pending_votes; +#[cfg(test)] +mod pending_votes_test; pub mod persistent_liveness_storage; mod pipeline; pub mod quorum_store; diff --git a/consensus/src/liveness/mod.rs b/consensus/src/liveness/mod.rs index f7e8f11bceb05..effa52291246f 100644 --- a/consensus/src/liveness/mod.rs +++ b/consensus/src/liveness/mod.rs @@ -5,6 +5,7 @@ pub(crate) mod cached_proposer_election; pub(crate) mod leader_reputation; pub(crate) mod proposal_generator; +pub(crate) mod proposal_status_tracker; pub(crate) mod proposer_election; pub(crate) mod rotating_proposer_election; pub(crate) mod round_proposer_election; diff --git a/consensus/src/liveness/proposal_generator.rs b/consensus/src/liveness/proposal_generator.rs index 334b0a76fbf4e..47bdea4c9ce95 100644 --- a/consensus/src/liveness/proposal_generator.rs +++ b/consensus/src/liveness/proposal_generator.rs @@ -2,7 +2,9 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use super::proposer_election::ProposerElection; +use super::{ + proposal_status_tracker::TOptQSPullParamsProvider, proposer_election::ProposerElection, +}; use crate::{ block_storage::BlockReader, counters::{ @@ -12,7 +14,7 @@ use crate::{ PROPOSER_MAX_BLOCK_TXNS_TO_EXECUTE, PROPOSER_PENDING_BLOCKS_COUNT, PROPOSER_PENDING_BLOCKS_FILL_FRACTION, }, - payload_client::{PayloadClient, PayloadPullParameters}, + payload_client::PayloadClient, util::time_service::TimeService, }; use anyhow::{bail, ensure, format_err, Context}; @@ -23,6 +25,7 @@ use aptos_consensus_types::{ block::Block, block_data::BlockData, common::{Author, Payload, PayloadFilter, Round}, + payload_pull_params::PayloadPullParameters, pipelined_block::ExecutionSummary, quorum_cert::QuorumCert, utils::PayloadTxnsSize, @@ -267,6 +270,7 @@ pub struct ProposalGenerator { vtxn_config: ValidatorTxnConfig, allow_batches_without_pos_in_proposal: bool, + opt_qs_payload_param_provider: Arc<dyn TOptQSPullParamsProvider>, } impl ProposalGenerator { @@ -287,6 +291,7 @@ impl ProposalGenerator { quorum_store_enabled: bool, vtxn_config: ValidatorTxnConfig, allow_batches_without_pos_in_proposal: bool, + opt_qs_payload_param_provider: Arc<dyn TOptQSPullParamsProvider>, ) -> Self { Self { author, @@ -305,6 +310,7 @@ impl ProposalGenerator { quorum_store_enabled, vtxn_config, allow_batches_without_pos_in_proposal, + opt_qs_payload_param_provider, } } @@ -353,6 +359,7 @@ impl ProposalGenerator { bail!("Already proposed in the round {}", round); } } + let maybe_optqs_payload_pull_params = self.opt_qs_payload_param_provider.get_params(); let hqc = self.ensure_highest_quorum_cert(round)?; @@ -456,7 +463,7 @@ impl ProposalGenerator { soft_max_txns_after_filtering: max_txns_from_block_to_execute .unwrap_or(max_block_txns_after_filtering), max_inline_txns: self.max_inline_txns, - opt_batch_txns_pct: 0, + maybe_optqs_payload_pull_params, user_txn_filter: payload_filter, pending_ordering, pending_uncommitted_blocks: pending_blocks.len(), diff --git a/consensus/src/liveness/proposal_generator_test.rs b/consensus/src/liveness/proposal_generator_test.rs index aae56dc864644..5aa907d7fe672 100644 ---
b/consensus/src/liveness/proposal_generator_test.rs @@ -8,6 +8,7 @@ use crate::{ proposal_generator::{ ChainHealthBackoffConfig, PipelineBackpressureConfig, ProposalGenerator, }, + proposal_status_tracker::TOptQSPullParamsProvider, rotating_proposer_election::RotatingProposer, unequivocal_proposer_election::UnequivocalProposerElection, }, @@ -17,6 +18,7 @@ use crate::{ use aptos_consensus_types::{ block::{block_test_utils::certificate_for_genesis, Block}, common::Author, + payload_pull_params::OptQSPayloadPullParams, utils::PayloadTxnsSize, }; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_signer::ValidatorSigner}; @@ -27,6 +29,14 @@ fn empty_callback() -> BoxFuture<'static, ()> { async move {}.boxed() } +struct MockOptQSPayloadProvider {} + +impl TOptQSPullParamsProvider for MockOptQSPayloadProvider { + fn get_params(&self) -> Option<OptQSPayloadPullParams> { + None + } +} + #[tokio::test] async fn test_proposal_generation_empty_tree() { let signer = ValidatorSigner::random(None); @@ -47,6 +57,7 @@ async fn test_proposal_generation_empty_tree() { false, ValidatorTxnConfig::default_disabled(), true, + Arc::new(MockOptQSPayloadProvider {}), ); let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( RotatingProposer::new(vec![signer.author()], 1), @@ -92,6 +103,7 @@ async fn test_proposal_generation_parent() { false, ValidatorTxnConfig::default_disabled(), true, + Arc::new(MockOptQSPayloadProvider {}), ); let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( RotatingProposer::new(vec![inserter.signer().author()], 1), @@ -167,6 +179,7 @@ async fn test_old_proposal_generation() { false, ValidatorTxnConfig::default_disabled(), true, + Arc::new(MockOptQSPayloadProvider {}), ); let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( RotatingProposer::new(vec![inserter.signer().author()], 1), @@ -207,6 +220,7 @@ async fn test_correct_failed_authors() { false, ValidatorTxnConfig::default_disabled(), true, + Arc::new(MockOptQSPayloadProvider {}), ); let proposer_election = Arc::new(UnequivocalProposerElection::new(Arc::new( RotatingProposer::new(vec![author, peer1, peer2], 1), diff --git a/consensus/src/liveness/proposal_status_tracker.rs b/consensus/src/liveness/proposal_status_tracker.rs new file mode 100644 index 0000000000000..23f635e260fb8 --- /dev/null +++ b/consensus/src/liveness/proposal_status_tracker.rs @@ -0,0 +1,210 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use super::round_state::NewRoundReason; +use aptos_collections::BoundedVecDeque; +use aptos_consensus_types::{ + common::Author, payload_pull_params::OptQSPayloadPullParams, round_timeout::RoundTimeoutReason, +}; +use aptos_infallible::Mutex; +use std::{collections::HashSet, sync::Arc}; + +pub trait TPastProposalStatusTracker: Send + Sync { + fn push(&self, status: NewRoundReason); +} + +pub trait TOptQSPullParamsProvider: Send + Sync { + fn get_params(&self) -> Option<OptQSPayloadPullParams>; +} + +/// An exponential window based algorithm to decide whether to go optimistic or not, based on +/// a configurable number of past proposal statuses +/// +/// Initialize the window at 2. +/// - For each proposal failure, double the window up to a MAX size +/// - If there are no failures within the window, then propose optimistic batch +/// - If there are no failures up to MAX proposals, reset the window to 2.
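+///
+/// Worked example (illustrative numbers, not part of this patch): with the window at 2, a
+/// single `PayloadUnavailable` timeout doubles it to 4, so the next 4 rounds must complete
+/// without a payload-availability failure before optimistic batches are proposed again;
+/// once every status in the tracked history is failure-free, the window resets to 2.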
+pub struct ExponentialWindowFailureTracker { + window: usize, + max_window: usize, + past_round_statuses: BoundedVecDeque<NewRoundReason>, + last_consecutive_success_count: usize, + ordered_authors: Vec<Author>, +} + +impl ExponentialWindowFailureTracker { + pub(crate) fn new(max_window: usize, ordered_authors: Vec<Author>) -> Self { + Self { + window: 2, + max_window, + past_round_statuses: BoundedVecDeque::new(max_window), + last_consecutive_success_count: 0, + ordered_authors, + } + } + + pub(crate) fn push(&mut self, status: NewRoundReason) { + self.past_round_statuses.push_back(status); + self.compute_failure_window(); + } + + fn last_consecutive_statuses_matching<F>(&self, matcher: F) -> usize + where + F: Fn(&NewRoundReason) -> bool, + { + self.past_round_statuses + .iter() + .rev() + .take_while(|reason| matcher(reason)) + .count() + } + + fn compute_failure_window(&mut self) { + self.last_consecutive_success_count = self.last_consecutive_statuses_matching(|reason| { + !matches!( + reason, + NewRoundReason::Timeout(RoundTimeoutReason::PayloadUnavailable { .. }) + ) + }); + if self.last_consecutive_success_count == 0 { + self.window *= 2; + self.window = self.window.min(self.max_window); + } else if self.last_consecutive_success_count == self.past_round_statuses.len() { + self.window = 2; + } + } + + fn get_exclude_authors(&self) -> HashSet<Author> { + let mut exclude_authors = HashSet::new(); + + let limit = self.window; + for round_reason in self.past_round_statuses.iter().rev().take(limit) { + if let NewRoundReason::Timeout(RoundTimeoutReason::PayloadUnavailable { + missing_authors, + }) = round_reason + { + for author_idx in missing_authors.iter_ones() { + if let Some(author) = self.ordered_authors.get(author_idx) { + exclude_authors.insert(*author); + } + } + } + } + + exclude_authors + } +} + +impl TPastProposalStatusTracker for Mutex<ExponentialWindowFailureTracker> { + fn push(&self, status: NewRoundReason) { + self.lock().push(status) + } +} + +pub struct OptQSPullParamsProvider { + enable_opt_qs: bool, + failure_tracker: Arc<Mutex<ExponentialWindowFailureTracker>>, +} + +impl OptQSPullParamsProvider { + pub fn new( + enable_opt_qs: bool, + failure_tracker: Arc<Mutex<ExponentialWindowFailureTracker>>, + ) -> Self { + Self { + enable_opt_qs, + failure_tracker, + } + } +} + +impl TOptQSPullParamsProvider for OptQSPullParamsProvider { + fn get_params(&self) -> Option<OptQSPayloadPullParams> { + if !self.enable_opt_qs { + return None; + } + + let tracker = self.failure_tracker.lock(); + + if tracker.last_consecutive_success_count < tracker.window { + return None; + } + + let exclude_authors = tracker.get_exclude_authors(); + Some(OptQSPayloadPullParams { + exclude_authors, + minimum_batch_age_usecs: 50_000_000, + }) + } +} + +#[cfg(test)] +mod tests { + use super::ExponentialWindowFailureTracker; + use crate::liveness::round_state::NewRoundReason; + use aptos_bitvec::BitVec; + use aptos_consensus_types::round_timeout::RoundTimeoutReason; + use aptos_types::validator_verifier::random_validator_verifier; + + #[test] + fn test_exponential_window_failure_tracker() { + let (_signers, verifier) = random_validator_verifier(4, None, false); + let mut tracker = + ExponentialWindowFailureTracker::new(100, verifier.get_ordered_account_addresses()); + assert_eq!(tracker.max_window, 100); + + tracker.push(NewRoundReason::QCReady); + assert_eq!(tracker.window, 2); + assert_eq!(tracker.last_consecutive_success_count, 1); + + tracker.push(NewRoundReason::QCReady); + assert_eq!(tracker.window, 2); + assert_eq!(tracker.last_consecutive_success_count, 2); + + tracker.push(NewRoundReason::QCReady); + assert_eq!(tracker.window, 2);
assert_eq!(tracker.last_consecutive_success_count, 3); + + tracker.push(NewRoundReason::Timeout( + RoundTimeoutReason::ProposalNotReceived, + )); + assert_eq!(tracker.window, 2); + assert_eq!(tracker.last_consecutive_success_count, 4); + + tracker.push(NewRoundReason::Timeout(RoundTimeoutReason::NoQC)); + assert_eq!(tracker.window, 2); + assert_eq!(tracker.last_consecutive_success_count, 5); + + tracker.push(NewRoundReason::Timeout(RoundTimeoutReason::Unknown)); + assert_eq!(tracker.window, 2); + assert_eq!(tracker.last_consecutive_success_count, 6); + + tracker.push(NewRoundReason::Timeout( + RoundTimeoutReason::PayloadUnavailable { + missing_authors: BitVec::with_num_bits(4), + }, + )); + assert_eq!(tracker.window, 4); + assert_eq!(tracker.last_consecutive_success_count, 0); + + tracker.push(NewRoundReason::QCReady); + assert_eq!(tracker.window, 4); + assert_eq!(tracker.last_consecutive_success_count, 1); + + // Check that the window does not grow beyond max_window + for _ in 0..10 { + tracker.push(NewRoundReason::Timeout( + RoundTimeoutReason::PayloadUnavailable { + missing_authors: BitVec::with_num_bits(4), + }, + )); + } + assert_eq!(tracker.window, tracker.max_window); + + for _ in 0..tracker.max_window { + tracker.push(NewRoundReason::QCReady); + } + assert_eq!(tracker.window, 2); + assert_eq!(tracker.last_consecutive_success_count, tracker.max_window); + } +} diff --git a/consensus/src/liveness/round_state.rs b/consensus/src/liveness/round_state.rs index 37912ecbdaaa7..2c7ea4c198da6 100644 --- a/consensus/src/liveness/round_state.rs +++ b/consensus/src/liveness/round_state.rs @@ -8,8 +8,11 @@ use crate::{ util::time_service::{SendTask, TimeService}, }; use aptos_consensus_types::{ - common::Round, round_timeout::RoundTimeout, sync_info::SyncInfo, - timeout_2chain::TwoChainTimeoutWithPartialSignatures, vote::Vote, + common::Round, + round_timeout::{RoundTimeout, RoundTimeoutReason}, + sync_info::SyncInfo, + timeout_2chain::TwoChainTimeoutWithPartialSignatures, + vote::Vote, }; use aptos_crypto::HashValue; use aptos_logger::{prelude::*, Schema}; @@ -19,17 +22,17 @@ use serde::Serialize; use std::{fmt, sync::Arc, time::Duration}; /// A reason for starting a new round: introduced for monitoring / debug purposes. -#[derive(Serialize, Debug, PartialEq, Eq)] +#[derive(Serialize, Debug, PartialEq, Eq, Clone)] pub enum NewRoundReason { QCReady, - Timeout, + Timeout(RoundTimeoutReason), } impl fmt::Display for NewRoundReason { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { NewRoundReason::QCReady => write!(f, "QCReady"), - NewRoundReason::Timeout => write!(f, "TCReady"), + NewRoundReason::Timeout(_) => write!(f, "TCReady"), } } } @@ -240,7 +243,11 @@ impl RoundState { /// Notify the RoundState about the potentially new QC, TC, and highest ordered round. /// Note that some of these values might not be available by the caller. 
- pub fn process_certificates(&mut self, sync_info: SyncInfo) -> Option<NewRoundEvent> { + pub fn process_certificates( + &mut self, + sync_info: SyncInfo, + verifier: &ValidatorVerifier, + ) -> Option<NewRoundEvent> { if sync_info.highest_ordered_round() > self.highest_ordered_round { self.highest_ordered_round = sync_info.highest_ordered_round(); } @@ -254,13 +261,21 @@ impl RoundState { self.vote_sent = None; self.timeout_sent = None; let timeout = self.setup_timeout(1); + + let (prev_round_timeout_votes, prev_round_timeout_reason) = prev_round_timeout_votes + .map(|votes| votes.unpack_aggregate(verifier)) + .unzip(); + // The new round reason is QCReady in case both QC.round + 1 == new_round, otherwise // it's Timeout and TC.round + 1 == new_round. let new_round_reason = if sync_info.highest_certified_round() + 1 == new_round { NewRoundReason::QCReady } else { - NewRoundReason::Timeout + let prev_round_timeout_reason = + prev_round_timeout_reason.unwrap_or(RoundTimeoutReason::Unknown); + NewRoundReason::Timeout(prev_round_timeout_reason) }; + let new_round_event = NewRoundEvent { round: self.current_round, reason: new_round_reason, diff --git a/consensus/src/liveness/round_state_test.rs b/consensus/src/liveness/round_state_test.rs index ad2eec8809e53..10027e86c351e 100644 --- a/consensus/src/liveness/round_state_test.rs +++ b/consensus/src/liveness/round_state_test.rs @@ -11,6 +11,7 @@ use crate::{ use aptos_consensus_types::{ common::Round, quorum_cert::QuorumCert, + round_timeout::RoundTimeoutReason, sync_info::SyncInfo, timeout_2chain::{TwoChainTimeout, TwoChainTimeoutCertificate}, vote_data::VoteData, @@ -20,6 +21,7 @@ use aptos_types::{ aggregate_signature::AggregateSignature, block_info::BlockInfo, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, + validator_verifier::random_validator_verifier, }; use futures::StreamExt; use std::{sync::Arc, time::Duration}; @@ -40,10 +42,11 @@ fn test_round_time_interval() { #[tokio::test] /// Verify that RoundState properly outputs local timeout events upon timeout async fn test_basic_timeout() { + let (_, verifier) = random_validator_verifier(1, None, false); let (mut pm, mut timeout_rx) = make_round_state(); // jump start the round_state - pm.process_certificates(generate_sync_info(Some(0), None, None)); + pm.process_certificates(generate_sync_info(Some(0), None, None), &verifier); for _ in 0..2 { let round = timeout_rx.next().await.unwrap(); // Here we just test timeout send retry, @@ -55,30 +58,31 @@ async fn test_basic_timeout() { #[test] fn test_round_event_generation() { + let (_, verifier) = random_validator_verifier(1, None, false); let (mut pm, _) = make_round_state(); // Happy path with new QC expect_qc( 2, - pm.process_certificates(generate_sync_info(Some(1), None, None)), + pm.process_certificates(generate_sync_info(Some(1), None, None), &verifier), ); // Old QC does not generate anything assert!(pm - .process_certificates(generate_sync_info(Some(1), None, None)) + .process_certificates(generate_sync_info(Some(1), None, None), &verifier) .is_none()); // A TC for a higher round expect_timeout( 3, - pm.process_certificates(generate_sync_info(None, Some(2), None)), + pm.process_certificates(generate_sync_info(None, Some(2), None), &verifier), ); // In case both QC and TC are present choose the one with the higher value expect_timeout( 4, - pm.process_certificates(generate_sync_info(Some(2), Some(3), None)), + pm.process_certificates(generate_sync_info(Some(2), Some(3), None), &verifier), ); // In case both QC and TC are present with the same value, choose QC
expect_qc( 5, - pm.process_certificates(generate_sync_info(Some(4), Some(4), None)), + pm.process_certificates(generate_sync_info(Some(4), Some(4), None), &verifier), ); } @@ -101,7 +105,10 @@ fn expect_qc(round: Round, event: Option) { fn expect_timeout(round: Round, event: Option) { let event = event.unwrap(); assert_eq!(round, event.round); - assert_eq!(event.reason, NewRoundReason::Timeout); + assert_eq!( + event.reason, + NewRoundReason::Timeout(RoundTimeoutReason::Unknown) + ); } fn generate_sync_info( diff --git a/consensus/src/payload_client/mixed.rs b/consensus/src/payload_client/mixed.rs index 5e35b5aff6bae..afc981ab4a3b8 100644 --- a/consensus/src/payload_client/mixed.rs +++ b/consensus/src/payload_client/mixed.rs @@ -1,12 +1,13 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::PayloadPullParameters; use crate::{ error::QuorumStoreError, payload_client::{user::UserPayloadClient, PayloadClient}, }; -use aptos_consensus_types::{common::Payload, utils::PayloadTxnsSize}; +use aptos_consensus_types::{ + common::Payload, payload_pull_params::PayloadPullParameters, utils::PayloadTxnsSize, +}; use aptos_logger::debug; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; use aptos_validator_transaction_pool::TransactionFilter; @@ -112,9 +113,11 @@ impl PayloadClient for MixedPayloadClient { mod tests { use crate::payload_client::{ mixed::MixedPayloadClient, user, validator::DummyValidatorTxnClient, PayloadClient, - PayloadPullParameters, }; - use aptos_consensus_types::common::{Payload, PayloadFilter}; + use aptos_consensus_types::{ + common::{Payload, PayloadFilter}, + payload_pull_params::PayloadPullParameters, + }; use aptos_types::{on_chain_config::ValidatorTxnConfig, validator_txn::ValidatorTransaction}; use aptos_validator_transaction_pool as vtxn_pool; use std::{collections::HashSet, sync::Arc, time::Duration}; diff --git a/consensus/src/payload_client/mod.rs b/consensus/src/payload_client/mod.rs index 1b769faa9c36a..e38ba3194329f 100644 --- a/consensus/src/payload_client/mod.rs +++ b/consensus/src/payload_client/mod.rs @@ -2,88 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 use crate::error::QuorumStoreError; -use aptos_consensus_types::{ - common::{Payload, PayloadFilter}, - utils::PayloadTxnsSize, -}; +use aptos_consensus_types::{common::Payload, payload_pull_params::PayloadPullParameters}; use aptos_types::validator_txn::ValidatorTransaction; use aptos_validator_transaction_pool::TransactionFilter; -use core::fmt; use futures::future::BoxFuture; -use std::time::Duration; pub mod mixed; pub mod user; pub mod validator; -pub struct PayloadPullParameters { - pub max_poll_time: Duration, - pub max_txns: PayloadTxnsSize, - pub max_txns_after_filtering: u64, - pub soft_max_txns_after_filtering: u64, - pub max_inline_txns: PayloadTxnsSize, - pub opt_batch_txns_pct: u8, - pub user_txn_filter: PayloadFilter, - pub pending_ordering: bool, - pub pending_uncommitted_blocks: usize, - pub recent_max_fill_fraction: f32, - pub block_timestamp: Duration, -} - -impl PayloadPullParameters { - #[cfg(test)] - fn new_for_test( - max_poll_time: Duration, - max_txns: u64, - max_txns_bytes: u64, - max_txns_after_filtering: u64, - soft_max_txns_after_filtering: u64, - max_inline_txns: u64, - max_inline_txns_bytes: u64, - user_txn_filter: PayloadFilter, - pending_ordering: bool, - pending_uncommitted_blocks: usize, - recent_max_fill_fraction: f32, - block_timestamp: Duration, - ) -> Self { - Self { - max_poll_time, - max_txns: 
PayloadTxnsSize::new(max_txns, max_txns_bytes), - max_txns_after_filtering, - soft_max_txns_after_filtering, - max_inline_txns: PayloadTxnsSize::new(max_inline_txns, max_inline_txns_bytes), - opt_batch_txns_pct: 0, - user_txn_filter, - pending_ordering, - pending_uncommitted_blocks, - recent_max_fill_fraction, - block_timestamp, - } - } -} - -impl fmt::Debug for PayloadPullParameters { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("PayloadPullParameters") - .field("max_poll_time", &self.max_poll_time) - .field("max_items", &self.max_txns) - .field("max_unique_items", &self.max_txns_after_filtering) - .field( - "soft_max_txns_after_filtering", - &self.soft_max_txns_after_filtering, - ) - .field("max_inline_items", &self.max_inline_txns) - .field("pending_ordering", &self.pending_ordering) - .field( - "pending_uncommitted_blocks", - &self.pending_uncommitted_blocks, - ) - .field("recent_max_fill_fraction", &self.recent_max_fill_fraction) - .field("block_timestamp", &self.block_timestamp) - .finish() - } -} - #[async_trait::async_trait] pub trait PayloadClient: Send + Sync { async fn pull_payload( diff --git a/consensus/src/payload_client/user/mod.rs b/consensus/src/payload_client/user/mod.rs index 9d6cafbed2322..e3f2ca8acba43 100644 --- a/consensus/src/payload_client/user/mod.rs +++ b/consensus/src/payload_client/user/mod.rs @@ -1,9 +1,8 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::PayloadPullParameters; use crate::error::QuorumStoreError; -use aptos_consensus_types::common::Payload; +use aptos_consensus_types::{common::Payload, payload_pull_params::PayloadPullParameters}; #[cfg(test)] use aptos_types::transaction::SignedTransaction; use futures::future::BoxFuture; diff --git a/consensus/src/payload_client/user/quorum_store_client.rs b/consensus/src/payload_client/user/quorum_store_client.rs index b145ba1f76f61..c8c541208c863 100644 --- a/consensus/src/payload_client/user/quorum_store_client.rs +++ b/consensus/src/payload_client/user/quorum_store_client.rs @@ -2,13 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - counters::WAIT_FOR_FULL_BLOCKS_TRIGGERED, - error::QuorumStoreError, - monitor, - payload_client::{user::UserPayloadClient, PayloadPullParameters}, + counters::WAIT_FOR_FULL_BLOCKS_TRIGGERED, error::QuorumStoreError, monitor, + payload_client::user::UserPayloadClient, }; use aptos_consensus_types::{ common::{Payload, PayloadFilter}, + payload_pull_params::{OptQSPayloadPullParams, PayloadPullParameters}, request_response::{GetPayloadCommand, GetPayloadRequest, GetPayloadResponse}, utils::PayloadTxnsSize, }; @@ -52,7 +51,7 @@ impl QuorumStoreClient { max_txns_after_filtering: u64, soft_max_txns_after_filtering: u64, max_inline_txns: PayloadTxnsSize, - txns_with_proofs_pct: u8, + maybe_optqs_payload_pull_params: Option, return_non_full: bool, exclude_payloads: PayloadFilter, block_timestamp: Duration, @@ -62,7 +61,7 @@ impl QuorumStoreClient { max_txns, max_txns_after_filtering, soft_max_txns_after_filtering, - opt_batch_txns_pct: txns_with_proofs_pct, + maybe_optqs_payload_pull_params, max_inline_txns, filter: exclude_payloads, return_non_full, @@ -119,7 +118,7 @@ impl UserPayloadClient for QuorumStoreClient { params.max_txns_after_filtering, params.soft_max_txns_after_filtering, params.max_inline_txns, - params.opt_batch_txns_pct, + params.maybe_optqs_payload_pull_params.clone(), return_non_full || return_empty || done, params.user_txn_filter.clone(), params.block_timestamp, diff --git 
a/consensus/src/payload_manager.rs +++ b/consensus/src/payload_manager.rs @@ -10,6 +10,7 @@ use crate::{ counters, quorum_store::{batch_store::BatchReader, quorum_store_coordinator::CoordinatorCommand}, }; +use aptos_bitvec::BitVec; use aptos_consensus_types::{ block::Block, common::{DataStatus, Payload, ProofWithData, Round}, @@ -28,7 +29,7 @@ use async_trait::async_trait; use futures::{channel::mpsc::Sender, FutureExt}; use itertools::Itertools; use std::{ - collections::{btree_map::Entry, BTreeMap}, + collections::{btree_map::Entry, BTreeMap, HashMap}, ops::Deref, sync::Arc, }; @@ -49,7 +50,7 @@ pub trait TPayloadManager: Send + Sync { /// Check if the corresponding transactions are available. This is specific to payload /// manager implementations. For optimistic quorum store, we only check if optimistic /// batches are available locally. - fn check_payload_availability(&self, block: &Block) -> bool; + fn check_payload_availability(&self, block: &Block) -> Result<(), BitVec>; /// Get the transactions in a block's payload. This function returns a vector of transactions. async fn get_transactions( @@ -73,8 +74,8 @@ impl TPayloadManager for DirectMempoolPayloadManager { fn prefetch_payload_data(&self, _payload: &Payload, _timestamp: u64) {} - fn check_payload_availability(&self, _block: &Block) -> bool { - true + fn check_payload_availability(&self, _block: &Block) -> Result<(), BitVec> { + Ok(()) } async fn get_transactions( @@ -104,6 +105,7 @@ pub struct QuorumStorePayloadManager { coordinator_tx: Sender<CoordinatorCommand>, maybe_consensus_publisher: Option<Arc<ConsensusPublisher>>, ordered_authors: Vec<PeerId>, + address_to_validator_index: HashMap<AccountAddress, usize>, } impl QuorumStorePayloadManager { @@ -112,12 +114,14 @@ impl QuorumStorePayloadManager { coordinator_tx: Sender<CoordinatorCommand>, maybe_consensus_publisher: Option<Arc<ConsensusPublisher>>, ordered_authors: Vec<PeerId>, + address_to_validator_index: HashMap<AccountAddress, usize>, ) -> Self { Self { batch_reader, coordinator_tx, maybe_consensus_publisher, ordered_authors, + address_to_validator_index, } } @@ -295,17 +299,17 @@ impl TPayloadManager for QuorumStorePayloadManager { }; } - fn check_payload_availability(&self, block: &Block) -> bool { + fn check_payload_availability(&self, block: &Block) -> Result<(), BitVec> { let Some(payload) = block.payload() else { - return true; + return Ok(()); }; match payload { Payload::DirectMempool(_) => { unreachable!("QuorumStore doesn't support DirectMempool payload") }, - Payload::InQuorumStore(_) => true, - Payload::InQuorumStoreWithLimit(_) => true, + Payload::InQuorumStore(_) => Ok(()), + Payload::InQuorumStoreWithLimit(_) => Ok(()), Payload::QuorumStoreInlineHybrid(inline_batches, proofs, _) => { fn update_availability_metrics<'a>( batch_reader: &Arc<dyn BatchReader>, @@ -352,15 +356,24 @@ impl TPayloadManager for QuorumStorePayloadManager { // The payload is considered available because it contains only proofs that guarantee network availability // or inlined transactions.
- true + Ok(()) }, Payload::OptQuorumStore(opt_qs_payload) => { + let mut missing_authors = BitVec::with_num_bits(self.ordered_authors.len() as u16); for batch in opt_qs_payload.opt_batches().deref() { if self.batch_reader.exists(batch.digest()).is_none() { - return false; + let index = *self + .address_to_validator_index + .get(&batch.author()) + .expect("Payload author should have been verified"); + missing_authors.set(index as u16); } } - true + if missing_authors.all_zeros() { + Ok(()) + } else { + Err(missing_authors) + } }, } } @@ -450,7 +463,7 @@ impl TPayloadManager for QuorumStorePayloadManager { ) .await?; let inline_batch_txns = opt_qs_payload.inline_batches().transactions(); - let all_txns = [opt_batch_txns, proof_batch_txns, inline_batch_txns].concat(); + let all_txns = [proof_batch_txns, opt_batch_txns, inline_batch_txns].concat(); BlockTransactionPayload::new_opt_quorum_store( all_txns, opt_qs_payload.proof_with_data().deref().clone(), @@ -733,7 +746,7 @@ impl TPayloadManager for ConsensusObserverPayloadManager { // noop } - fn check_payload_availability(&self, _block: &Block) -> bool { + fn check_payload_availability(&self, _block: &Block) -> Result<(), BitVec> { unreachable!("this method isn't used in ConsensusObserver") } diff --git a/consensus/src/pending_votes.rs b/consensus/src/pending_votes.rs index 221ed6bca1970..e6651681766b3 100644 --- a/consensus/src/pending_votes.rs +++ b/consensus/src/pending_votes.rs @@ -9,14 +9,17 @@ //! Votes are automatically dropped when the structure goes out of scope. use crate::counters; +use aptos_bitvec::BitVec; use aptos_consensus_types::{ common::Author, quorum_cert::QuorumCert, - round_timeout::RoundTimeout, - timeout_2chain::{TwoChainTimeoutCertificate, TwoChainTimeoutWithPartialSignatures}, + round_timeout::{RoundTimeout, RoundTimeoutReason}, + timeout_2chain::{ + TwoChainTimeout, TwoChainTimeoutCertificate, TwoChainTimeoutWithPartialSignatures, + }, vote::Vote, }; -use aptos_crypto::{hash::CryptoHash, HashValue}; +use aptos_crypto::{bls12381, hash::CryptoHash, HashValue}; use aptos_logger::prelude::*; use aptos_types::{ ledger_info::{LedgerInfoWithSignatures, LedgerInfoWithUnverifiedSignatures}, @@ -59,6 +62,106 @@ pub enum VoteStatus { NotEnoughVotes(LedgerInfoWithUnverifiedSignatures), } +#[derive(Debug)] +pub(super) struct TwoChainTimeoutVotes { + timeout_reason: HashMap<Author, RoundTimeoutReason>, + partial_2chain_tc: TwoChainTimeoutWithPartialSignatures, +} + +impl TwoChainTimeoutVotes { + pub(super) fn new(timeout: TwoChainTimeout) -> Self { + Self { + partial_2chain_tc: TwoChainTimeoutWithPartialSignatures::new(timeout.clone()), + timeout_reason: HashMap::new(), + } + } + + pub(super) fn add( + &mut self, + author: Author, + timeout: TwoChainTimeout, + signature: bls12381::Signature, + reason: RoundTimeoutReason, + ) { + self.partial_2chain_tc.add(author, timeout, signature); + self.timeout_reason.entry(author).or_insert(reason); + } + + pub(super) fn partial_2chain_tc_mut(&mut self) -> &mut TwoChainTimeoutWithPartialSignatures { + &mut self.partial_2chain_tc + } + + fn aggregated_timeout_reason(&self, verifier: &ValidatorVerifier) -> RoundTimeoutReason { + let mut reason_voting_power: HashMap<RoundTimeoutReason, u128> = HashMap::new(); + let mut missing_batch_authors: HashMap<usize, u128> = HashMap::new(); + // let ordered_authors = verifier.get_ordered_account_addresses(); + for (author, reason) in &self.timeout_reason { + // To aggregate the reason, we only care about the variant type itself and + // exclude any data within the variants.
+ let reason_key = match reason { + reason @ RoundTimeoutReason::Unknown + | reason @ RoundTimeoutReason::ProposalNotReceived + | reason @ RoundTimeoutReason::NoQC => reason.clone(), + RoundTimeoutReason::PayloadUnavailable { missing_authors } => { + for missing_idx in missing_authors.iter_ones() { + *missing_batch_authors.entry(missing_idx).or_default() += + verifier.get_voting_power(author).unwrap_or_default() as u128; + } + RoundTimeoutReason::PayloadUnavailable { + // Since we care only about the variant type, we replace the bitvec + // with a placeholder. + missing_authors: BitVec::with_num_bits(verifier.len() as u16), + } + }, + }; + *reason_voting_power.entry(reason_key).or_default() += + verifier.get_voting_power(author).unwrap_or_default() as u128; + } + // The aggregated timeout reason is the reason with the most voting power received from + // at least f+1 peers by voting power. If such voting power does not exist, then the + // reason is unknown. + + reason_voting_power + .into_iter() + .max_by_key(|(_, voting_power)| *voting_power) + .filter(|(_, voting_power)| { + verifier + .check_aggregated_voting_power(*voting_power, false) + .is_ok() + }) + .map(|(reason, _)| { + // If the aggregated reason is due to unavailable payload, we will compute the + // aggregated missing authors bitvec counting batch authors that have been reported + // missing by minority peers. + if matches!(reason, RoundTimeoutReason::PayloadUnavailable { .. }) { + let mut aggregated_bitvec = BitVec::with_num_bits(verifier.len() as u16); + for (author_idx, voting_power) in missing_batch_authors { + if verifier + .check_aggregated_voting_power(voting_power, false) + .is_ok() + { + aggregated_bitvec.set(author_idx as u16); + } + } + RoundTimeoutReason::PayloadUnavailable { + missing_authors: aggregated_bitvec, + } + } else { + reason + } + }) + .unwrap_or(RoundTimeoutReason::Unknown) + } + + pub(crate) fn unpack_aggregate( + self, + verifier: &ValidatorVerifier, + ) -> (TwoChainTimeoutWithPartialSignatures, RoundTimeoutReason) { + let aggregated_reason = self.aggregated_timeout_reason(verifier); + (self.partial_2chain_tc, aggregated_reason) + } +} + /// A PendingVotes structure keeps track of votes pub struct PendingVotes { /// Maps LedgerInfo digest to associated signatures. @@ -66,7 +169,7 @@ pub struct PendingVotes { /// or due to different NIL proposals (clients can have a different view of what block to extend). li_digest_to_votes: HashMap<HashValue, (usize, VoteStatus)>, /// Tracks all the signatures of the 2-chain timeout for the given round. - maybe_partial_2chain_tc: Option<TwoChainTimeoutWithPartialSignatures>, + maybe_2chain_timeout_votes: Option<TwoChainTimeoutVotes>, /// Map of Author to (vote, li_digest). This is useful to discard multiple votes. author_to_vote: HashMap<Author, (Vote, HashValue)>, /// Whether we have echoed timeout for this round.
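In short, the rule implemented by aggregated_timeout_reason above is: the reason variant backed by the most voting power wins, but only if that power clears the f+1 threshold; otherwise the round's reason collapses to Unknown. A minimal standalone sketch of that rule (the Reason enum and the threshold parameter here are simplified stand-ins, not this patch's types):

    use std::collections::HashMap;

    #[derive(Clone, Hash, PartialEq, Eq, Debug)]
    enum Reason { Unknown, ProposalNotReceived, NoQC, PayloadUnavailable }

    /// Returns the reason with the most voting power, provided that power reaches
    /// the threshold (f+1 by voting power in the real implementation).
    fn aggregate_reason(votes: &[(Reason, u128)], threshold: u128) -> Reason {
        let mut power: HashMap<Reason, u128> = HashMap::new();
        for (reason, voting_power) in votes {
            *power.entry(reason.clone()).or_default() += voting_power;
        }
        power
            .into_iter()
            .max_by_key(|(_, p)| *p)
            .filter(|(_, p)| *p >= threshold) // a minority quorum must vouch for it
            .map(|(reason, _)| reason)
            .unwrap_or(Reason::Unknown)
    }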
@@ -78,7 +181,7 @@ impl PendingVotes { pub fn new() -> Self { PendingVotes { li_digest_to_votes: HashMap::new(), - maybe_partial_2chain_tc: None, + maybe_2chain_timeout_votes: None, author_to_vote: HashMap::new(), echo_timeout: false, } @@ -119,10 +222,17 @@ impl PendingVotes { .with_label_values(&[&round_timeout.author().to_string()]) .set(cur_round as i64); - let partial_tc = self - .maybe_partial_2chain_tc - .get_or_insert_with(|| TwoChainTimeoutWithPartialSignatures::new(timeout.clone())); - partial_tc.add(round_timeout.author(), timeout.clone(), signature.clone()); + let two_chain_votes = self + .maybe_2chain_timeout_votes + .get_or_insert_with(|| TwoChainTimeoutVotes::new(timeout.clone())); + two_chain_votes.add( + round_timeout.author(), + timeout.clone(), + signature.clone(), + round_timeout.reason().clone(), + ); + + let partial_tc = two_chain_votes.partial_2chain_tc_mut(); let tc_voting_power = match validator_verifier.check_voting_power(partial_tc.signers(), true) { Ok(_) => { @@ -319,10 +429,17 @@ impl PendingVotes { .with_label_values(&[&vote.author().to_string()]) .set(cur_round); - let partial_tc = self - .maybe_partial_2chain_tc - .get_or_insert_with(|| TwoChainTimeoutWithPartialSignatures::new(timeout.clone())); - partial_tc.add(vote.author(), timeout.clone(), signature.clone()); + let two_chain_votes = self + .maybe_2chain_timeout_votes + .get_or_insert_with(|| TwoChainTimeoutVotes::new(timeout.clone())); + two_chain_votes.add( + vote.author(), + timeout.clone(), + signature.clone(), + RoundTimeoutReason::Unknown, + ); + + let partial_tc = two_chain_votes.partial_2chain_tc_mut(); let tc_voting_power = match validator_verifier.check_voting_power(partial_tc.signers(), true) { Ok(_) => { @@ -362,12 +479,7 @@ impl PendingVotes { VoteReceptionResult::VoteAdded(voting_power) } - pub fn drain_votes( - &mut self, - ) -> ( - Vec<(HashValue, VoteStatus)>, - Option, - ) { + pub fn drain_votes(&mut self) -> (Vec<(HashValue, VoteStatus)>, Option) { for (hash_index, _) in self.li_digest_to_votes.values() { let hash_index_str = hash_index_to_str(*hash_index); for author in self.author_to_vote.keys() { @@ -376,8 +488,8 @@ impl PendingVotes { .set(0_f64); } } - if let Some(partial_tc) = &self.maybe_partial_2chain_tc { - for author in partial_tc.signers() { + if let Some(votes) = &self.maybe_2chain_timeout_votes { + for author in votes.partial_2chain_tc.signers() { counters::CONSENSUS_CURRENT_ROUND_TIMEOUT_VOTED_POWER .with_label_values(&[&author.to_string()]) .set(0_f64); @@ -389,7 +501,7 @@ impl PendingVotes { .drain() .map(|(key, (_, vote_status))| (key, vote_status)) .collect(), - self.maybe_partial_2chain_tc.take(), + self.maybe_2chain_timeout_votes.take(), ) } } @@ -429,9 +541,9 @@ impl fmt::Display for PendingVotes { // collect timeout votes let timeout_votes = self - .maybe_partial_2chain_tc + .maybe_2chain_timeout_votes .as_ref() - .map(|partial_tc| partial_tc.signers().collect::>()); + .map(|votes| votes.partial_2chain_tc.signers().collect::>()); if let Some(authors) = timeout_votes { write!(f, "{} timeout {:?}", authors.len(), authors)?; diff --git a/consensus/src/pending_votes_test.rs b/consensus/src/pending_votes_test.rs new file mode 100644 index 0000000000000..e2fc1de8d3ff6 --- /dev/null +++ b/consensus/src/pending_votes_test.rs @@ -0,0 +1,161 @@ +// Copyright (c) Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +use crate::pending_votes::TwoChainTimeoutVotes; +use aptos_bitvec::BitVec; +use aptos_consensus_types::{ + quorum_cert::QuorumCert, 
round_timeout::RoundTimeoutReason, timeout_2chain::TwoChainTimeout, +}; +use aptos_types::validator_verifier::{ + random_validator_verifier, random_validator_verifier_with_voting_power, +}; +use itertools::Itertools; + +#[test] +fn test_two_chain_timeout_votes_aggregation() { + let epoch = 1; + let round = 10; + let (signers, verifier) = random_validator_verifier(4, None, false); + let all_reasons = [ + RoundTimeoutReason::NoQC, + RoundTimeoutReason::ProposalNotReceived, + RoundTimeoutReason::Unknown, + RoundTimeoutReason::PayloadUnavailable { + missing_authors: BitVec::with_num_bits(signers.len() as u16), + }, + ]; + + // Majority nodes timeout with same reason + for reason in &all_reasons { + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let mut two_chain_timeout_votes = TwoChainTimeoutVotes::new(timeout); + for signer in signers.iter().take(3) { + let author = signer.author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signer.sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add(author, timeout, signature, reason.clone()); + } + let (_, aggregate_timeout_reason) = two_chain_timeout_votes.unpack_aggregate(&verifier); + assert_eq!(aggregate_timeout_reason, reason.clone()); + } + + // Minority nodes timeout with same reason and one with different reason + for permut in all_reasons.iter().permutations(2) { + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let mut two_chain_timeout_votes = TwoChainTimeoutVotes::new(timeout); + for signer in signers.iter().take(2) { + let author = signer.author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signer.sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add(author, timeout, signature, permut[0].clone()); + } + + let author = signers[2].author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signers[2].sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add(author, timeout, signature, permut[1].clone()); + + let (_, aggregate_timeout_reason) = two_chain_timeout_votes.unpack_aggregate(&verifier); + assert_eq!(aggregate_timeout_reason, permut[0].clone()); + } +} + +#[test] +fn test_two_chain_timeout_aggregate_missing_authors() { + let epoch = 1; + let round = 10; + let (signers, verifier) = + random_validator_verifier_with_voting_power(4, None, false, &[3, 3, 2, 1]); + + let permutations = [true, true, false, false] + .iter() + .copied() + .permutations(4) + .unique(); + + // Minority nodes report the same set of missing authors + for permut in permutations { + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let mut two_chain_timeout_votes = TwoChainTimeoutVotes::new(timeout); + for signer in signers.iter().take(2) { + let author = signer.author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signer.sign(&timeout.signing_format()).unwrap(); + let reason = RoundTimeoutReason::PayloadUnavailable { + missing_authors: permut.clone().into(), + }; + two_chain_timeout_votes.add(author, timeout, signature, reason); + } + + let author = signers[2].author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signers[2].sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add(author, timeout, signature, RoundTimeoutReason::Unknown); + + let (_, aggregate_timeout_reason) = 
two_chain_timeout_votes.unpack_aggregate(&verifier); + + assert_eq!( + aggregate_timeout_reason, + RoundTimeoutReason::PayloadUnavailable { + missing_authors: permut.clone().into() + } + ); + } + + // Not enough votes to form a valid timeout reason + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let mut two_chain_timeout_votes = TwoChainTimeoutVotes::new(timeout); + + let author = signers[2].author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signers[2].sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add( + author, + timeout, + signature, + RoundTimeoutReason::PayloadUnavailable { + missing_authors: vec![true, false, false, false].into(), + }, + ); + + let (_, aggregate_timeout_reason) = two_chain_timeout_votes.unpack_aggregate(&verifier); + + assert_eq!(aggregate_timeout_reason, RoundTimeoutReason::Unknown); + + // Not enough nodes vote for the same node. + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let mut two_chain_timeout_votes = TwoChainTimeoutVotes::new(timeout); + + let author = signers[2].author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signers[2].sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add( + author, + timeout, + signature, + RoundTimeoutReason::PayloadUnavailable { + missing_authors: vec![false, true, false, false].into(), + }, + ); + + let author = signers[3].author(); + let timeout = TwoChainTimeout::new(epoch, round, QuorumCert::dummy()); + let signature = signers[3].sign(&timeout.signing_format()).unwrap(); + two_chain_timeout_votes.add( + author, + timeout, + signature, + RoundTimeoutReason::PayloadUnavailable { + missing_authors: vec![false, false, false, true].into(), + }, + ); + + let (_, aggregate_timeout_reason) = two_chain_timeout_votes.unpack_aggregate(&verifier); + + assert_eq!( + aggregate_timeout_reason, + RoundTimeoutReason::PayloadUnavailable { + missing_authors: BitVec::with_num_bits(4) + } + ); +} diff --git a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs index cfff3bb9c7061..c542d97d7d9e3 100644 --- a/consensus/src/quorum_store/batch_proof_queue.rs +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -7,7 +7,7 @@ use super::{ }; use crate::quorum_store::counters; use aptos_consensus_types::{ - common::TxnSummaryWithExpiration, + common::{Author, TxnSummaryWithExpiration}, payload::TDataInfo, proof_of_store::{BatchInfo, ProofOfStore}, utils::PayloadTxnsSize, @@ -69,10 +69,16 @@ pub struct BatchProofQueue { remaining_proofs: u64, remaining_local_txns: u64, remaining_local_proofs: u64, + + batch_expiry_gap_when_init_usecs: u64, } impl BatchProofQueue { - pub(crate) fn new(my_peer_id: PeerId, batch_store: Arc) -> Self { + pub(crate) fn new( + my_peer_id: PeerId, + batch_store: Arc, + batch_expiry_gap_when_init_usecs: u64, + ) -> Self { Self { my_peer_id, author_to_batches: HashMap::new(), @@ -85,6 +91,7 @@ impl BatchProofQueue { remaining_proofs: 0, remaining_local_txns: 0, remaining_local_proofs: 0, + batch_expiry_gap_when_init_usecs, } } @@ -389,11 +396,13 @@ impl BatchProofQueue { let (result, all_txns, unique_txns, is_full) = self.pull_internal( false, excluded_batches, + &HashSet::new(), max_txns, max_txns_after_filtering, soft_max_txns_after_filtering, return_non_full, block_timestamp, + None, ); let proof_of_stores: Vec<_> = result .into_iter() @@ -429,20 +438,24 @@ impl BatchProofQueue { pub fn 
pull_batches( &mut self, excluded_batches: &HashSet<BatchInfo>, + exclude_authors: &HashSet<PeerId>, max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, soft_max_txns_after_filtering: u64, return_non_full: bool, block_timestamp: Duration, + minimum_batch_age_usecs: Option<u64>, ) -> (Vec<BatchInfo>, PayloadTxnsSize, u64) { let (result, all_txns, unique_txns, _) = self.pull_internal( true, excluded_batches, + exclude_authors, max_txns, max_txns_after_filtering, soft_max_txns_after_filtering, return_non_full, block_timestamp, + minimum_batch_age_usecs, ); let batches = result.into_iter().map(|item| item.info.clone()).collect(); (batches, all_txns, unique_txns) } @@ -463,11 +476,13 @@ impl BatchProofQueue { ) { let (batches, all_txns, unique_txns) = self.pull_batches( excluded_batches, + &HashSet::new(), max_txns, max_txns_after_filtering, soft_max_txns_after_filtering, return_non_full, block_timestamp, + None, ); let mut result = Vec::new(); for batch in batches.into_iter() { @@ -489,11 +504,13 @@ impl BatchProofQueue { &mut self, batches_without_proofs: bool, excluded_batches: &HashSet<BatchInfo>, + exclude_authors: &HashSet<PeerId>, max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, soft_max_txns_after_filtering: u64, return_non_full: bool, block_timestamp: Duration, + min_batch_age_usecs: Option<u64>, ) -> (Vec<&QueueItem>, PayloadTxnsSize, u64, bool) { let mut result = Vec::new(); let mut cur_unique_txns = 0; @@ -515,10 +532,27 @@ impl BatchProofQueue { } } + let max_batch_creation_ts_usecs = min_batch_age_usecs + .map(|min_age| aptos_infallible::duration_since_epoch().as_micros() as u64 - min_age); let mut iters = vec![]; - for (_, batches) in self.author_to_batches.iter() { + for (_, batches) in self + .author_to_batches + .iter() + .filter(|(author, _)| !exclude_authors.contains(author)) + { let batch_iter = batches.iter().rev().filter_map(|(sort_key, info)| { if let Some(item) = self.items.get(&sort_key.batch_key) { + let batch_create_ts_usecs = + item.info.expiration() - self.batch_expiry_gap_when_init_usecs; + + // Ensure that the batch was created at least `min_batch_age_usecs` ago to + // reduce the chance of inline fetches.
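+ // Illustrative arithmetic (hypothetical values, not from this patch): with an + // expiry gap of 10s and min_age = 50s, a batch expiring at t = 70s was created + // at t = 60s, so it is skipped until `now - 50s >= 60s`, i.e. until t = 110s.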
+ if max_batch_creation_ts_usecs + .is_some_and(|max_create_ts| batch_create_ts_usecs > max_create_ts) + { + return None; + } + if item.is_committed() { return None; } diff --git a/consensus/src/quorum_store/proof_manager.rs b/consensus/src/quorum_store/proof_manager.rs index a33e0c1165292..7df9ab38b2783 100644 --- a/consensus/src/quorum_store/proof_manager.rs +++ b/consensus/src/quorum_store/proof_manager.rs @@ -34,7 +34,6 @@ pub struct ProofManager { back_pressure_total_proof_limit: u64, remaining_total_proof_num: u64, allow_batches_without_pos_in_proposal: bool, - enable_opt_quorum_store: bool, } impl ProofManager { @@ -44,16 +43,19 @@ impl ProofManager { back_pressure_total_proof_limit: u64, batch_store: Arc, allow_batches_without_pos_in_proposal: bool, - enable_opt_quorum_store: bool, + batch_expiry_gap_when_init_usecs: u64, ) -> Self { Self { - batch_proof_queue: BatchProofQueue::new(my_peer_id, batch_store), + batch_proof_queue: BatchProofQueue::new( + my_peer_id, + batch_store, + batch_expiry_gap_when_init_usecs, + ), back_pressure_total_txn_limit, remaining_total_txn_num: 0, back_pressure_total_proof_limit, remaining_total_proof_num: 0, allow_batches_without_pos_in_proposal, - enable_opt_quorum_store, } } @@ -106,10 +108,6 @@ impl ProofManager { PayloadFilter::InQuorumStore(proofs) => proofs, }; - let max_txns_with_proof = request - .max_txns - .compute_pct(100 - request.opt_batch_txns_pct); - let ( proof_block, txns_with_proof_size, @@ -117,7 +115,7 @@ impl ProofManager { proof_queue_fully_utilized, ) = self.batch_proof_queue.pull_proofs( &excluded_batches, - max_txns_with_proof, + request.max_txns, request.max_txns_after_filtering, request.soft_max_txns_after_filtering, request.return_non_full, @@ -129,26 +127,30 @@ impl ProofManager { counters::PROOF_QUEUE_FULLY_UTILIZED .observe(if proof_queue_fully_utilized { 1.0 } else { 0.0 }); - let (opt_batches, opt_batch_txns_size) = if self.enable_opt_quorum_store { + let (opt_batches, opt_batch_txns_size) = // TODO(ibalajiarun): Support unique txn calculation - let max_opt_batch_txns_size = request.max_txns - txns_with_proof_size; - let (opt_batches, opt_payload_size, _) = self.batch_proof_queue.pull_batches( - &excluded_batches - .iter() - .cloned() - .chain(proof_block.iter().map(|proof| proof.info().clone())) - .collect(), - max_opt_batch_txns_size, - request.max_txns_after_filtering, - request.soft_max_txns_after_filtering, - request.return_non_full, - request.block_timestamp, - ); + if let Some(ref params) = request.maybe_optqs_payload_pull_params { + let max_opt_batch_txns_size = request.max_txns - txns_with_proof_size; + let (opt_batches, opt_payload_size, _) = + self.batch_proof_queue.pull_batches( + &excluded_batches + .iter() + .cloned() + .chain(proof_block.iter().map(|proof| proof.info().clone())) + .collect(), + ¶ms.exclude_authors, + max_opt_batch_txns_size, + request.max_txns_after_filtering, + request.soft_max_txns_after_filtering, + request.return_non_full, + request.block_timestamp, + Some(params.minimum_batch_age_usecs), + ); - (opt_batches, opt_payload_size) - } else { - (Vec::new(), PayloadTxnsSize::zero()) - }; + (opt_batches, opt_payload_size) + } else { + (Vec::new(), PayloadTxnsSize::zero()) + }; let cur_txns = txns_with_proof_size + opt_batch_txns_size; let (inline_block, inline_block_size) = @@ -183,7 +185,7 @@ impl ProofManager { counters::NUM_INLINE_BATCHES.observe(inline_block.len() as f64); counters::NUM_INLINE_TXNS.observe(inline_block_size.count() as f64); - let response = if 
self.enable_opt_quorum_store { + let response = if request.maybe_optqs_payload_pull_params.is_some() { let inline_batches = inline_block.into(); Payload::OptQuorumStore(OptQuorumStorePayload::new( inline_batches, diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 74615b7a733ba..34eeb93233e98 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -365,7 +365,7 @@ impl InnerBuilder { * self.num_validators, self.batch_store.clone().unwrap(), self.config.allow_batches_without_pos_in_proposal, - self.config.enable_opt_quorum_store, + self.config.batch_expiry_gap_when_init_usecs, ); spawn_named!( "proof_manager", @@ -446,6 +446,7 @@ impl InnerBuilder { self.coordinator_tx.clone(), consensus_publisher, self.verifier.get_ordered_account_addresses(), + self.verifier.address_to_validator_index().clone(), )), Some(self.quorum_store_msg_tx.clone()), ) diff --git a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs index 2741ea3a6a912..96ab5414ab120 100644 --- a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs +++ b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs @@ -62,7 +62,7 @@ fn proof_of_store_with_size( fn test_proof_queue_sorting() { let my_peer_id = PeerId::random(); let batch_store = batch_store_for_test(5 * 1024 * 1024); - let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store, 1); let author_0 = PeerId::random(); let author_1 = PeerId::random(); @@ -149,7 +149,7 @@ fn test_proof_queue_sorting() { fn test_proof_calculate_remaining_txns_and_proofs() { let my_peer_id = PeerId::random(); let batch_store = batch_store_for_test(5 * 1024 * 1024); - let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store, 1); let now_in_secs = aptos_infallible::duration_since_epoch().as_secs() as u64; let now_in_usecs = aptos_infallible::duration_since_epoch().as_micros() as u64; let author_0 = PeerId::random(); @@ -409,7 +409,7 @@ fn test_proof_calculate_remaining_txns_and_proofs() { fn test_proof_pull_proofs_with_duplicates() { let my_peer_id = PeerId::random(); let batch_store = batch_store_for_test(5 * 1024 * 1024); - let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store, 1); let now_in_secs = aptos_infallible::duration_since_epoch().as_secs() as u64; let now_in_usecs = now_in_secs * 1_000_000; let txns = vec![ @@ -660,7 +660,7 @@ fn test_proof_pull_proofs_with_duplicates() { fn test_proof_queue_soft_limit() { let my_peer_id = PeerId::random(); let batch_store = batch_store_for_test(5 * 1024 * 1024); - let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store, 1); let author = PeerId::random(); @@ -702,7 +702,7 @@ fn test_proof_queue_soft_limit() { fn test_proof_queue_insert_after_commit() { let my_peer_id = PeerId::random(); let batch_store = batch_store_for_test(5 * 1024); - let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store); + let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store, 1); let author = PeerId::random(); let author_batches = vec![ @@ -734,7 +734,7 @@ fn test_proof_queue_insert_after_commit() { fn 
test_proof_queue_pull_full_utilization() {
     let my_peer_id = PeerId::random();
     let batch_store = batch_store_for_test(5 * 1024);
-    let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store);
+    let mut proof_queue = BatchProofQueue::new(my_peer_id, batch_store, 1);

     let author = PeerId::random();
     let author_batches = vec![
diff --git a/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs b/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs
index 7f04c4abf71ca..aa90aa5f03546 100644
--- a/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs
+++ b/consensus/src/quorum_store/tests/direct_mempool_quorum_store_test.rs
@@ -35,11 +35,11 @@ async fn test_block_request_no_txns() {
             max_txns_after_filtering: 100,
             soft_max_txns_after_filtering: 100,
             max_inline_txns: PayloadTxnsSize::new(50, 500),
-            opt_batch_txns_pct: 0,
             return_non_full: true,
             filter: PayloadFilter::DirectMempool(vec![]),
             callback: consensus_callback,
             block_timestamp: aptos_infallible::duration_since_epoch(),
+            maybe_optqs_payload_pull_params: None,
         }))
         .unwrap();
diff --git a/consensus/src/quorum_store/tests/proof_manager_test.rs b/consensus/src/quorum_store/tests/proof_manager_test.rs
index cf87abfecba84..3eebe4c667937 100644
--- a/consensus/src/quorum_store/tests/proof_manager_test.rs
+++ b/consensus/src/quorum_store/tests/proof_manager_test.rs
@@ -17,7 +17,7 @@ use std::{cmp::max, collections::HashSet};

 fn create_proof_manager() -> ProofManager {
     let batch_store = batch_store_for_test(5 * 1024 * 1024);
-    ProofManager::new(PeerId::random(), 10, 10, batch_store, true, false)
+    ProofManager::new(PeerId::random(), 10, 10, batch_store, true, 1)
 }

 fn create_proof(author: PeerId, expiration: u64, batch_sequence: u64) -> ProofOfStore {
@@ -62,8 +62,8 @@ async fn get_proposal(
         filter: PayloadFilter::InQuorumStore(filter_set),
         callback: callback_tx,
         block_timestamp: aptos_infallible::duration_since_epoch(),
-        opt_batch_txns_pct: 0,
         return_non_full: true,
+        maybe_optqs_payload_pull_params: None,
     });
     proof_manager.handle_proposal_request(req);
     let GetPayloadResponse::GetPayloadResponse(payload) = callback_rx.await.unwrap().unwrap();
diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs
index fedfd10e30c53..3c8cdb6993159 100644
--- a/consensus/src/round_manager.rs
+++ b/consensus/src/round_manager.rs
@@ -16,6 +16,7 @@ use crate::{
     error::{error_kind, VerifyError},
     liveness::{
         proposal_generator::ProposalGenerator,
+        proposal_status_tracker::TPastProposalStatusTracker,
         proposer_election::ProposerElection,
         round_state::{NewRoundEvent, NewRoundReason, RoundState, RoundStateLogSchema},
         unequivocal_proposer_election::UnequivocalProposerElection,
     },
@@ -267,6 +268,7 @@ pub struct RoundManager {
     futures: FuturesUnordered<
         Pin<Box<dyn Future<Output = (anyhow::Result<()>, Block, Instant)> + Send>>,
     >,
+    proposal_status_tracker: Arc<dyn TPastProposalStatusTracker>,
 }

 impl RoundManager {
@@ -286,6 +288,7 @@ impl RoundManager {
         randomness_config: OnChainRandomnessConfig,
         jwk_consensus_config: OnChainJWKConsensusConfig,
         fast_rand_config: Option<RandConfig>,
+        proposal_status_tracker: Arc<dyn TPastProposalStatusTracker>,
     ) -> Self {
         // when decoupled execution is false,
         // the counter is still static. 
@@ -316,6 +319,7 @@ impl RoundManager {
             pending_order_votes: PendingOrderVotes::new(),
             blocks_with_broadcasted_fast_shares: LruCache::new(5),
             futures: FuturesUnordered::new(),
+            proposal_status_tracker,
         }
     }

@@ -356,7 +360,7 @@ impl RoundManager {
             NewRoundReason::QCReady => {
                 counters::QC_ROUNDS_COUNT.inc();
             },
-            NewRoundReason::Timeout => {
+            NewRoundReason::Timeout(_) => {
                 counters::TIMEOUT_ROUNDS_COUNT.inc();
             },
         };
@@ -367,6 +371,9 @@ impl RoundManager {
         self.pending_order_votes
             .garbage_collect(self.block_store.sync_info().highest_ordered_round());

+        self.proposal_status_tracker
+            .push(new_round_event.reason.clone());
+
         if self
             .proposer_election
             .is_valid_proposer(self.proposal_generator.author(), new_round_event.round)
@@ -405,10 +412,9 @@ impl RoundManager {
         safety_rules: Arc<Mutex<MetricsSafetyRules>>,
         proposer_election: Arc<UnequivocalProposerElection>,
     ) -> anyhow::Result<()> {
-        let epoch = epoch_state.epoch;
         Self::log_collected_vote_stats(epoch_state.clone(), &new_round_event);
         let proposal_msg = Self::generate_proposal(
-            epoch,
+            epoch_state.clone(),
             new_round_event,
             sync_info,
             network.clone(),
@@ -514,9 +520,8 @@ impl RoundManager {
         &self,
         new_round_event: NewRoundEvent,
     ) -> anyhow::Result<ProposalMsg> {
-        let epoch = self.epoch_state.epoch;
         Self::generate_proposal(
-            epoch,
+            self.epoch_state.clone(),
             new_round_event,
             self.block_store.sync_info(),
             self.network.clone(),
@@ -528,7 +533,7 @@ impl RoundManager {
     }

     async fn generate_proposal(
-        epoch: u64,
+        epoch_state: Arc<EpochState>,
         new_round_event: NewRoundEvent,
         sync_info: SyncInfo,
         network: Arc<NetworkSender>,
@@ -551,7 +556,11 @@ impl RoundManager {
             Block::new_proposal_from_block_data_and_signature(proposal, signature);
         observe_block(signed_proposal.timestamp_usecs(), BlockStage::SIGNED);
         info!(
-            Self::new_log_with_round_epoch(LogEvent::Propose, new_round_event.round, epoch),
+            Self::new_log_with_round_epoch(
+                LogEvent::Propose,
+                new_round_event.round,
+                epoch_state.epoch
+            ),
             "{}", signed_proposal
         );
         Ok(ProposalMsg::new(signed_proposal, sync_info))
     }
@@ -704,6 +713,23 @@ impl RoundManager {
         sync_or_not
     }

+    fn compute_timeout_reason(&self, round: Round) -> RoundTimeoutReason {
+        if self.round_state().vote_sent().is_some() {
+            return RoundTimeoutReason::NoQC;
+        }
+
+        match self.block_store.get_block_for_round(round) {
+            None => RoundTimeoutReason::ProposalNotReceived,
+            Some(block) => {
+                if let Err(missing_authors) = self.block_store.check_payload(block.block()) {
+                    RoundTimeoutReason::PayloadUnavailable { missing_authors }
+                } else {
+                    RoundTimeoutReason::Unknown
+                }
+            },
+        }
+    }
+
     /// The replica broadcasts a "timeout vote message", which includes the round signature, which
     /// can be aggregated to a TimeoutCertificate.
     /// The timeout vote message can be one of the following three options:
@@ -742,8 +768,8 @@ impl RoundManager {
                 )
                 .context("[RoundManager] SafetyRules signs 2-chain timeout")?;

-            // TODO(ibalajiarun): placeholder, update with proper reason.
-            let timeout_reason = RoundTimeoutReason::Unknown;
+            let timeout_reason = self.compute_timeout_reason(round);
+
             RoundTimeout::new(
                 timeout,
                 self.proposal_generator.author(),
@@ -814,7 +840,11 @@ impl RoundManager {

     /// This function is called only after all the dependencies of the given QC have been retrieved. 
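     /// It feeds the latest sync info into the round state and, if that starts a new round, runs the new-round processing.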
    async fn process_certificates(&mut self) -> anyhow::Result<()> {
         let sync_info = self.block_store.sync_info();
-        if let Some(new_round_event) = self.round_state.process_certificates(sync_info) {
+        let epoch_state = self.epoch_state.clone();
+        if let Some(new_round_event) = self
+            .round_state
+            .process_certificates(sync_info, &epoch_state.verifier)
+        {
             self.process_new_round_event(new_round_event).await?;
         }
         Ok(())
     }
@@ -941,16 +971,30 @@ impl RoundManager {

         observe_block(proposal.timestamp_usecs(), BlockStage::SYNCED);

+        // Since processing the proposal may be delayed due to backpressure or payload availability, we add
+        // the block to the block store so that we don't need to fetch it from a remote node once we
+        // are out of the backpressure. Please note that delayed processing of the proposal is not
+        // guaranteed to add the block to the block store if we don't get out of the backpressure
+        // before the timeout, so this is needed to ensure that the proposed block is added to
+        // the block store regardless. Also, it is possible that delayed processing of the proposal
+        // tries to add the same block again, which is okay as the `insert_block` call
+        // is idempotent.
+        self.block_store
+            .insert_block(proposal.clone())
+            .await
+            .context("[RoundManager] Failed to insert the block into BlockStore")?;
+
         let block_store = self.block_store.clone();
-        if !block_store.check_payload(&proposal) {
+        if block_store.check_payload(&proposal).is_err() {
             debug!("Payload not available locally for block: {}", proposal.id());
             counters::CONSENSUS_PROPOSAL_PAYLOAD_AVAILABILITY
                 .with_label_values(&["missing"])
                 .inc();
             let start_time = Instant::now();
+            let deadline = self.round_state.current_round_deadline();
             let future = async move {
                 (
-                    block_store.wait_for_payload(&proposal).await,
+                    block_store.wait_for_payload(&proposal, deadline).await,
                     proposal,
                     start_time,
                 )
             }
@@ -978,18 +1022,7 @@ impl RoundManager {
         if self.block_store.vote_back_pressure() {
             counters::CONSENSUS_WITHOLD_VOTE_BACKPRESSURE_TRIGGERED.observe(1.0);
             // In case of back pressure, we delay processing proposal. This is done by resending the
-            // same proposal to self after some time. Even if processing proposal is delayed, we add
-            // the block to the block store so that we don't need to fetch it from remote once we
-            // are out of the backpressure. Please note that delayed processing of proposal is not
-            // guaranteed to add the block to the block store if we don't get out of the backpressure
-            // before the timeout, so this is needed to ensure that the proposed block is added to
-            // the block store irrespective. Also, it is possible that delayed processing of proposal
-            // tries to add the same block again, which is okay as `execute_and_insert_block` call
-            // is idempotent.
-            self.block_store
-                .insert_block(proposal.clone())
-                .await
-                .context("[RoundManager] Failed to execute_and_insert the block")?;
+            // same proposal to self after some time.
             Self::resend_verified_proposal_to_self(
                 self.block_store.clone(),
                 self.buffered_proposal_tx.clone(),
@@ -1601,9 +1634,10 @@ impl RoundManager {

     /// To jump start new round with the current certificates we have. 
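     /// Called when the RoundManager starts up, before it begins processing new messages.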
    pub async fn init(&mut self, last_vote_sent: Option<Vote>) {
+        let epoch_state = self.epoch_state.clone();
         let new_round_event = self
             .round_state
-            .process_certificates(self.block_store.sync_info())
+            .process_certificates(self.block_store.sync_info(), &epoch_state.verifier)
             .expect("Can not jump start a round_state from existing certificates.");
         if let Some(vote) = last_vote_sent {
             self.round_state.record_vote(vote);
diff --git a/consensus/src/round_manager_fuzzing.rs b/consensus/src/round_manager_fuzzing.rs
index 3132e644762c3..16e98b94a3f19 100644
--- a/consensus/src/round_manager_fuzzing.rs
+++ b/consensus/src/round_manager_fuzzing.rs
@@ -20,7 +20,9 @@ use crate::{
     persistent_liveness_storage::{PersistentLivenessStorage, RecoveryData},
     pipeline::execution_client::DummyExecutionClient,
     round_manager::RoundManager,
-    test_utils::{MockPayloadManager, MockStorage},
+    test_utils::{
+        MockOptQSPayloadProvider, MockPastProposalStatusTracker, MockPayloadManager, MockStorage,
+    },
     util::{mock_time_service::SimulatedTimeService, time_service::TimeService},
 };
 use aptos_channels::{self, aptos_channel, message_queues::QueueStyle};
@@ -180,6 +182,7 @@ fn create_node_for_fuzzing() -> RoundManager {
         false,
         ValidatorTxnConfig::default_disabled(),
         true,
+        Arc::new(MockOptQSPayloadProvider {}),
     );

     //
@@ -209,6 +212,7 @@ fn create_node_for_fuzzing() -> RoundManager {
         OnChainRandomnessConfig::default_enabled(),
         OnChainJWKConsensusConfig::default_enabled(),
         None,
+        Arc::new(MockPastProposalStatusTracker {}),
     )
 }
diff --git a/consensus/src/round_manager_test.rs b/consensus/src/round_manager_test.rs
index b17787865dd9e..29716947991bd 100644
--- a/consensus/src/round_manager_test.rs
+++ b/consensus/src/round_manager_test.rs
@@ -23,8 +23,8 @@ use crate::{
     round_manager::RoundManager,
     test_utils::{
         consensus_runtime, create_vec_signed_transactions,
-        mock_execution_client::MockExecutionClient, timed_block_on, MockPayloadManager,
-        MockStorage, TreeInserter,
+        mock_execution_client::MockExecutionClient, timed_block_on, MockOptQSPayloadProvider,
+        MockPastProposalStatusTracker, MockPayloadManager, MockStorage, TreeInserter,
     },
     util::time_service::{ClockTimeService, TimeService},
 };
@@ -305,6 +305,7 @@ impl NodeSetup {
             false,
             onchain_consensus_config.effective_validator_txn_config(),
             true,
+            Arc::new(MockOptQSPayloadProvider {}),
         );

         let round_state = Self::create_round_state(time_service);
@@ -332,6 +333,7 @@ impl NodeSetup {
             onchain_randomness_config.clone(),
             onchain_jwk_consensus_config.clone(),
             None,
+            Arc::new(MockPastProposalStatusTracker {}),
         );
         block_on(round_manager.init(last_vote_sent));
         Self {
@@ -995,13 +997,14 @@ fn sync_info_carried_on_timeout_vote() {
             .insert_single_quorum_cert(block_0_quorum_cert.clone())
             .unwrap();

-        node.round_manager
-            .round_state
-            .process_certificates(SyncInfo::new(
+        node.round_manager.round_state.process_certificates(
+            SyncInfo::new(
                 block_0_quorum_cert.clone(),
                 block_0_quorum_cert.into_wrapped_ledger_info(),
                 None,
-            ));
+            ),
+            &generate_validator_verifier(&[node.signer.clone()]),
+        );
         node.round_manager
             .process_local_timeout(2)
             .await
diff --git a/consensus/src/test_utils/mock_payload_manager.rs b/consensus/src/test_utils/mock_payload_manager.rs
index e62ec85b1ea9a..cfac0aaa458a3 100644
--- a/consensus/src/test_utils/mock_payload_manager.rs
+++ b/consensus/src/test_utils/mock_payload_manager.rs
@@ -3,13 +3,12 @@

 use crate::{
     error::QuorumStoreError,
-    payload_client::{
-        user::quorum_store_client::QuorumStoreClient, PayloadClient, PayloadPullParameters,
-    },
+    
payload_client::{user::quorum_store_client::QuorumStoreClient, PayloadClient},
 };
 use anyhow::Result;
 use aptos_consensus_types::{
-    block::block_test_utils::random_payload, common::Payload, request_response::GetPayloadCommand,
+    block::block_test_utils::random_payload, common::Payload,
+    payload_pull_params::PayloadPullParameters, request_response::GetPayloadCommand,
 };
 use aptos_types::{
     transaction::{ExecutionStatus, TransactionStatus},
diff --git a/consensus/src/test_utils/mod.rs b/consensus/src/test_utils/mod.rs
index b556d9bfc7ed8..5744a1ab0b090 100644
--- a/consensus/src/test_utils/mod.rs
+++ b/consensus/src/test_utils/mod.rs
@@ -5,11 +5,16 @@
 #![allow(clippy::unwrap_used)]
 use crate::{
     block_storage::{BlockReader, BlockStore},
+    liveness::{
+        proposal_status_tracker::{TOptQSPullParamsProvider, TPastProposalStatusTracker},
+        round_state::NewRoundReason,
+    },
     payload_manager::DirectMempoolPayloadManager,
 };
 use aptos_consensus_types::{
     block::{block_test_utils::certificate_for_genesis, Block},
     common::{Author, Round},
+    payload_pull_params::OptQSPayloadPullParams,
     pipelined_block::PipelinedBlock,
     quorum_cert::QuorumCert,
     sync_info::SyncInfo,
@@ -270,3 +275,17 @@ pub(crate) fn create_vec_signed_transactions_with_gas(
         .map(|_| create_signed_transaction(gas_unit_price))
         .collect()
 }
+
+pub struct MockOptQSPayloadProvider {}
+
+impl TOptQSPullParamsProvider for MockOptQSPayloadProvider {
+    fn get_params(&self) -> Option<OptQSPayloadPullParams> {
+        None
+    }
+}
+
+pub struct MockPastProposalStatusTracker {}
+
+impl TPastProposalStatusTracker for MockPastProposalStatusTracker {
+    fn push(&self, _status: NewRoundReason) {}
+}
diff --git a/crates/aptos-collections/src/bounded_vec_deque.rs b/crates/aptos-collections/src/bounded_vec_deque.rs
index 6435b7371f331..5da8bdd16e047 100644
--- a/crates/aptos-collections/src/bounded_vec_deque.rs
+++ b/crates/aptos-collections/src/bounded_vec_deque.rs
@@ -52,6 +52,14 @@ impl<T> BoundedVecDeque<T> {
     pub fn iter(&self) -> Iter<'_, T> {
         self.inner.iter()
     }
+
+    pub fn len(&self) -> usize {
+        self.inner.len()
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
 }

 impl<T> IntoIterator for BoundedVecDeque<T> {
diff --git a/testsuite/generate-format/tests/staged/consensus.yaml b/testsuite/generate-format/tests/staged/consensus.yaml
index 1e81da1a2692b..f9bb0aaf27da6 100644
--- a/testsuite/generate-format/tests/staged/consensus.yaml
+++ b/testsuite/generate-format/tests/staged/consensus.yaml
@@ -854,6 +854,13 @@ RoundTimeoutReason:
       Unknown: UNIT
     1:
       ProposalNotReceived: UNIT
+    2:
+      PayloadUnavailable:
+        STRUCT:
+          - missing_authors:
+              TYPENAME: BitVec
+    3:
+      NoQC: UNIT
 Script:
   STRUCT:
     - code: BYTES
diff --git a/types/src/validator_verifier.rs b/types/src/validator_verifier.rs
index 95ebfa637e16f..50aa7b69c4767 100644
--- a/types/src/validator_verifier.rs
+++ b/types/src/validator_verifier.rs
@@ -627,6 +627,25 @@ pub fn random_validator_verifier(
     count: usize,
     custom_voting_power_quorum: Option<u128>,
     pseudo_random_account_address: bool,
+) -> (Vec<ValidatorSigner>, ValidatorVerifier) {
+    random_validator_verifier_with_voting_power(
+        count,
+        custom_voting_power_quorum,
+        pseudo_random_account_address,
+        &[],
+    )
+}
+
+/// Helper function to get random validator signers and a corresponding validator verifier for
+/// testing. If custom_voting_power_quorum is not None, set a custom voting power quorum amount.
+/// With pseudo_random_account_address enabled, logs show `0 -> [0000]`, `1 -> [1000]`.
+/// `voting_power` is optional: validators without a corresponding entry default to a voting power of 1. 
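+/// For example, `voting_power = &[10, 5]` with `count = 3` yields voting powers 10, 5, and 1.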
+#[cfg(any(test, feature = "fuzzing"))]
+pub fn random_validator_verifier_with_voting_power(
+    count: usize,
+    custom_voting_power_quorum: Option<u128>,
+    pseudo_random_account_address: bool,
+    voting_power: &[u64],
 ) -> (Vec<ValidatorSigner>, ValidatorVerifier) {
     let mut signers = Vec::new();
     let mut validator_infos = vec![];
@@ -639,7 +658,7 @@ pub fn random_validator_verifier(
         validator_infos.push(ValidatorConsensusInfo::new(
             random_signer.author(),
             random_signer.public_key(),
-            1,
+            *voting_power.get(i).unwrap_or(&1),
         ));
         signers.push(random_signer);
     }

From 77ff4bf413f54c41206bd5573e1891fa3a0dccf6 Mon Sep 17 00:00:00 2001
From: Greg Nazario
Date: Fri, 11 Oct 2024 03:52:34 -0700
Subject: [PATCH 08/22] [Rosetta] Fungible Asset support (#14194)

* temp: Add FA support for rosetta

* [rosetta] Add testing for FA in rosetta

* [rosetta] Setup file input for additional currencies

* [rosetta] Complete implementation for FA

Now supports FA transfer, balances, and other parts
---
 .../build/Minter/bytecode_scripts/main.mv     | Bin 263 -> 263 bytes
 crates/aptos-rosetta-cli/src/construction.rs  |   9 +-
 crates/aptos-rosetta/src/account.rs           | 341 ++++++-----
 crates/aptos-rosetta/src/client.rs            |  12 +-
 crates/aptos-rosetta/src/common.rs            |  89 ++-
 crates/aptos-rosetta/src/construction.rs      | 166 +++++-
 crates/aptos-rosetta/src/lib.rs               |  32 +-
 crates/aptos-rosetta/src/main.rs              |  72 ++-
 crates/aptos-rosetta/src/test/mod.rs          | 466 +++++++++++++++
 crates/aptos-rosetta/src/types/move_types.rs  |  24 +
 crates/aptos-rosetta/src/types/objects.rs     | 538 +++++++++++++++---
 testsuite/smoke-test/src/rosetta.rs           |   2 +
 12 files changed, 1489 insertions(+), 262 deletions(-)
 create mode 100644 crates/aptos-rosetta/src/test/mod.rs

diff --git a/aptos-move/move-examples/scripts/minter/build/Minter/bytecode_scripts/main.mv b/aptos-move/move-examples/scripts/minter/build/Minter/bytecode_scripts/main.mv
index 1f982eefd3c12287fa9283a752bdc11e2d2fd0bb..8176dde7dd676d89f1c6f557f4ca21596814481c 100644
GIT binary patch
delta 14 VcmZo?YG-0yDD#?|Z6hlaBLE&$1Bw6u
delta 14 VcmZo?YG-0yDD#?|bt5YiBLE&x1Bn0t
diff --git a/crates/aptos-rosetta-cli/src/construction.rs b/crates/aptos-rosetta-cli/src/construction.rs
index c0b6485fc157a..466b2a23ec289 100644
--- a/crates/aptos-rosetta-cli/src/construction.rs
+++ b/crates/aptos-rosetta-cli/src/construction.rs
@@ -4,7 +4,7 @@
 use crate::common::{format_output, NetworkArgs, UrlArgs};
 use aptos::common::types::{EncodingOptions, PrivateKeyInputOptions, ProfileOptions};
 use aptos_logger::info;
-use aptos_rosetta::types::TransactionIdentifier;
+use aptos_rosetta::types::{Currency, TransactionIdentifier};
 use aptos_types::account_address::AccountAddress;
 use clap::{Parser, Subcommand};
 use std::time::{Duration, SystemTime, UNIX_EPOCH};
@@ -154,6 +154,12 @@ pub struct TransferCommand {
     /// The amount of coins to send
     #[clap(long)]
     amount: u64,
+    #[clap(long, value_parser = parse_currency)]
+    currency: Currency,
+}
+
+fn parse_currency(str: &str) -> anyhow::Result<Currency> {
+    Ok(serde_json::from_str(str)?)
}

 impl TransferCommand {
@@ -175,6 +181,7 @@ impl TransferCommand {
                 self.txn_args.sequence_number,
                 self.txn_args.max_gas,
                 self.txn_args.gas_price,
+                self.currency,
             )
             .await
     }
diff --git a/crates/aptos-rosetta/src/account.rs b/crates/aptos-rosetta/src/account.rs
index f75322c2fd99f..6e24fd04e2dde 100644
--- a/crates/aptos-rosetta/src/account.rs
+++ b/crates/aptos-rosetta/src/account.rs
@@ -8,19 +8,24 @@
 use crate::{
     common::{
-        check_network, get_block_index_from_request, handle_request, native_coin, native_coin_tag,
-        with_context,
+        check_network, get_block_index_from_request, handle_request, native_coin, with_context,
     },
     error::{ApiError, ApiResult},
     types::{AccountBalanceRequest, AccountBalanceResponse, Amount, Currency, *},
     RosettaContext,
 };
 use aptos_logger::{debug, trace, warn};
-use aptos_types::{
-    account_address::AccountAddress,
-    account_config::{AccountResource, CoinStoreResourceUntyped},
+use aptos_rest_client::{
+    aptos_api_types::{AptosError, AptosErrorCode, ViewFunction},
+    error::{AptosErrorResponse, RestError},
 };
-use std::{collections::HashSet, str::FromStr};
+use aptos_types::{account_address::AccountAddress, account_config::AccountResource};
+use move_core_types::{
+    ident_str,
+    language_storage::{ModuleId, StructTag, TypeTag},
+    parser::parse_type_tag,
+};
+use std::str::FromStr;
 use warp::Filter;

 /// Account routes e.g. balance
@@ -53,7 +58,6 @@ async fn account_balance(
     let network_identifier = request.network_identifier;

     check_network(network_identifier, &server_context)?;
-    let rest_client = server_context.rest_client()?;

     // Retrieve the block index to read
     let block_height =
@@ -69,7 +73,7 @@ async fn account_balance(

     // Retrieve all metadata we want to provide as an on-demand lookup
     let (sequence_number, operators, balances, lockup_expiration) = get_balances(
-        &rest_client,
+        &server_context,
         request.account_identifier,
         balance_version,
         request.currencies,
@@ -90,11 +94,12 @@ async fn account_balance(
 /// Retrieve the balances for an account
 #[allow(clippy::manual_retain)]
 async fn get_balances(
-    rest_client: &aptos_rest_client::Client,
+    server_context: &RosettaContext,
     account: AccountIdentifier,
     version: u64,
     maybe_filter_currencies: Option<Vec<Currency>>,
 ) -> ApiResult<(u64, Option<Vec<AccountAddress>>, Vec<Amount>, u64)> {
+    let rest_client = server_context.rest_client()?;
     let owner_address = account.account_address()?;
     let pool_address = account.pool_address()?;

@@ -105,7 +110,7 @@ async fn get_balances(
     // Lookup the delegation pool, if it's provided in the account information
     if pool_address.is_some() {
         match get_delegation_stake_balances(
-            rest_client,
+            rest_client.as_ref(),
             &account,
             owner_address,
             pool_address.unwrap(),
@@ -137,156 +142,198 @@ async fn get_balances(
         }
     }

-    // Retrieve all account resources
-    // TODO: This will need to change for FungibleAssets, will need to lookup on a list of known FAs
-    if let Ok(response) = rest_client
-        .get_account_resources_at_version_bcs(owner_address, version)
+    // Retrieve sequence number
+    let sequence_number = match rest_client
+        .get_account_resource_at_version_bcs(owner_address, "0x1::account::Account", version)
         .await
     {
-        let resources = response.into_inner();
-        let mut maybe_sequence_number = None;
-        let mut maybe_operators = None;
-
-        // Iterate through resources, converting balances
-        for (struct_tag, bytes) in resources {
-            match (
                struct_tag.address,
                struct_tag.module.as_str(),
                struct_tag.name.as_str(),
            ) {
-                // Retrieve the sequence number from the account resource
-                // TODO: Make a separate call for this
-                
(AccountAddress::ONE, ACCOUNT_MODULE, ACCOUNT_RESOURCE) => { - let account: AccountResource = bcs::from_bytes(&bytes)?; - maybe_sequence_number = Some(account.sequence_number()) + Ok(response) => { + let account: AccountResource = response.into_inner(); + account.sequence_number() + }, + Err(RestError::Api(AptosErrorResponse { + error: + AptosError { + error_code: AptosErrorCode::AccountNotFound, + .. }, - // Parse all associated coin stores - // TODO: This would need to be expanded to support other coin stores - (AccountAddress::ONE, COIN_MODULE, COIN_STORE_RESOURCE) => { - // Only show coins on the base account - if account.is_base_account() { - let coin_store: CoinStoreResourceUntyped = bcs::from_bytes(&bytes)?; - if let Some(coin_type) = struct_tag.type_args.first() { - // Only display supported coins - if coin_type == &native_coin_tag() { - balances.push(Amount { - value: coin_store.coin().to_string(), - currency: native_coin(), - }); - } - } - } + .. + })) + | Err(RestError::Api(AptosErrorResponse { + error: + AptosError { + error_code: AptosErrorCode::ResourceNotFound, + .. }, - // Parse all staking contract data to know the underlying balances of the pools - (AccountAddress::ONE, STAKING_CONTRACT_MODULE, STORE_RESOURCE) => { - if account.is_base_account() || pool_address.is_some() { - continue; - } - - let store: Store = bcs::from_bytes(&bytes)?; - maybe_operators = Some(vec![]); - for (operator, contract) in store.staking_contracts { - // Keep track of operators - maybe_operators.as_mut().unwrap().push(operator); - match get_stake_balances( - rest_client, - &account, - contract.pool_address, - version, - ) - .await - { - Ok(Some(balance_result)) => { - if let Some(balance) = balance_result.balance { - total_requested_balance = Some( - total_requested_balance.unwrap_or_default() - + u64::from_str(&balance.value).unwrap_or_default(), - ); - } - lockup_expiration = balance_result.lockup_expiration; - }, - result => { - warn!( - "Failed to retrieve requested balance for account: {}, address: {}: {:?}", - owner_address, contract.pool_address, result - ) - }, - } - } - if let Some(balance) = total_requested_balance { - balances.push(Amount { - value: balance.to_string(), - currency: native_coin(), - }) - } - - /* TODO: Right now operator stake is not supported - else if account.is_operator_stake() { - // For operator stake, filter on operator address - let operator_address = account.operator_address()?; - if let Some(contract) = store.staking_contracts.get(&operator_address) { - balances.push(get_total_stake( - rest_client, - &account, - contract.pool_address, - version, - ).await?); - } - }*/ - }, - _ => {}, - } - } - - // Retrieves the sequence number accordingly - // TODO: Sequence number should be 0 if it isn't retrieved probably - let sequence_number = if let Some(sequence_number) = maybe_sequence_number { - sequence_number - } else { + .. 
+ })) => { + // If the account or resource doesn't exist, set the sequence number to 0 + 0 + }, + _ => { + // Any other error we can't retrieve the sequence number return Err(ApiError::InternalError(Some( "Failed to retrieve account sequence number".to_string(), ))); - }; - - // Filter based on requested currencies - if let Some(currencies) = maybe_filter_currencies { - let mut currencies: HashSet = currencies.into_iter().collect(); - // Remove extra currencies not requested - balances = balances - .into_iter() - .filter(|balance| currencies.contains(&balance.currency)) - .collect(); + }, + }; - for balance in balances.iter() { - currencies.remove(&balance.currency); + // Retrieve staking information (if it applies) + // Only non-pool addresses, and non base accounts + let mut maybe_operators = None; + if !account.is_base_account() && pool_address.is_none() { + if let Ok(response) = rest_client + .get_account_resource_at_version_bcs( + owner_address, + "0x1::staking_contract::Store", + version, + ) + .await + { + let store: Store = response.into_inner(); + maybe_operators = Some(vec![]); + for (operator, contract) in store.staking_contracts { + // Keep track of operators + maybe_operators.as_mut().unwrap().push(operator); + match get_stake_balances( + rest_client.as_ref(), + &account, + contract.pool_address, + version, + ) + .await + { + Ok(Some(balance_result)) => { + if let Some(balance) = balance_result.balance { + total_requested_balance = Some( + total_requested_balance.unwrap_or_default() + + u64::from_str(&balance.value).unwrap_or_default(), + ); + } + lockup_expiration = balance_result.lockup_expiration; + }, + result => { + warn!( + "Failed to retrieve requested balance for account: {}, address: {}: {:?}", + owner_address, contract.pool_address, result + ) + }, + } } - - for currency in currencies { + if let Some(balance) = total_requested_balance { balances.push(Amount { - value: 0.to_string(), - currency, - }); + value: balance.to_string(), + currency: native_coin(), + }) } + + /* TODO: Right now operator stake is not supported + else if account.is_operator_stake() { + // For operator stake, filter on operator address + let operator_address = account.operator_address()?; + if let Some(contract) = store.staking_contracts.get(&operator_address) { + balances.push(get_total_stake( + rest_client, + &account, + contract.pool_address, + version, + ).await?); + } + }*/ } + } - // Retrieve balances - Ok(( - sequence_number, - maybe_operators, - balances, - lockup_expiration, - )) + // Filter currencies to lookup + let currencies_to_lookup = if let Some(currencies) = maybe_filter_currencies { + currencies.into_iter().collect() } else { - // If it fails, we return 0 - // TODO: This should probably be fixed to check if the account exists. Then if the account doesn't exist, return empty balance, otherwise error - Ok(( - 0, - None, - vec![Amount { - value: 0.to_string(), - currency: native_coin(), - }], - 0, - )) + server_context.currencies.clone() + }; + + // Retrieve the fungible asset balances and the coin balances + for currency in currencies_to_lookup.iter() { + match *currency { + // FA only + Currency { + metadata: + Some(CurrencyMetadata { + move_type: None, + fa_address: Some(ref fa_address), + }), + .. 
+            } => {
+                let response = rest_client
+                    .view_bcs::<Vec<u64>>(
+                        &ViewFunction {
+                            module: ModuleId {
+                                address: AccountAddress::ONE,
+                                name: ident_str!(PRIMARY_FUNGIBLE_STORE_MODULE).into(),
+                            },
+                            function: ident_str!(BALANCE_FUNCTION).into(),
+                            ty_args: vec![TypeTag::Struct(Box::new(StructTag {
+                                address: AccountAddress::ONE,
+                                module: ident_str!(OBJECT_MODULE).into(),
+                                name: ident_str!(OBJECT_CORE_RESOURCE).into(),
+                                type_args: vec![],
+                            }))],
+                            args: vec![
+                                bcs::to_bytes(&owner_address).unwrap(),
+                                bcs::to_bytes(&AccountAddress::from_str(fa_address).unwrap())
+                                    .unwrap(),
+                            ],
+                        },
+                        Some(version),
+                    )
+                    .await?
+                    .into_inner();
+                let fa_balance = response.first().copied().unwrap_or(0);
+                balances.push(Amount {
+                    value: fa_balance.to_string(),
+                    currency: currency.clone(),
+                })
+            },
+            // Coin or Coin and FA combined
+            Currency {
+                metadata:
+                    Some(CurrencyMetadata {
+                        move_type: Some(ref coin_type),
+                        fa_address: _,
+                    }),
+                ..
+            } => {
+                if let Ok(type_tag) = parse_type_tag(coin_type) {
+                    let response = rest_client
+                        .view_bcs::<Vec<u64>>(
+                            &ViewFunction {
+                                module: ModuleId {
+                                    address: AccountAddress::ONE,
+                                    name: ident_str!(COIN_MODULE).into(),
+                                },
+                                function: ident_str!(BALANCE_FUNCTION).into(),
+                                ty_args: vec![type_tag],
+                                args: vec![bcs::to_bytes(&owner_address).unwrap()],
+                            },
+                            Some(version),
+                        )
+                        .await?
+                        .into_inner();
+                    let coin_balance = response.first().copied().unwrap_or(0);
+                    balances.push(Amount {
+                        value: coin_balance.to_string(),
+                        currency: currency.clone(),
+                    })
+                }
+            },
+            _ => {
+                // None for both, means we can't look it up anyways / it's invalid
+            },
+        }
     }
+
+    Ok((
+        sequence_number,
+        maybe_operators,
+        balances,
+        lockup_expiration,
+    ))
 }
diff --git a/crates/aptos-rosetta/src/client.rs b/crates/aptos-rosetta/src/client.rs
index ad7975f616047..c6807a11613b2 100644
--- a/crates/aptos-rosetta/src/client.rs
+++ b/crates/aptos-rosetta/src/client.rs
@@ -11,9 +11,10 @@ use crate::{
         ConstructionParseRequest, ConstructionParseResponse, ConstructionPayloadsRequest,
         ConstructionPayloadsResponse, ConstructionPreprocessRequest,
         ConstructionPreprocessResponse, ConstructionSubmitRequest, ConstructionSubmitResponse,
-        Error, MetadataRequest, NetworkIdentifier, NetworkListResponse, NetworkOptionsResponse,
-        NetworkRequest, NetworkStatusResponse, Operation, PreprocessMetadata, PublicKey, Signature,
-        SignatureType, TransactionIdentifier, TransactionIdentifierResponse,
+        Currency, Error, MetadataRequest, NetworkIdentifier, NetworkListResponse,
+        NetworkOptionsResponse, NetworkRequest, NetworkStatusResponse, Operation,
+        PreprocessMetadata, PublicKey, Signature, SignatureType, TransactionIdentifier,
+        TransactionIdentifierResponse,
     },
 };
 use anyhow::anyhow;
@@ -190,6 +191,7 @@ impl RosettaClient {
         sequence_number: Option<u64>,
         max_gas: Option<u64>,
         gas_unit_price: Option<u64>,
+        currency: Currency,
     ) -> anyhow::Result<TransactionIdentifier> {
         let sender = self
             .get_account_address(network_identifier.clone(), private_key)
             .await?;
@@ -203,14 +205,14 @@ impl RosettaClient {
                 0,
                 None,
                 AccountIdentifier::base_account(sender),
-                native_coin(),
+                currency.clone(),
                 amount,
             ),
             Operation::deposit(
                 1,
                 None,
                 AccountIdentifier::base_account(receiver),
-                native_coin(),
+                currency,
                 amount,
             ),
         ];
diff --git a/crates/aptos-rosetta/src/common.rs b/crates/aptos-rosetta/src/common.rs
index b078f455a02c7..b889e4ab400bf 100644
--- a/crates/aptos-rosetta/src/common.rs
+++ b/crates/aptos-rosetta/src/common.rs
@@ -19,7 +19,7 @@ use aptos_sdk::move_types::{
 use aptos_types::{account_address::AccountAddress, chain_id::ChainId};
 use futures::future::BoxFuture;
 use 
serde::{de::DeserializeOwned, Deserialize, Serialize};
-use std::{convert::Infallible, fmt::LowerHex, future::Future, str::FromStr};
+use std::{collections::HashSet, convert::Infallible, fmt::LowerHex, future::Future, str::FromStr};
 use warp::Filter;

 /// The year 2000 in milliseconds, as this is the lower limit for Rosetta API implementations
@@ -149,12 +149,15 @@ const DEFAULT_COIN: &str = "APT";
 const DEFAULT_DECIMALS: u8 = 8;

 /// Provides the [Currency] for 0x1::aptos_coin::AptosCoin aka APT
+///
+/// Note that 0xA is the FA address for APT, but it is deliberately skipped here for backwards compatibility
 pub fn native_coin() -> Currency {
     Currency {
         symbol: DEFAULT_COIN.to_string(),
         decimals: DEFAULT_DECIMALS,
         metadata: Some(CurrencyMetadata {
-            move_type: native_coin_tag().to_string(),
+            move_type: Some(native_coin_tag().to_string()),
+            fa_address: None,
         }),
     }
 }
@@ -169,14 +172,52 @@ pub fn native_coin_tag() -> TypeTag {
     }))
 }

-/// Tells us whether the coin is APT and errors if it's not
-///
-/// TODO: This is the function that needs to be replaced to handle more coin types
-pub fn is_native_coin(currency: &Currency) -> ApiResult<()> {
-    if currency == &native_coin() {
-        Ok(())
+#[inline]
+pub fn is_native_coin(fa_address: AccountAddress) -> bool {
+    fa_address == AccountAddress::TEN
+}
+
+pub fn find_coin_currency(currencies: &HashSet<Currency>, type_tag: &TypeTag) -> Option<Currency> {
+    currencies
+        .iter()
+        .find(|currency| {
+            if let Some(CurrencyMetadata {
+                move_type: Some(ref move_type),
+                fa_address: _,
+            }) = currency.metadata
+            {
+                move_type == &type_tag.to_string()
+            } else {
+                false
+            }
+        })
+        .cloned()
+}
+pub fn find_fa_currency(
+    currencies: &HashSet<Currency>,
+    metadata_address: AccountAddress,
+) -> Option<Currency> {
+    if is_native_coin(metadata_address) {
+        Some(native_coin())
     } else {
-        Err(ApiError::UnsupportedCurrency(Some(currency.symbol.clone())))
+        let val = currencies
+            .iter()
+            .find(|currency| {
+                if let Some(CurrencyMetadata {
+                    move_type: _,
+                    fa_address: Some(ref fa_address),
+                }) = currency.metadata
+                {
+                    // TODO: Probably want to cache this
+                    AccountAddress::from_str(fa_address)
+                        .map(|addr| addr == metadata_address)
+                        .unwrap_or(false)
+                } else {
+                    false
+                }
+            })
+            .cloned();
+        val
     }
 }
@@ -316,15 +357,27 @@ pub fn to_hex_lower<T: LowerHex>(obj: &T) -> String {
 }

 /// Retrieves the currency from the given parameters
-/// TODO: What do do about the type params? 
-/// TODO: Handle other currencies, will need to be passed in as a config file or something on startup
-pub fn parse_currency(address: AccountAddress, module: &str, name: &str) -> ApiResult<Currency> {
-    match (address, module, name) {
-        (AccountAddress::ONE, APTOS_COIN_MODULE, APTOS_COIN_RESOURCE) => Ok(native_coin()),
-        _ => Err(ApiError::TransactionParseError(Some(format!(
-            "Invalid coin for transfer {}::{}::{}",
-            address, module, name
-        )))),
+pub fn parse_coin_currency(
+    server_context: &RosettaContext,
+    struct_tag: &StructTag,
+) -> ApiResult<Currency> {
+    if let Some(currency) = server_context.currencies.iter().find(|currency| {
+        if let Some(move_type) = currency
+            .metadata
+            .as_ref()
+            .and_then(|inner| inner.move_type.as_ref())
+        {
+            struct_tag.to_string() == *move_type
+        } else {
+            false
+        }
+    }) {
+        Ok(currency.clone())
+    } else {
+        Err(ApiError::TransactionParseError(Some(format!(
+            "Invalid coin for transfer {}",
+            struct_tag
+        ))))
     }
 }
diff --git a/crates/aptos-rosetta/src/construction.rs b/crates/aptos-rosetta/src/construction.rs
index 6a5bdc2ca3b91..4a2aadb752d8e 100644
--- a/crates/aptos-rosetta/src/construction.rs
+++ b/crates/aptos-rosetta/src/construction.rs
@@ -26,8 +26,8 @@
 use crate::{
     common::{
-        check_network, decode_bcs, decode_key, encode_bcs, get_account, handle_request,
-        native_coin, parse_currency, with_context,
+        check_network, decode_bcs, decode_key, encode_bcs, find_fa_currency, get_account,
+        handle_request, native_coin, parse_coin_currency, with_context,
     },
     error::{ApiError, ApiResult},
     types::{InternalOperation, *},
@@ -39,10 +39,7 @@ use aptos_crypto::{
 };
 use aptos_global_constants::adjust_gas_headroom;
 use aptos_logger::debug;
-use aptos_sdk::{
-    move_types::language_storage::{StructTag, TypeTag},
-    transaction_builder::TransactionFactory,
-};
+use aptos_sdk::{move_types::language_storage::TypeTag, transaction_builder::TransactionFactory};
 use aptos_types::{
     account_address::AccountAddress,
     chain_id::ChainId,
@@ -556,8 +553,9 @@ async fn construction_parse(
                 module.name().as_str(),
                 function_name.as_str(),
             ) {
-                (AccountAddress::ONE, COIN_MODULE, TRANSFER_FUNCTION) => {
-                    parse_transfer_operation(sender, &type_args, &args)?
+                (AccountAddress::ONE, COIN_MODULE, TRANSFER_FUNCTION)
+                | (AccountAddress::ONE, APTOS_ACCOUNT_MODULE, TRANSFER_COINS_FUNCTION) => {
+                    parse_transfer_operation(&server_context, sender, &type_args, &args)?
                 },
                 (AccountAddress::ONE, APTOS_ACCOUNT_MODULE, TRANSFER_FUNCTION) => {
                     parse_account_transfer_operation(sender, &type_args, &args)?
                 },
                 (AccountAddress::ONE, APTOS_ACCOUNT_MODULE, CREATE_ACCOUNT_FUNCTION) => {
                     parse_create_account_operation(sender, &type_args, &args)?
                 },
+                (AccountAddress::ONE, PRIMARY_FUNGIBLE_STORE_MODULE, TRANSFER_FUNCTION) => {
+                    parse_primary_fa_transfer_operation(&server_context, sender, &type_args, &args)?
+                },
+                (AccountAddress::ONE, FUNGIBLE_ASSET_MODULE, TRANSFER_FUNCTION) => {
+                    parse_fa_transfer_operation(&server_context, sender, &type_args, &args)?
+                },
                 (
                     AccountAddress::ONE,
                     STAKING_CONTRACT_MODULE,
@@ -663,6 +667,7 @@ fn parse_create_account_operation(

 /// Parses 0x1::coin::transfer(receiver: address, amount: u64)
 fn parse_transfer_operation(
+    server_context: &RosettaContext,
     sender: AccountAddress,
     type_args: &[TypeTag],
     args: &[Vec<u8>],
 ) -> ApiResult<Vec<Operation>> {
@@ -671,16 +676,7 @@ fn parse_transfer_operation(

     // Check coin is the native coin
     let currency = match type_args.first() {
-        Some(TypeTag::Struct(struct_tag)) => {
-            let StructTag {
-                address,
-                module,
-                name,
-                ..
-            } = &**struct_tag;
-
-            parse_currency(*address, module.as_str(), name.as_str())?
-        },
+        Some(TypeTag::Struct(struct_tag)) => parse_coin_currency(server_context, struct_tag)?,
         _ => {
             return Err(ApiError::TransactionParseError(Some(
                 "No coin type in transfer".to_string(),
             )))
         },
     };
@@ -770,6 +766,136 @@ fn parse_account_transfer_operation(
     Ok(operations)
 }

+/// Parses 0x1::primary_fungible_store::transfer(metadata: address, receiver: address, amount: u64)
+fn parse_primary_fa_transfer_operation(
+    server_context: &RosettaContext,
+    sender: AccountAddress,
+    type_args: &[TypeTag],
+    args: &[Vec<u8>],
+) -> ApiResult<Vec<Operation>> {
+    // There should be one type arg
+    if type_args.len() != 1 {
+        return Err(ApiError::TransactionParseError(Some(format!(
+            "Primary fungible store transfer should have one type argument: {:?}",
+            type_args
+        ))));
+    }
+    let mut operations = Vec::new();
+
+    // Retrieve the args for the operations
+    let metadata: AccountAddress = if let Some(metadata) = args.first() {
+        bcs::from_bytes(metadata)?
+    } else {
+        return Err(ApiError::TransactionParseError(Some(
+            "No metadata address in primary fungible transfer".to_string(),
+        )));
+    };
+    let receiver: AccountAddress = if let Some(receiver) = args.get(1) {
+        bcs::from_bytes(receiver)?
+    } else {
+        return Err(ApiError::TransactionParseError(Some(
+            "No receiver address in primary fungible transfer".to_string(),
+        )));
+    };
+    let amount: u64 = if let Some(amount) = args.get(2) {
+        bcs::from_bytes(amount)?
+    } else {
+        return Err(ApiError::TransactionParseError(Some(
+            "No amount in primary fungible transfer".to_string(),
+        )));
+    };
+
+    // Grab currency accordingly
+
+    let maybe_currency = find_fa_currency(&server_context.currencies, metadata);
+
+    if let Some(currency) = maybe_currency {
+        operations.push(Operation::withdraw(
+            0,
+            None,
+            AccountIdentifier::base_account(sender),
+            currency.clone(),
+            amount,
+        ));
+        operations.push(Operation::deposit(
+            1,
+            None,
+            AccountIdentifier::base_account(receiver),
+            currency.clone(),
+            amount,
+        ));
+        Ok(operations)
+    } else {
+        Err(ApiError::UnsupportedCurrency(Some(metadata.to_string())))
+    }
+}
+
+/// Parses 0x1::fungible_asset::transfer(metadata: address, receiver: address, amount: u64)
+///
+/// This is only for transfers directly from a store; prefer the primary fungible store transfer.
+fn parse_fa_transfer_operation(
+    server_context: &RosettaContext,
+    sender: AccountAddress,
+    type_args: &[TypeTag],
+    args: &[Vec<u8>],
+) -> ApiResult<Vec<Operation>> {
+    // There is one type arg for the object
+    if type_args.len() != 1 {
+        return Err(ApiError::TransactionParseError(Some(format!(
+            "Fungible asset transfer should have one type argument: {:?}",
+            type_args
+        ))));
+    }
+    let mut operations = Vec::new();
+
+    // Retrieve the args for the operations
+    let metadata: AccountAddress = if let Some(metadata) = args.first() {
+        bcs::from_bytes(metadata)?
+    } else {
+        return Err(ApiError::TransactionParseError(Some(
+            "No metadata address in fungible asset transfer".to_string(),
+        )));
+    };
+    let receiver: AccountAddress = if let Some(receiver) = args.get(1) {
+        bcs::from_bytes(receiver)?
+    } else {
+        return Err(ApiError::TransactionParseError(Some(
+            "No receiver address in fungible asset transfer".to_string(),
+        )));
+    };
+    let amount: u64 = if let Some(amount) = args.get(2) {
+        bcs::from_bytes(amount)?
+    } else {
+        return Err(ApiError::TransactionParseError(Some(
+            "No amount in fungible transfer".to_string(),
+        )));
+    };
+
+    // Grab currency accordingly
+
+    let maybe_currency = find_fa_currency(&server_context.currencies, metadata);
+
+    if let Some(currency) = maybe_currency {
+        operations.push(Operation::withdraw(
+            0,
+            None,
+            AccountIdentifier::base_account(sender),
+            currency.clone(),
+            amount,
+        ));
+        operations.push(Operation::deposit(
+            1,
+            None,
+            AccountIdentifier::base_account(receiver),
+            currency.clone(),
+            amount,
+        ));
+        Ok(operations)
+    } else {
+        Err(ApiError::UnsupportedCurrency(Some(metadata.to_string())))
+    }
+}
+
 /// Parses a specific BCS function argument to the given type
 pub fn parse_function_arg(
     name: &str,
@@ -1050,7 +1176,7 @@ async fn construction_payloads(
     check_network(request.network_identifier, &server_context)?;

     // Retrieve the real operation we're doing, this identifies the sub-operations to a function
-    let mut operation = InternalOperation::extract(&request.operations)?;
+    let mut operation = InternalOperation::extract(&server_context, &request.operations)?;

     // For some reason, metadata is optional on the Rosetta spec, we enforce it here, otherwise we
     // can't build the [RawTransaction] offline.
@@ -1304,7 +1430,7 @@ async fn construction_preprocess(
     check_network(request.network_identifier, &server_context)?;

     // Determine the actual operation from the collection of Rosetta [Operation]
-    let internal_operation = InternalOperation::extract(&request.operations)?;
+    let internal_operation = InternalOperation::extract(&server_context, &request.operations)?;

     // Provide the accounts that need public keys (there's only one supported today)
     let required_public_keys = vec![AccountIdentifier::base_account(internal_operation.sender())];
diff --git a/crates/aptos-rosetta/src/lib.rs b/crates/aptos-rosetta/src/lib.rs
index 792084f9edf4f..7a0726c9c80ea 100644
--- a/crates/aptos-rosetta/src/lib.rs
+++ b/crates/aptos-rosetta/src/lib.rs
@@ -7,14 +7,15 @@

 use crate::{
     block::BlockRetriever,
-    common::{handle_request, with_context},
+    common::{handle_request, native_coin, with_context},
     error::{ApiError, ApiResult},
+    types::Currency,
 };
 use aptos_config::config::ApiConfig;
 use aptos_logger::debug;
 use aptos_types::{account_address::AccountAddress, chain_id::ChainId};
 use aptos_warp_webserver::{logger, Error, WebServer};
-use std::{convert::Infallible, sync::Arc};
+use std::{collections::HashSet, convert::Infallible, sync::Arc};
 use tokio::task::JoinHandle;
 use warp::{
     http::{HeaderValue, Method, StatusCode},
@@ -31,6 +32,9 @@ pub mod common;
 pub mod error;
 pub mod types;

+#[cfg(test)]
+mod test;
+
 pub const NODE_VERSION: &str = "0.1";
 pub const ROSETTA_VERSION: &str = "1.4.12";

@@ -43,6 +47,8 @@ pub struct RosettaContext {
     pub chain_id: ChainId,
     /// Block index cache
     pub block_cache: Option<Arc<BlockRetriever>>,
+    /// Set of supported currencies
+    pub currencies: HashSet<Currency>,
 }

 impl RosettaContext {
@@ -50,11 +56,16 @@ impl RosettaContext {
         rest_client: Option<Arc<aptos_rest_client::Client>>,
         chain_id: ChainId,
         block_cache: Option<Arc<BlockRetriever>>,
+        mut currencies: HashSet<Currency>,
     ) -> Self {
+        // Always add APT
+        currencies.insert(native_coin());
+
         RosettaContext {
             rest_client,
             chain_id,
             block_cache,
+            currencies,
         }
     }

@@ -80,12 +91,18 @@ pub fn bootstrap(
     chain_id: ChainId,
     api_config: ApiConfig,
     rest_client: Option<aptos_rest_client::Client>,
+    supported_currencies: HashSet<Currency>,
 ) -> anyhow::Result<Runtime> {
     let runtime = aptos_runtimes::spawn_named_runtime("rosetta".into(), None);

     debug!("Starting up Rosetta server with {:?}", api_config);

-    
runtime.spawn(bootstrap_async(chain_id, api_config, rest_client));
+    runtime.spawn(bootstrap_async(
+        chain_id,
+        api_config,
+        rest_client,
+        supported_currencies,
+    ));
     Ok(runtime)
 }

@@ -94,6 +111,7 @@ pub async fn bootstrap_async(
     chain_id: ChainId,
     api_config: ApiConfig,
     rest_client: Option<aptos_rest_client::Client>,
+    supported_currencies: HashSet<Currency>,
 ) -> anyhow::Result<JoinHandle<()>> {
     debug!("Starting up Rosetta server with {:?}", api_config);

@@ -123,7 +141,13 @@ pub async fn bootstrap_async(
             ))
         });

-        let context = RosettaContext::new(rest_client.clone(), chain_id, block_cache).await;
+        let context = RosettaContext::new(
+            rest_client.clone(),
+            chain_id,
+            block_cache,
+            supported_currencies,
+        )
+        .await;
         api.serve(routes(context)).await;
     });
     Ok(handle)
diff --git a/crates/aptos-rosetta/src/main.rs b/crates/aptos-rosetta/src/main.rs
index 1e136ae75f017..11574dd66ca18 100644
--- a/crates/aptos-rosetta/src/main.rs
+++ b/crates/aptos-rosetta/src/main.rs
@@ -8,10 +8,12 @@
 use aptos_config::config::{ApiConfig, DEFAULT_MAX_PAGE_SIZE};
 use aptos_logger::prelude::*;
 use aptos_node::AptosNodeArgs;
-use aptos_rosetta::bootstrap;
+use aptos_rosetta::{bootstrap, common::native_coin, types::Currency};
 use aptos_types::chain_id::ChainId;
 use clap::Parser;
 use std::{
+    collections::HashSet,
+    fs::File,
     net::SocketAddr,
     path::PathBuf,
     sync::{
@@ -85,8 +87,13 @@ async fn main() {
     println!("aptos-rosetta: Starting rosetta");

     // Ensure runtime for Rosetta is up and running
-    let _rosetta = bootstrap(args.chain_id(), args.api_config(), args.rest_client())
-        .expect("aptos-rosetta: Should bootstrap rosetta server");
+    let _rosetta = bootstrap(
+        args.chain_id(),
+        args.api_config(),
+        args.rest_client(),
+        args.supported_currencies(),
+    )
+    .expect("aptos-rosetta: Should bootstrap rosetta server");

     println!("aptos-rosetta: Rosetta started");
     // Run until there is an interrupt
@@ -106,6 +113,9 @@ trait ServerArgs {

     /// Retrieve the chain id
     fn chain_id(&self) -> ChainId;
+
+    /// Supported currencies for the service
+    fn supported_currencies(&self) -> HashSet<Currency>;
 }

 /// Aptos Rosetta API Server
@@ -146,6 +156,14 @@ impl ServerArgs for CommandArgs {
             CommandArgs::Online(args) => args.chain_id(),
         }
     }
+
+    fn supported_currencies(&self) -> HashSet<Currency> {
+        match self {
+            CommandArgs::OnlineRemote(args) => args.supported_currencies(),
+            CommandArgs::Offline(args) => args.supported_currencies(),
+            CommandArgs::Online(args) => args.supported_currencies(),
+        }
+    }
 }

 #[derive(Debug, Parser)]
@@ -170,6 +188,31 @@ pub struct OfflineArgs {
     /// This can be configured to change performance characteristics
     #[clap(long, default_value_t = DEFAULT_MAX_PAGE_SIZE)]
     transactions_page_size: u16,
+
+    /// A file of currencies to support other than APT
+    ///
+    /// Example file for testnet:
+    /// ```json
+    /// [
+    ///   {
+    ///     "symbol": "TC",
+    ///     "decimals": 4,
+    ///     "metadata": {
+    ///       "fa_address": "0xb528ad40e472f8fcf0f21aa78aecd09fe68f6208036a5845e6d16b7d561c83b8",
+    ///       "move_type": "0xf5a9b6ccc95f8ad3c671ddf1e227416e71f7bcd3c971efe83c0ae8e5e028350f::test_faucet::TestFaucetCoin"
+    ///     }
+    ///   },
+    ///   {
+    ///     "symbol": "TFA",
+    ///     "decimals": 4,
+    ///     "metadata": {
+    ///       "fa_address": "0x7e51ad6e79cd113f5abe08f53ed6a3c2bfbf88561a24ae10b9e1e822e0623dfd"
+    ///     }
+    ///   }
+    /// ]
+    /// ```
+    #[clap(long)]
+    currency_config_file: Option<PathBuf>,
 }

 impl ServerArgs for OfflineArgs {
@@ -192,6 +235,21 @@ impl ServerArgs for OfflineArgs {
     fn chain_id(&self) -> ChainId {
         self.chain_id
     }
+
+    fn supported_currencies(&self) -> HashSet<Currency> {
+        let mut supported_currencies = HashSet::new();
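+        // APT is always supported; the currency config file can only add more currencies.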
+        supported_currencies.insert(native_coin());
+
+        if let Some(ref filepath) = self.currency_config_file {
+            let file = File::open(filepath).unwrap();
+            let currencies: Vec<Currency> = serde_json::from_reader(file).unwrap();
+            currencies.into_iter().for_each(|item| {
+                supported_currencies.insert(item);
+            });
+        }
+
+        supported_currencies
+    }
 }

 #[derive(Debug, Parser)]
@@ -218,6 +276,10 @@ impl ServerArgs for OnlineRemoteArgs {
     fn chain_id(&self) -> ChainId {
         self.offline_args.chain_id
     }
+
+    fn supported_currencies(&self) -> HashSet<Currency> {
+        self.offline_args.supported_currencies()
+    }
 }

 #[derive(Debug, Parser)]
@@ -242,6 +304,10 @@ impl ServerArgs for OnlineLocalArgs {
     fn chain_id(&self) -> ChainId {
         self.online_args.offline_args.chain_id
     }
+
+    fn supported_currencies(&self) -> HashSet<Currency> {
+        self.online_args.offline_args.supported_currencies()
+    }
 }

 #[test]
diff --git a/crates/aptos-rosetta/src/test/mod.rs b/crates/aptos-rosetta/src/test/mod.rs
new file mode 100644
index 0000000000000..ac5c9428b095c
--- /dev/null
+++ b/crates/aptos-rosetta/src/test/mod.rs
@@ -0,0 +1,466 @@
+// Copyright (c) Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+    common::native_coin,
+    types::{
+        Currency, CurrencyMetadata, FungibleAssetChangeEvent, ObjectCore, OperationType,
+        Transaction, FUNGIBLE_ASSET_MODULE, FUNGIBLE_STORE_RESOURCE, OBJECT_CORE_RESOURCE,
+        OBJECT_MODULE, OBJECT_RESOURCE_GROUP,
+    },
+    RosettaContext,
+};
+use aptos_crypto::{
+    ed25519::{Ed25519PrivateKey, Ed25519Signature},
+    HashValue, PrivateKey, Uniform,
+};
+use aptos_rest_client::aptos_api_types::{ResourceGroup, TransactionOnChainData};
+use aptos_types::{
+    account_config::fungible_store::FungibleStoreResource,
+    chain_id::ChainId,
+    contract_event::ContractEvent,
+    event::{EventHandle, EventKey},
+    on_chain_config::CurrentTimeMicroseconds,
+    state_store::{state_key::StateKey, state_value::StateValueMetadata},
+    test_helpers::transaction_test_helpers::get_test_raw_transaction,
+    transaction::{ExecutionStatus, TransactionInfo, TransactionInfoV0},
+    write_set::{WriteOp, WriteSet, WriteSetMut},
+};
+use move_core_types::{
+    account_address::AccountAddress,
+    ident_str,
+    language_storage::{StructTag, TypeTag},
+};
+use once_cell::sync::Lazy;
+use serde::Serialize;
+use std::{collections::HashSet, str::FromStr};
+
+const APT_ADDRESS: AccountAddress = AccountAddress::TEN;
+const OTHER_CURRENCY_ADDRESS: &str = "0x12341234123412341234123412341234";
+static OTHER_CURRENCY: Lazy<Currency> = Lazy::new(|| Currency {
+    symbol: "FUN".to_string(),
+    decimals: 2,
+    metadata: Some(CurrencyMetadata {
+        move_type: None,
+        fa_address: Some(OTHER_CURRENCY_ADDRESS.to_string()),
+    }),
+});
+
+async fn test_rosetta_context() -> RosettaContext {
+    let mut currencies = HashSet::new();
+    currencies.insert(OTHER_CURRENCY.clone());
+
+    RosettaContext::new(None, ChainId::test(), None, currencies).await
+}
+
+fn test_transaction(
+    sender: AccountAddress,
+    version: u64,
+    changes: WriteSet,
+    events: Vec<ContractEvent>,
+) -> TransactionOnChainData {
+    // generate random key
+    let private_key = Ed25519PrivateKey::generate_for_testing();
+
+    TransactionOnChainData {
+        version,
+        transaction: aptos_types::transaction::Transaction::UserTransaction(
+            aptos_types::transaction::SignedTransaction::new(
+                get_test_raw_transaction(
+                    sender,
+                    0,         // Sequence number doesn't matter for this
+                    None,      // TODO: payload
+                    None,      // Expiration timestamp
+                    Some(101), // Gas unit price, specifically make it different than 100 to check calculations
+                    None,      // Max gas amount
+                ),
+                // Dummy keys and 
signatures
+                private_key.public_key(),
+                Ed25519Signature::dummy_signature(),
+            ),
+        ),
+        info: TransactionInfo::V0(TransactionInfoV0::new(
+            HashValue::random(),
+            HashValue::random(),
+            HashValue::random(),
+            None,
+            178, // gas used, chosen arbitrarily
+            ExecutionStatus::Success, // TODO: Add other statuses
+        )),
+        events,
+        accumulator_root_hash: Default::default(),
+        changes,
+    }
+}
+
+fn resource_group_modification_write_op<T: Serialize>(
+    address: &AccountAddress,
+    resource: &StructTag,
+    input: &T,
+) -> (StateKey, WriteOp) {
+    let encoded = bcs::to_bytes(input).unwrap();
+    let state_key = StateKey::resource_group(address, resource);
+    let write_op = WriteOp::Modification {
+        data: encoded.into(),
+        metadata: StateValueMetadata::new(0, 0, &CurrentTimeMicroseconds { microseconds: 0 }),
+    };
+    (state_key, write_op)
+}
+
+struct FaData {
+    fa_metadata_address: AccountAddress,
+    owner: AccountAddress,
+    store_address: AccountAddress,
+    previous_balance: u64,
+    deposit: bool,
+    amount: u64,
+}
+
+impl FaData {
+    fn create_change(&self) -> (Vec<(StateKey, WriteOp)>, Vec<ContractEvent>) {
+        let object_core = ObjectCore {
+            guid_creation_num: 0,
+            owner: self.owner,
+            allow_ungated_transfer: false,
+            transfer_events: EventHandle::new(EventKey::new(42, self.owner), 22),
+        };
+
+        let (new_balance, contract_event) = if self.deposit {
+            let type_tag = TypeTag::Struct(Box::new(StructTag {
+                address: AccountAddress::ONE,
+                module: ident_str!(FUNGIBLE_ASSET_MODULE).into(),
+                name: ident_str!("Deposit").into(),
+                type_args: vec![],
+            }));
+            let event = FungibleAssetChangeEvent {
+                store: self.store_address,
+                amount: self.amount,
+            };
+            (
+                self.previous_balance + self.amount,
+                ContractEvent::new_v2(type_tag, bcs::to_bytes(&event).unwrap()),
+            )
+        } else {
+            let event = FungibleAssetChangeEvent {
+                store: self.store_address,
+                amount: self.amount,
+            };
+            let type_tag = TypeTag::Struct(Box::new(StructTag {
+                address: AccountAddress::ONE,
+                module: ident_str!(FUNGIBLE_ASSET_MODULE).into(),
+                name: ident_str!("Withdraw").into(),
+                type_args: vec![],
+            }));
+
+            (
+                self.previous_balance - self.amount,
+                ContractEvent::new_v2(type_tag, bcs::to_bytes(&event).unwrap()),
+            )
+        };
+
+        let store = FungibleStoreResource::new(self.fa_metadata_address, new_balance, false);
+        let mut group = ResourceGroup::new();
+        group.insert(
+            StructTag {
+                address: AccountAddress::ONE,
+                module: ident_str!(OBJECT_MODULE).into(),
+                name: ident_str!(OBJECT_CORE_RESOURCE).into(),
+                type_args: vec![],
+            },
+            bcs::to_bytes(&object_core).unwrap(),
+        );
+        group.insert(
+            StructTag {
+                address: AccountAddress::ONE,
+                module: ident_str!(FUNGIBLE_ASSET_MODULE).into(),
+                name: ident_str!(FUNGIBLE_STORE_RESOURCE).into(),
+                type_args: vec![],
+            },
+            bcs::to_bytes(&store).unwrap(),
+        );
+
+        let write_ops = vec![
+            // Update sender
+            resource_group_modification_write_op(
+                &self.store_address,
+                &StructTag {
+                    address: AccountAddress::ONE,
+                    module: ident_str!(OBJECT_MODULE).into(),
+                    name: ident_str!(OBJECT_RESOURCE_GROUP).into(),
+                    type_args: vec![],
+                },
+                &group,
+            ),
+        ];
+
+        (write_ops, vec![contract_event])
+    }
+}
+
+fn mint_fa_output(
+    owner: AccountAddress,
+    fa_address: AccountAddress,
+    store_address: AccountAddress,
+    previous_balance: u64,
+    amount: u64,
+) -> (WriteSet, Vec<ContractEvent>) {
+    let (minter_ops, minter_events) = FaData {
+        fa_metadata_address: fa_address,
+        owner,
+        store_address,
+        previous_balance,
+        deposit: true,
+        amount,
+    }
+    .create_change();
+
+    let write_set = WriteSetMut::new(minter_ops).freeze().unwrap();
+    (write_set, minter_events)
+}
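+
+/// Builds the write set and events for an FA transfer: a Withdraw from the sender's store paired with a Deposit into the receiver's store.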
+fn transfer_fa_output(
+    owner: AccountAddress,
+    fa_address: AccountAddress,
+    store_address: AccountAddress,
+    previous_balance: u64,
+    dest: AccountAddress,
+    dest_store_address: AccountAddress,
+    dest_previous_balance: u64,
+    amount: u64,
+) -> (WriteSet, Vec<ContractEvent>) {
+    let (mut sender_ops, mut sender_events) = FaData {
+        fa_metadata_address: fa_address,
+        owner,
+        store_address,
+        previous_balance,
+        deposit: false,
+        amount,
+    }
+    .create_change();
+
+    let (mut dest_ops, mut dest_events) = FaData {
+        fa_metadata_address: fa_address,
+        owner: dest,
+        store_address: dest_store_address,
+        previous_balance: dest_previous_balance,
+        deposit: true,
+        amount,
+    }
+    .create_change();
+    sender_ops.append(&mut dest_ops);
+    sender_events.append(&mut dest_events);
+    let write_set = WriteSetMut::new(sender_ops).freeze().unwrap();
+    (write_set, sender_events)
+}
+
+#[tokio::test]
+async fn test_fa_mint() {
+    let context = test_rosetta_context().await;
+
+    let version = 0;
+    let amount = 100;
+    let sender = AccountAddress::random();
+    let store_address = AccountAddress::random();
+    let (mint_changes, mint_events) = mint_fa_output(sender, APT_ADDRESS, store_address, 0, amount);
+    let input = test_transaction(sender, version, mint_changes, mint_events);
+
+    let result = Transaction::from_transaction(&context, input).await;
+    let expected_txn = result.expect("Must succeed");
+    assert_eq!(2, expected_txn.operations.len());
+
+    // TODO: Check that reading is working correctly
+    let operation_1 = expected_txn.operations.first().unwrap();
+    assert_eq!(
+        operation_1.operation_type,
+        OperationType::Deposit.to_string()
+    );
+    assert_eq!(
+        operation_1.amount.as_ref().unwrap().value,
+        format!("{}", amount)
+    );
+    assert_eq!(
+        operation_1
+            .account
+            .as_ref()
+            .unwrap()
+            .account_address()
+            .unwrap(),
+        sender,
+    );
+    let operation_2 = expected_txn.operations.get(1).unwrap();
+    assert_eq!(operation_2.operation_type, OperationType::Fee.to_string());
+    assert_eq!(
+        operation_2
+            .account
+            .as_ref()
+            .unwrap()
+            .account_address()
+            .unwrap(),
+        sender,
+    );
+    // TODO: Check fee
+}
+
+#[tokio::test]
+async fn test_fa_transfer() {
+    let context = test_rosetta_context().await;
+
+    let version = 0;
+    let amount = 100;
+    let sender = AccountAddress::random();
+    let receiver = AccountAddress::random();
+    let store_address = AccountAddress::random();
+    let receiver_store_address = AccountAddress::random();
+    let (changes, events) = transfer_fa_output(
+        sender,
+        APT_ADDRESS,
+        store_address,
+        amount * 2,
+        receiver,
+        receiver_store_address,
+        0,
+        amount,
+    );
+    let input = test_transaction(sender, version, changes, events);
+
+    let result = Transaction::from_transaction(&context, input).await;
+    let expected_txn = result.expect("Must succeed");
+    assert_eq!(3, expected_txn.operations.len(), "Ops: {:#?}", expected_txn);
+
+    // TODO: Check that reading is working correctly
+    // TODO: Do we want to order these? 
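+    // Expected order: the sender's withdraw, then the receiver's deposit, then the gas fee charged to the sender.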
+ let operation_1 = expected_txn.operations.first().unwrap(); + assert_eq!( + operation_1 + .account + .as_ref() + .unwrap() + .account_address() + .unwrap(), + sender + ); + assert_eq!( + operation_1.operation_type, + OperationType::Withdraw.to_string() + ); + assert_eq!( + operation_1.amount.as_ref().unwrap().value, + format!("-{}", amount) + ); + let operation_2 = expected_txn.operations.get(1).unwrap(); + assert_eq!( + operation_2.operation_type, + OperationType::Deposit.to_string() + ); + assert_eq!( + operation_2.amount.as_ref().unwrap().value, + format!("{}", amount) + ); + assert_eq!( + operation_2 + .account + .as_ref() + .unwrap() + .account_address() + .unwrap(), + receiver + ); + let operation_3 = expected_txn.operations.get(2).unwrap(); + assert_eq!(operation_3.operation_type, OperationType::Fee.to_string()); + assert_eq!( + operation_3 + .account + .as_ref() + .unwrap() + .account_address() + .unwrap(), + sender + ); + // TODO: Check fee +} + +#[tokio::test] +async fn test_fa_transfer_other_currency() { + let context = test_rosetta_context().await; + + let version = 0; + let amount = 100; + let sender = AccountAddress::random(); + let receiver = AccountAddress::random(); + let store_address = AccountAddress::random(); + let receiver_store_address = AccountAddress::random(); + let (changes, events) = transfer_fa_output( + sender, + AccountAddress::from_str(OTHER_CURRENCY_ADDRESS).unwrap(), + store_address, + amount * 2, + receiver, + receiver_store_address, + 0, + amount, + ); + let input = test_transaction(sender, version, changes, events); + + let result = Transaction::from_transaction(&context, input).await; + let expected_txn = result.expect("Must succeed"); + assert_eq!(3, expected_txn.operations.len(), "Ops: {:#?}", expected_txn); + + // TODO: Check that reading is working correctly + // TODO: Do we want to order these? 
+ let operation_1 = expected_txn.operations.first().unwrap(); + assert_eq!( + operation_1 + .account + .as_ref() + .unwrap() + .account_address() + .unwrap(), + sender + ); + assert_eq!( + operation_1.operation_type, + OperationType::Withdraw.to_string() + ); + assert_eq!( + operation_1.amount.as_ref().unwrap().value, + format!("-{}", amount) + ); + assert_eq!( + operation_1.amount.as_ref().unwrap().currency, + OTHER_CURRENCY.to_owned() + ); + let operation_2 = expected_txn.operations.get(1).unwrap(); + assert_eq!( + operation_2.operation_type, + OperationType::Deposit.to_string() + ); + assert_eq!( + operation_2.amount.as_ref().unwrap().value, + format!("{}", amount) + ); + assert_eq!( + operation_2 + .account + .as_ref() + .unwrap() + .account_address() + .unwrap(), + receiver + ); + assert_eq!( + operation_2.amount.as_ref().unwrap().currency, + OTHER_CURRENCY.to_owned() + ); + let operation_3 = expected_txn.operations.get(2).unwrap(); + assert_eq!(operation_3.operation_type, OperationType::Fee.to_string()); + assert_eq!( + operation_3 + .account + .as_ref() + .unwrap() + .account_address() + .unwrap(), + sender + ); + assert_eq!(operation_3.amount.as_ref().unwrap().currency, native_coin()); + // TODO: Check fee +} diff --git a/crates/aptos-rosetta/src/types/move_types.rs b/crates/aptos-rosetta/src/types/move_types.rs index eff18d095fbb4..286296833ca46 100644 --- a/crates/aptos-rosetta/src/types/move_types.rs +++ b/crates/aptos-rosetta/src/types/move_types.rs @@ -16,6 +16,10 @@ pub const STAKING_PROXY_MODULE: &str = "staking_proxy"; pub const STAKING_CONTRACT_MODULE: &str = "staking_contract"; pub const VESTING_MODULE: &str = "vesting"; pub const DELEGATION_POOL_MODULE: &str = "delegation_pool"; +pub const OBJECT_MODULE: &str = "object"; +pub const PRIMARY_FUNGIBLE_STORE_MODULE: &str = "primary_fungible_store"; +pub const FUNGIBLE_ASSET_MODULE: &str = "fungible_asset"; +pub const DISPATCHABLE_FUNGIBLE_ASSET_MODULE: &str = "dispatchable_fungible_asset"; pub const ACCOUNT_RESOURCE: &str = "Account"; pub const APTOS_COIN_RESOURCE: &str = "AptosCoin"; @@ -24,13 +28,19 @@ pub const COIN_STORE_RESOURCE: &str = "CoinStore"; pub const STAKE_POOL_RESOURCE: &str = "StakePool"; pub const STAKING_CONTRACT_RESOURCE: &str = "StakingContract"; pub const STORE_RESOURCE: &str = "Store"; +pub const FUNGIBLE_STORE_RESOURCE: &str = "FungibleStore"; pub const STAKING_GROUP_UPDATE_COMMISSION_RESOURCE: &str = "StakingGroupUpdateCommissionEvent"; pub const VESTING_RESOURCE: &str = "Vesting"; pub const DELEGATION_POOL_RESOURCE: &str = "DelegationPool"; pub const WITHDRAW_STAKE_EVENT: &str = "WithdrawStakeEvent"; +pub const OBJECT_CORE_RESOURCE: &str = "ObjectCore"; + +pub const OBJECT_RESOURCE_GROUP: &str = "ObjectGroup"; pub const CREATE_ACCOUNT_FUNCTION: &str = "create_account"; pub const TRANSFER_FUNCTION: &str = "transfer"; +pub const TRANSFER_COINS_FUNCTION: &str = "transfer_coins"; +pub const BALANCE_FUNCTION: &str = "balance"; // Staking Contract pub const RESET_LOCKUP_FUNCTION: &str = "reset_lockup"; @@ -245,3 +255,17 @@ pub struct WithdrawUndelegatedEvent { pub delegator_address: AccountAddress, pub amount_withdrawn: u64, } + +#[derive(Debug, Serialize, Deserialize)] +pub struct FungibleAssetChangeEvent { + pub store: AccountAddress, + pub amount: u64, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ObjectCore { + pub guid_creation_num: u64, + pub owner: AccountAddress, + pub allow_ungated_transfer: bool, + pub transfer_events: EventHandle, +} diff --git 
a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs
index f58c1fb50a795..1109fa900eb98 100644
--- a/crates/aptos-rosetta/src/types/objects.rs
+++ b/crates/aptos-rosetta/src/types/objects.rs
@@ -6,7 +6,7 @@
 //! [Spec](https://www.rosetta-api.org/docs/api_objects.html)
 
 use crate::{
-    common::{is_native_coin, native_coin, native_coin_tag},
+    common::{find_coin_currency, find_fa_currency, native_coin},
     construction::{
         parse_create_stake_pool_operation, parse_delegation_pool_add_stake_operation,
         parse_delegation_pool_unlock_operation, parse_delegation_pool_withdraw_operation,
@@ -25,11 +25,15 @@ use anyhow::anyhow;
 use aptos_cached_packages::aptos_stdlib;
 use aptos_crypto::{ed25519::Ed25519PublicKey, ValidCryptoMaterialStringExt};
 use aptos_logger::warn;
-use aptos_rest_client::aptos_api_types::{TransactionOnChainData, U64};
+use aptos_rest_client::aptos_api_types::{ResourceGroup, TransactionOnChainData, U64};
 use aptos_types::{
+    access_path::Path,
     account_address::AccountAddress,
-    account_config::{AccountResource, CoinStoreResourceUntyped, WithdrawEvent},
-    contract_event::{ContractEvent, FEE_STATEMENT_EVENT_TYPE},
+    account_config::{
+        fungible_store::FungibleStoreResource, AccountResource, CoinStoreResourceUntyped,
+        WithdrawEvent,
+    },
+    contract_event::{ContractEvent, ContractEventV2, FEE_STATEMENT_EVENT_TYPE},
     event::EventKey,
     fee_statement::FeeStatement,
     stake_pool::{SetOperatorEvent, StakePool},
@@ -38,17 +42,27 @@ use aptos_types::{
     write_set::{WriteOp, WriteSet},
 };
 use itertools::Itertools;
-use move_core_types::language_storage::TypeTag;
+use move_core_types::{
+    ident_str,
+    language_storage::{ModuleId, StructTag, TypeTag},
+    parser::parse_type_tag,
+};
+use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};
 use std::{
     cmp::Ordering,
-    collections::{BTreeMap, HashMap},
+    collections::{BTreeMap, HashMap, HashSet},
     convert::TryFrom,
     fmt::{Display, Formatter},
     hash::Hash,
     str::FromStr,
 };
 
+static WITHDRAW_TYPE_TAG: Lazy<TypeTag> =
+    Lazy::new(|| parse_type_tag("0x1::fungible_asset::Withdraw").unwrap());
+static DEPOSIT_TYPE_TAG: Lazy<TypeTag> =
+    Lazy::new(|| parse_type_tag("0x1::fungible_asset::Deposit").unwrap());
+
 /// A description of all types used by the Rosetta implementation.
 ///
 /// This is used to verify correctness of the implementation and to check things like
@@ -148,10 +162,17 @@ pub struct Currency {
 
 #[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
 pub struct CurrencyMetadata {
-    pub move_type: String,
+    /// Move coin type e.g. 0x1::aptos_coin::AptosCoin
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub move_type: Option<String>,
+    /// Fungible Asset Address e.g. 0xA
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub fa_address: Option<String>,
 }
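+// With both fields optional, a currency can now be described by its Move coin
+// type, by its FA metadata address, or by both. Illustrative JSON shapes
+// (symbols and decimals chosen arbitrarily for this sketch):
+//
+//     {"symbol":"APT","decimals":8,"metadata":{"move_type":"0x1::aptos_coin::AptosCoin"}}
+//     {"symbol":"XYZ","decimals":8,"metadata":{"fa_address":"0xa"}}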
 
-/// Various signing curves supported by Rosetta. We only use [`CurveType::Edwards25519`]
+/// Various signing curves supported by Rosetta.
+///
+/// We only use [`CurveType::Edwards25519`]
 /// [API Spec](https://www.rosetta-api.org/docs/models/CurveType.html)
 #[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
 #[serde(rename_all = "snake_case")]
@@ -904,18 +925,37 @@
         let mut operations = vec![];
         let mut operation_index: u64 = 0;
         if successful {
-            // Parse all operations from the writeset changes in a success
+            let mut object_to_owner = HashMap::new();
+            let mut store_to_currency = HashMap::new();
+            let mut framework_changes = vec![];
+            // Not the most efficient: first parse all store owners, and the assets associated with stores
             for (state_key, write_op) in &txn.changes {
-                let mut ops = parse_operations_from_write_set(
+                let new_changes = preprocess_write_set(
                     server_context,
                     state_key,
                     write_op,
-                    &events,
-                    maybe_user_txn.map(|inner| inner.sender()),
                     maybe_user_txn.map(|inner| inner.payload()),
                     txn.version,
+                    &mut object_to_owner,
+                    &mut store_to_currency,
+                );
+                framework_changes.extend(new_changes);
+            }
+
+            // Parse all operations from the writeset changes in a success
+            for (struct_tag, account_address, data) in &framework_changes {
+                let mut ops = parse_operations_from_write_set(
+                    server_context,
+                    struct_tag,
+                    *account_address,
+                    data,
+                    &events, // TODO: Filter events down to framework events only
+                    maybe_user_txn.map(|inner| inner.sender()),
+                    txn.version,
                     operation_index,
-                    &txn.changes,
+                    &txn.changes, // TODO: Move to parsed framework_changes
+                    &mut object_to_owner,
+                    &mut store_to_currency,
                 )
                 .await?;
                 operation_index += ops.len() as u64;
@@ -940,6 +980,7 @@
             // Parse all failed operations from the payload
             if let Some(user_txn) = maybe_user_txn {
                 let mut ops = parse_failed_operations_from_txn_payload(
+                    &server_context.currencies,
                     operation_index,
                     user_txn.sender(),
                     user_txn.payload(),
@@ -986,6 +1027,7 @@
 /// This case only occurs if the transaction failed, and that's because it's less accurate
 /// than just following the state changes
 fn parse_failed_operations_from_txn_payload(
+    currencies: &HashSet<Currency>,
     operation_index: u64,
     sender: AccountAddress,
     payload: &TransactionPayload,
@@ -997,16 +1039,17 @@
             inner.module().name().as_str(),
             inner.function().as_str(),
         ) {
-            (AccountAddress::ONE, COIN_MODULE, TRANSFER_FUNCTION) => {
-                // Only put the transfer in if we can understand the currency
+            (AccountAddress::ONE, COIN_MODULE, TRANSFER_FUNCTION)
+            | (AccountAddress::ONE, APTOS_ACCOUNT_MODULE, TRANSFER_COINS_FUNCTION) => {
+                // We could add a create here as well on transfer_coins, but we don't know if it will actually happen
                 if let Some(type_tag) = inner.ty_args().first() {
-                    // We don't want to do lookups on failures for currencies that don't exist,
-                    // so we only look up cached info not new info
-                    // TODO: If other coins are supported, this will need to be updated to handle more coins
-                    if type_tag == &native_coin_tag() {
-                        operations = parse_transfer_from_txn_payload(
+                    // Find currency from type tag
+                    let maybe_currency = find_coin_currency(currencies, type_tag);
+
+                    if let Some(currency) = maybe_currency {
+                        operations = parse_coin_transfer_from_txn_payload(
                             inner,
-                            native_coin(),
+                            currency.clone(),
                             sender,
                             operation_index,
                         )
                    }
                }
            },
             (AccountAddress::ONE, APTOS_ACCOUNT_MODULE, TRANSFER_FUNCTION) => {
                 // We could add a create here as well, but we don't know if it will actually happen
-                operations =
-                    parse_transfer_from_txn_payload(inner, native_coin(), sender, operation_index)
+                operations = parse_coin_transfer_from_txn_payload(
+                    inner,
+                    native_coin(),
+                    sender,
+                    operation_index,
+                )
             },
+            (AccountAddress::ONE, PRIMARY_FUNGIBLE_STORE_MODULE, TRANSFER_FUNCTION) => {
+                // Primary transfer has the same interface as coin transfer, but it's a metadata address instead of a coin type generic
+                let maybe_metadata_address = inner
+                    .args()
+                    .first()
+                    .map(|encoded| bcs::from_bytes::<AccountAddress>(encoded));
+                if let Some(Ok(addr)) = maybe_metadata_address {
+                    // Find currency from the metadata address
+                    let maybe_currency = find_fa_currency(currencies, addr);
+
+                    if let Some(currency) = maybe_currency {
+                        operations = parse_primary_fa_transfer_from_txn_payload(
+                            inner,
+                            currency.clone(),
+                            sender,
+                            operation_index,
+                        )
+                    }
+                }
+            },
+            (AccountAddress::ONE, DISPATCHABLE_FUNGIBLE_ASSET_MODULE, TRANSFER_FUNCTION) => {
+                // TODO: This isn't really easy to handle atm, objects get messy, need owners etc.
+            },
             (AccountAddress::ONE, ACCOUNT_MODULE, CREATE_ACCOUNT_FUNCTION) => {
                 if let Some(Ok(address)) = inner
@@ -1159,20 +1229,62 @@
 }
 
 /// Parses a 0x1::coin::transfer to a Withdraw and Deposit
-fn parse_transfer_from_txn_payload(
+fn parse_coin_transfer_from_txn_payload(
     payload: &EntryFunction,
     currency: Currency,
     sender: AccountAddress,
     operation_index: u64,
 ) -> Vec<Operation> {
-    let mut operations = vec![];
-
     let args = payload.args();
     let maybe_receiver = args
         .first()
        .map(|encoded| bcs::from_bytes::<AccountAddress>(encoded));
     let maybe_amount = args.get(1).map(|encoded| bcs::from_bytes::<u64>(encoded));
 
+    build_transfer_operations(
+        payload,
+        operation_index,
+        sender,
+        maybe_receiver,
+        maybe_amount,
+        currency,
+    )
+}
+
+/// Parses a 0x1::primary_fungible_store::transfer to a Withdraw and Deposit
+fn parse_primary_fa_transfer_from_txn_payload(
+    payload: &EntryFunction,
+    currency: Currency,
+    sender: AccountAddress,
+    operation_index: u64,
+) -> Vec<Operation> {
+    let args = payload.args();
+    let maybe_receiver = args
+        .get(1)
+        .map(|encoded| bcs::from_bytes::<AccountAddress>(encoded));
+    let maybe_amount = args.get(2).map(|encoded| bcs::from_bytes::<u64>(encoded));
+
+    build_transfer_operations(
+        payload,
+        operation_index,
+        sender,
+        maybe_receiver,
+        maybe_amount,
+        currency,
+    )
+}
+
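+// BCS argument layouts assumed by the two parsers above:
+// 0x1::coin::transfer and 0x1::aptos_account::transfer_coins carry
+// (receiver: address, amount: u64) at args[0]/args[1], while
+// 0x1::primary_fungible_store::transfer prepends the metadata object address,
+// shifting receiver/amount to args[1]/args[2]. Decoding sketch:
+//
+//     let receiver: AccountAddress = bcs::from_bytes(&args[0])?;
+//     let amount: u64 = bcs::from_bytes(&args[1])?;
+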
+/// Builds operations for a coin or FA transfer
+fn build_transfer_operations(
+    payload: &EntryFunction,
+    operation_index: u64,
+    sender: AccountAddress,
+    maybe_receiver: Option<Result<AccountAddress, bcs::Error>>,
+    maybe_amount: Option<Result<u64, bcs::Error>>,
+    currency: Currency,
+) -> Vec<Operation> {
+    let mut operations = vec![];
+
     if let (Some(Ok(receiver)), Some(Ok(amount))) = (maybe_receiver, maybe_amount) {
         operations.push(Operation::withdraw(
             operation_index,
@@ -1204,35 +1316,17 @@
 /// It is more accurate because untracked scripts are included in balance operations
 async fn parse_operations_from_write_set(
     server_context: &RosettaContext,
-    state_key: &StateKey,
-    write_op: &WriteOp,
+    struct_tag: &StructTag,
+    address: AccountAddress,
+    data: &[u8],
     events: &[ContractEvent],
     maybe_sender: Option<AccountAddress>,
-    _maybe_payload: Option<&TransactionPayload>,
     version: u64,
     operation_index: u64,
     changes: &WriteSet,
+    object_to_owner: &mut HashMap<AccountAddress, AccountAddress>,
+    store_to_currency: &mut HashMap<AccountAddress, Currency>,
 ) -> ApiResult<Vec<Operation>> {
-    let (struct_tag, address) = match state_key.inner() {
-        StateKeyInner::AccessPath(path) => {
-            if let Some(struct_tag) = path.get_struct_tag() {
-                (struct_tag, path.address)
-            } else {
-                return Ok(vec![]);
-            }
-        },
-        _ => {
-            // Ignore all but access path
-            return Ok(vec![]);
-        },
-    };
-
-    let bytes = match write_op.bytes() {
-        Some(bytes) => bytes,
-        None => return Ok(vec![]),
-    };
-    let data = &bytes;
-
     // Determine operation
     match (
         struct_tag.address,
         struct_tag.module.as_str(),
         struct_tag.name.as_str(),
         struct_tag.type_args.len(),
     ) {
+        // TODO: Handle object transfer for transfer of fungible asset stores
         (AccountAddress::ONE, ACCOUNT_MODULE, ACCOUNT_RESOURCE, 0) => {
             parse_account_resource_changes(version, address, data, maybe_sender, operation_index)
         },
@@ -1269,17 +1364,18 @@
         },
         (AccountAddress::ONE, COIN_MODULE, COIN_STORE_RESOURCE, 1) => {
             if let Some(type_tag) = struct_tag.type_args.first() {
-                // TODO: This will need to be updated to support more coins
-                if type_tag == &native_coin_tag() {
+                // Find the currency and parse it accordingly
+                let maybe_currency = find_coin_currency(&server_context.currencies, type_tag);
+
+                if let Some(currency) = maybe_currency {
                     parse_coinstore_changes(
-                        native_coin(),
+                        currency.clone(),
                         version,
                         address,
                         data,
                         events,
                         operation_index,
                     )
-                    .await
                 } else {
                     Ok(vec![])
                 }
@@ -1291,6 +1387,15 @@
                 Ok(vec![])
             }
         },
+        (AccountAddress::ONE, FUNGIBLE_ASSET_MODULE, FUNGIBLE_STORE_RESOURCE, 0) => {
+            parse_fungible_store_changes(
+                object_to_owner,
+                store_to_currency,
+                address,
+                events,
+                operation_index,
+            )
+        },
         _ => {
             // Any unknown type will just skip the operations
             Ok(vec![])
        },
    }
}
 
+fn parse_write_set<'a>(
+    state_key: &'a StateKey,
+    write_op: &'a WriteOp,
+) -> Option<(StructTag, AccountAddress, &'a [u8])> {
+    let (struct_tag, address) = match state_key.inner() {
+        StateKeyInner::AccessPath(path) => match path.get_path() {
+            Path::Resource(struct_tag) => (struct_tag, path.address),
+            Path::ResourceGroup(group_tag) => (group_tag, path.address),
+            _ => return None,
+        },
+        _ => {
+            // Ignore all but access path
+            return None;
+        },
+    };
+
+    let bytes = match write_op.bytes() {
+        Some(bytes) => bytes,
+        None => return None,
+    };
+
+    Some((struct_tag, address, bytes))
+}
+
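+// Two-pass design: `preprocess_write_set` below first walks every write op,
+// recording which object owns each store (`object_to_owner`) and which
+// currency each fungible store holds (`store_to_currency`); operations are
+// parsed only afterwards, because a bare `FungibleStore` change does not name
+// its owner. Sketch of the lookups the second pass relies on (illustrative):
+//
+//     let owner: Option<&AccountAddress> = object_to_owner.get(&store_address);
+//     let currency: Option<&Currency> = store_to_currency.get(&store_address);
+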
+fn preprocess_write_set<'a>(
+    server_context: &RosettaContext,
+    state_key: &'a StateKey,
+    write_op: &'a WriteOp,
+    _maybe_payload: Option<&TransactionPayload>,
+    version: u64,
+    object_to_owner: &mut HashMap<AccountAddress, AccountAddress>,
+    store_to_currency: &mut HashMap<AccountAddress, Currency>,
+) -> Vec<(StructTag, AccountAddress, Vec<u8>)> {
+    let write_set_data = parse_write_set(state_key, write_op);
+    if write_set_data.is_none() {
+        return vec![];
+    }
+    let (struct_tag, address, data) = write_set_data.unwrap();
+
+    // Determine owners of stores, and metadata addresses for stores
+    let mut resources = vec![];
+    match (
+        struct_tag.address,
+        struct_tag.module.as_str(),
+        struct_tag.name.as_str(),
+    ) {
+        (AccountAddress::ONE, OBJECT_MODULE, OBJECT_RESOURCE_GROUP) => {
+            // Parse the underlying resources in the group
+            let maybe_resource_group = bcs::from_bytes::<ResourceGroup>(data);
+            let resource_group = match maybe_resource_group {
+                Ok(resource_group) => resource_group,
+                Err(err) => {
+                    warn!(
+                        "Failed to parse object resource group in version {}: {:#}",
+                        version, err
+                    );
+                    return vec![];
+                },
+            };
+
+            for (struct_tag, bytes) in resource_group.iter() {
+                match (
+                    struct_tag.address,
+                    struct_tag.module.as_str(),
+                    struct_tag.name.as_str(),
+                ) {
+                    (AccountAddress::ONE, OBJECT_MODULE, OBJECT_CORE_RESOURCE) => {
+                        parse_object_owner(address, bytes, object_to_owner);
+                    },
+                    (AccountAddress::ONE, FUNGIBLE_ASSET_MODULE, FUNGIBLE_STORE_RESOURCE) => {
+                        parse_fungible_store_metadata(
+                            &server_context.currencies,
+                            version,
+                            address,
+                            bytes,
+                            store_to_currency,
+                        );
+                    },
+                    _ => {},
+                }
+
+                // Filter out changes that are not framework
+                if struct_tag.address == AccountAddress::ONE {
+                    resources.push((struct_tag.clone(), address, bytes.clone()));
+                }
+            }
+        },
+        (AccountAddress::ONE, ..) => {
+            // Filter out changes that are not framework
+            // TODO: maybe be more strict on what we filter
+            resources.push((struct_tag.clone(), address, data.to_vec()));
+        },
+        _ => {},
+    }
+
+    resources
+}
+
+fn parse_object_owner(
+    object_address: AccountAddress,
+    data: &[u8],
+    object_to_owner: &mut HashMap<AccountAddress, AccountAddress>,
+) {
+    if let Ok(object_core) = bcs::from_bytes::<ObjectCore>(data) {
+        object_to_owner.insert(object_address, object_core.owner);
+    }
+}
+
 /// Parses any account resource changes, in this case only create account is supported
 fn parse_account_resource_changes(
     version: u64,
@@ -1785,7 +1998,7 @@
 }
 
 /// Parses coin store direct changes, for withdraws and deposits
-async fn parse_coinstore_changes(
+fn parse_coinstore_changes(
     currency: Currency,
     version: u64,
     address: AccountAddress,
@@ -1835,6 +2048,91 @@
     Ok(operations)
 }
 
+fn parse_fungible_store_metadata(
+    currencies: &HashSet<Currency>,
+    version: u64,
+    address: AccountAddress,
+    data: &[u8],
+    store_to_currency: &mut HashMap<AccountAddress, Currency>,
+) {
+    let fungible_store: FungibleStoreResource = if let Ok(fungible_store) = bcs::from_bytes(data) {
+        fungible_store
+    } else {
+        warn!(
+            "Fungible store failed to parse for address {} at version {}: {}",
+            address,
+            version,
+            hex::encode(data)
+        );
+        return;
+    };
+
+    let metadata_address = fungible_store.metadata();
+    let maybe_currency = find_fa_currency(currencies, metadata_address);
+    if let Some(currency) = maybe_currency {
+        store_to_currency.insert(address, currency);
+    }
+}
+
+/// Parses fungible store direct changes, for withdraws and deposits
+///
+/// Note that we don't know which FA a store holds until we introspect the change
+fn parse_fungible_store_changes(
+    object_to_owner: &HashMap<AccountAddress, AccountAddress>,
+    store_to_currency: &HashMap<AccountAddress, Currency>,
+    address: AccountAddress,
+    events: &[ContractEvent],
+    mut operation_index: u64,
+) -> ApiResult<Vec<Operation>> {
+    let mut operations = vec![];
+
+    // Find the fungible asset currency association
+    let maybe_currency = store_to_currency.get(&address);
+    if maybe_currency.is_none() {
+        return Ok(operations);
+    }
+    let currency = maybe_currency.unwrap();
+
+    // If there's a currency, let's fill in operations
+    // If we don't have an owner here, there's missing data on the writeset
+    let maybe_owner = object_to_owner.get(&address);
+    if maybe_owner.is_none() {
+        warn!(
+            "First pass did not catch owner for fungible store \"{}\", returning no operations",
+            address
+        );
+        return Ok(operations);
+    }
+
+    let owner = maybe_owner.copied().unwrap();
+
+    let withdraw_amounts = get_amount_from_fa_event(events, &WITHDRAW_TYPE_TAG, address);
+    for amount in withdraw_amounts {
+        operations.push(Operation::withdraw(
+            operation_index,
+            Some(OperationStatusType::Success),
+            AccountIdentifier::base_account(owner),
+            currency.clone(),
+            amount,
+        ));
+        operation_index += 1;
+    }
+
+    let deposit_amounts = get_amount_from_fa_event(events, &DEPOSIT_TYPE_TAG, address);
+    for amount in deposit_amounts {
+        operations.push(Operation::deposit(
+            operation_index,
+            Some(OperationStatusType::Success),
+            AccountIdentifier::base_account(owner),
+            currency.clone(),
+            amount,
+        ));
+        operation_index += 1;
+    }
+
+    Ok(operations)
+}
+
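+// Unlike v1 CoinStore events, which are looked up by per-account `EventKey`,
+// the v2 FA events are module events: they are matched by type tag and then
+// filtered by the `store` field in their payload, as the helper below does.
+// A decoding sketch (event assumed to be a v2 0x1::fungible_asset event):
+//
+//     if let Ok(e) = bcs::from_bytes::<FungibleAssetChangeEvent>(event.event_data()) {
+//         let relevant = e.store == store_address;
+//     }
+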
 /// Pulls the balance change from a withdraw or deposit event
 fn get_amount_from_event(events: &[ContractEvent], event_key: &EventKey) -> Vec<u64> {
     filter_events(events, event_key, |event_key, event| {
@@ -1852,6 +2150,27 @@
     })
 }
 
+/// Pulls the balance change from a withdraw or deposit event
+fn get_amount_from_fa_event(
+    events: &[ContractEvent],
+    type_tag: &TypeTag,
+    store_address: AccountAddress,
+) -> Vec<u64> {
+    filter_v2_events(type_tag, events, |event| {
+        if let Ok(event) = bcs::from_bytes::<FungibleAssetChangeEvent>(event.event_data()) {
+            if event.store == store_address {
+                Some(event.amount)
+            } else {
+                None
+            }
+        } else {
+            // If we can't parse the withdraw event, then there's nothing
+            warn!("Failed to parse fungible store event! Skipping");
+            None
+        }
+    })
+}
+
 /// Filter v2 FeeStatement events with non-zero storage_fee_refund
 fn get_fee_statement_from_event(events: &[ContractEvent]) -> Vec<FeeStatement> {
     events
@@ -1886,6 +2205,20 @@ fn filter_events<F: Fn(&EventKey, &ContractEvent) -> Option<T>, T>(
         .collect()
 }
 
+fn filter_v2_events<F: Fn(&ContractEventV2) -> Option<T>, T>(
+    event_type: &TypeTag,
+    events: &[ContractEvent],
+    parser: F,
+) -> Vec<T> {
+    events
+        .iter()
+        .filter(|event| event.is_v2())
+        .map(|event| event.v2().unwrap())
+        .filter(|event| event_type == event.type_tag())
+        .filter_map(parser)
+        .collect()
+}
+
 /// An enum for processing which operation is in a transaction
 pub enum OperationDetails {
     CreateAccount,
@@ -1917,7 +2250,10 @@ pub enum InternalOperation {
 
 impl InternalOperation {
     /// Pulls the [`InternalOperation`] from the set of [`Operation`]
     /// TODO: this needs to be broken up
-    pub fn extract(operations: &Vec<Operation>) -> ApiResult<InternalOperation> {
+    pub fn extract(
+        server_context: &RosettaContext,
+        operations: &Vec<Operation>,
+    ) -> ApiResult<InternalOperation> {
         match operations.len() {
             // Single operation actions
             1 => {
@@ -2165,7 +2501,10 @@
                 ))))
             },
             // Double operation actions (only coin transfer)
-            2 => Ok(Self::Transfer(Transfer::extract_transfer(operations)?)),
+            2 => Ok(Self::Transfer(Transfer::extract_transfer(
+                server_context,
+                operations,
+            )?)),
             // Anything else is not expected
             _ => Err(ApiError::InvalidOperations(Some(format!(
                 "Unrecognized operation combination {:?}",
@@ -2201,11 +2540,73 @@
                 create_account.sender,
             ),
             InternalOperation::Transfer(transfer) => {
-                is_native_coin(&transfer.currency)?;
-                (
-                    aptos_stdlib::aptos_account_transfer(transfer.receiver, transfer.amount.0),
-                    transfer.sender,
-                )
+                // Check if the currency is known
+                let currency = &transfer.currency;
+
+                // We special case APT, because we don't want the behavior to change
+                if currency == &native_coin() {
+                    return Ok((
+                        aptos_stdlib::aptos_account_transfer(transfer.receiver, transfer.amount.0),
+                        transfer.sender,
+                    ));
+                }
+
+                // For all other coins and FAs we need to handle them accordingly
+                if let Some(ref metadata) = currency.metadata {
+                    match (&metadata.move_type, &metadata.fa_address) {
+                        // For currencies with the coin type, we will always use the coin functionality, even if migrated
+                        (Some(coin_type), Some(_)) | (Some(coin_type), None) => {
+                            let coin_type_tag = parse_type_tag(coin_type)
+                                .map_err(|err| ApiError::InvalidInput(Some(err.to_string())))?;
+                            (
+                                aptos_stdlib::aptos_account_transfer_coins(
+                                    coin_type_tag,
+                                    transfer.receiver,
+                                    transfer.amount.0,
+                                ),
+                                transfer.sender,
+                            )
+                        },
+                        // For FA only currencies, we use the FA functionality
+                        (None, Some(fa_address_str)) => {
+                            let fa_address = AccountAddress::from_str(fa_address_str)?;
+
+                            (
TransactionPayload::EntryFunction(EntryFunction::new( + ModuleId::new( + AccountAddress::ONE, + ident_str!("primary_fungible_store").to_owned(), + ), + ident_str!("transfer").to_owned(), + vec![TypeTag::Struct(Box::new(StructTag { + address: AccountAddress::ONE, + module: ident_str!(OBJECT_MODULE).into(), + name: ident_str!(OBJECT_CORE_RESOURCE).into(), + type_args: vec![], + }))], + vec![ + bcs::to_bytes(&fa_address).unwrap(), + bcs::to_bytes(&transfer.receiver).unwrap(), + bcs::to_bytes(&transfer.amount.0).unwrap(), + ], + )), + transfer.sender, + ) + }, + _ => { + return Err(ApiError::InvalidInput(Some(format!( + "{} does not have a move type provided", + currency.symbol + )))) + }, + } + } else { + // This should never happen unless the server's currency list is improperly set + return Err(ApiError::InvalidInput(Some(format!( + "{} does not have a currency information provided", + currency.symbol + )))); + } }, InternalOperation::SetOperator(set_operator) => { if set_operator.old_operator.is_none() { @@ -2312,7 +2713,10 @@ pub struct Transfer { } impl Transfer { - pub fn extract_transfer(operations: &Vec) -> ApiResult { + pub fn extract_transfer( + server_context: &RosettaContext, + operations: &Vec, + ) -> ApiResult { // Only support 1:1 P2P transfer // This is composed of a Deposit and a Withdraw operation if operations.len() != 2 { @@ -2384,8 +2788,14 @@ impl Transfer { } // Check that the currency is supported - // TODO: in future use currency, since there's more than just 1 - is_native_coin(&withdraw_amount.currency)?; + if !server_context + .currencies + .contains(&withdraw_amount.currency) + { + return Err(ApiError::UnsupportedCurrency(Some( + withdraw_amount.currency.symbol.clone(), + ))); + } let withdraw_value = i128::from_str(&withdraw_amount.value) .map_err(|_| ApiError::InvalidTransferOperations(Some("Withdraw amount is invalid")))?; diff --git a/testsuite/smoke-test/src/rosetta.rs b/testsuite/smoke-test/src/rosetta.rs index 8d99e1199454e..9facc3c80b4c8 100644 --- a/testsuite/smoke-test/src/rosetta.rs +++ b/testsuite/smoke-test/src/rosetta.rs @@ -113,6 +113,7 @@ async fn setup_test( Some(aptos_rest_client::Client::new( validator.rest_api_endpoint(), )), + HashSet::new(), ) .await .unwrap(); @@ -2143,6 +2144,7 @@ async fn transfer_and_wait( sequence_number, max_gas, gas_unit_price, + native_coin(), ) }, ) From a6ad9d33679aaee49b9b2137365818023e4469da Mon Sep 17 00:00:00 2001 From: "Andrea Cappa (zi0Black)" <13380579+zi0Black@users.noreply.github.com> Date: Fri, 11 Oct 2024 16:23:36 +0200 Subject: [PATCH 09/22] Authenticator Fuzzer (#14869) Co-authored-by: Gerardo Di Giacomo --- Cargo.lock | 11 +- crates/aptos-crypto/Cargo.toml | 4 +- .../aptos-crypto/src/ed25519/ed25519_keys.rs | 9 + .../aptos-crypto/src/ed25519/ed25519_sigs.rs | 9 + .../secp256r1_ecdsa/secp256r1_ecdsa_keys.rs | 8 + .../secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs | 13 + testsuite/fuzzer/fuzz.sh | 62 ++- testsuite/fuzzer/fuzz/Cargo.toml | 13 +- .../move/aptosvm_authenticators.rs | 372 ++++++++++++++++++ .../move/aptosvm_publish_and_run.rs | 10 +- .../fuzzer/fuzz/fuzz_targets/move/utils.rs | 160 ++++++-- types/Cargo.toml | 3 +- types/src/keyless/bn254_circom.rs | 2 + types/src/keyless/groth16_sig.rs | 2 + types/src/keyless/mod.rs | 6 + types/src/keyless/openid_sig.rs | 1 + types/src/keyless/zkp_sig.rs | 1 + types/src/transaction/authenticator.rs | 2 + types/src/transaction/webauthn.rs | 18 + 19 files changed, 670 insertions(+), 36 deletions(-) create mode 100644 
testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_authenticators.rs diff --git a/Cargo.lock b/Cargo.lock index e6096f45511dc..4ab17dbdbd8d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1009,6 +1009,7 @@ dependencies = [ "aes-gcm", "anyhow", "aptos-crypto-derive", + "arbitrary", "ark-bls12-381", "ark-bn254", "ark-ec", @@ -4287,6 +4288,7 @@ dependencies = [ "aptos-experimental-runtimes", "aptos-infallible", "aptos-proptest-helpers", + "arbitrary", "ark-bn254", "ark-crypto-primitives", "ark-ec", @@ -8411,6 +8413,7 @@ name = "fuzzer-fuzz" version = "0.0.0" dependencies = [ "aptos-cached-packages", + "aptos-crypto", "aptos-framework", "aptos-language-e2e-tests", "aptos-types", @@ -8424,6 +8427,8 @@ dependencies = [ "move-vm-types", "once_cell", "rayon", + "serde", + "serde_json", ] [[package]] @@ -15851,9 +15856,9 @@ dependencies = [ [[package]] name = "strum" -version = "0.26.2" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" [[package]] name = "strum_macros" @@ -16698,7 +16703,7 @@ dependencies = [ "prost 0.12.3", "rustls-native-certs 0.7.0", "rustls-pemfile 2.1.1", - "strum 0.26.2", + "strum 0.26.3", "strum_macros 0.26.4", "tokio", "tokio-rustls 0.26.0", diff --git a/crates/aptos-crypto/Cargo.toml b/crates/aptos-crypto/Cargo.toml index eacada812fa56..8a0822a6b4bfa 100644 --- a/crates/aptos-crypto/Cargo.toml +++ b/crates/aptos-crypto/Cargo.toml @@ -16,6 +16,7 @@ rust-version = { workspace = true } aes-gcm = { workspace = true } anyhow = { workspace = true } aptos-crypto-derive = { workspace = true } +arbitrary = { workspace = true, features = ["derive"], optional = true } ark-bn254 = { workspace = true } ark-ec = { workspace = true } ark-ff = { workspace = true } @@ -61,6 +62,7 @@ typenum = { workspace = true } x25519-dalek = { workspace = true } [dev-dependencies] +arbitrary = { workspace = true, features = ["derive"] } ark-bls12-381 = { workspace = true } ark-bn254 = { workspace = true } ark-serialize = { workspace = true } @@ -79,7 +81,7 @@ trybuild = { workspace = true } default = [] assert-private-keys-not-cloneable = [] cloneable-private-keys = [] -fuzzing = ["proptest", "proptest-derive", "cloneable-private-keys"] +fuzzing = ["proptest", "proptest-derive", "cloneable-private-keys", "arbitrary"] testing = [] [[bench]] diff --git a/crates/aptos-crypto/src/ed25519/ed25519_keys.rs b/crates/aptos-crypto/src/ed25519/ed25519_keys.rs index b1f578b0a3e23..4914e1448252f 100644 --- a/crates/aptos-crypto/src/ed25519/ed25519_keys.rs +++ b/crates/aptos-crypto/src/ed25519/ed25519_keys.rs @@ -38,6 +38,15 @@ impl Clone for Ed25519PrivateKey { #[derive(DeserializeKey, Clone, SerializeKey)] pub struct Ed25519PublicKey(pub(crate) ed25519_dalek::PublicKey); +#[cfg(any(test, feature = "fuzzing"))] +impl<'a> arbitrary::Arbitrary<'a> for Ed25519PublicKey { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let bytes: [u8; ED25519_PUBLIC_KEY_LENGTH] = u.arbitrary()?; + Ed25519PublicKey::from_bytes_unchecked(&bytes) + .map_err(|_| arbitrary::Error::IncorrectFormat) + } +} + impl Ed25519PrivateKey { /// The length of the Ed25519PrivateKey pub const LENGTH: usize = ED25519_PRIVATE_KEY_LENGTH; diff --git a/crates/aptos-crypto/src/ed25519/ed25519_sigs.rs b/crates/aptos-crypto/src/ed25519/ed25519_sigs.rs index 4c1d9438c83a0..c13b8483b7f97 100644 --- a/crates/aptos-crypto/src/ed25519/ed25519_sigs.rs 
+++ b/crates/aptos-crypto/src/ed25519/ed25519_sigs.rs @@ -18,6 +18,15 @@ use std::{cmp::Ordering, fmt}; #[derive(DeserializeKey, Clone, SerializeKey)] pub struct Ed25519Signature(pub(crate) ed25519_dalek::Signature); +#[cfg(any(test, feature = "fuzzing"))] +impl<'a> arbitrary::Arbitrary<'a> for Ed25519Signature { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let bytes: [u8; ED25519_SIGNATURE_LENGTH] = u.arbitrary()?; + Ed25519Signature::from_bytes_unchecked(&bytes) + .map_err(|_| arbitrary::Error::IncorrectFormat) + } +} + impl Ed25519Signature { /// The length of the Ed25519Signature pub const LENGTH: usize = ED25519_SIGNATURE_LENGTH; diff --git a/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_keys.rs b/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_keys.rs index e2f32013245a0..d48cd8a2a3a9c 100644 --- a/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_keys.rs +++ b/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_keys.rs @@ -41,6 +41,14 @@ impl Clone for PrivateKey { #[key_name("Secp256r1EcdsaPublicKey")] pub struct PublicKey(pub(crate) p256::ecdsa::VerifyingKey); +#[cfg(any(test, feature = "fuzzing"))] +impl<'a> arbitrary::Arbitrary<'a> for PublicKey { + fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result { + let bytes: [u8; PUBLIC_KEY_LENGTH] = u.arbitrary()?; + PublicKey::from_bytes_unchecked(&bytes).map_err(|_| arbitrary::Error::IncorrectFormat) + } +} + impl PrivateKey { /// The length of the PrivateKey pub const LENGTH: usize = PRIVATE_KEY_LENGTH; diff --git a/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs b/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs index 28cfb76105170..aac404aaaba40 100644 --- a/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs +++ b/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs @@ -35,6 +35,7 @@ impl Signature { /// Deserialize an P256Signature, without checking for malleability /// Uses the SEC1 serialization format. + #[cfg(not(feature = "fuzzing"))] pub(crate) fn from_bytes_unchecked( bytes: &[u8], ) -> std::result::Result { @@ -44,6 +45,18 @@ impl Signature { } } + /// Deserialize an P256Signature, without checking for malleability + /// Uses the SEC1 serialization format. 
+ #[cfg(any(test, feature = "fuzzing"))] + pub fn from_bytes_unchecked( + bytes: &[u8], + ) -> std::result::Result { + match p256::ecdsa::Signature::try_from(bytes) { + Ok(p256_signature) => Ok(Signature(p256_signature)), + Err(_) => Err(CryptoMaterialError::DeserializationError), + } + } + /// return an all-zero signature (for test only) #[cfg(any(test, feature = "fuzzing"))] pub fn dummy_signature() -> Self { diff --git a/testsuite/fuzzer/fuzz.sh b/testsuite/fuzzer/fuzz.sh index a932286fc6a70..bdf98b9275816 100755 --- a/testsuite/fuzzer/fuzz.sh +++ b/testsuite/fuzzer/fuzz.sh @@ -39,6 +39,12 @@ function usage() { "build-oss-fuzz") echo "Usage: $0 build-oss-fuzz " ;; + "coverage") + echo "Usage: $0 coverage " + ;; + "clean-coverage") + echo "Usage: $0 clean-coverage " + ;; "debug") echo "Usage: $0 debug " ;; @@ -55,10 +61,11 @@ function usage() { echo "Usage: $0 test" ;; *) - echo "Usage: $0 " + echo "Usage: $0 " echo " add adds a new fuzz target" echo " build builds fuzz targets" echo " build-oss-fuzz builds fuzz targets for oss-fuzz" + echo " coverage generates coverage for a fuzz target" echo " debug debugs a fuzz target with a testcase" echo " flamegraph generates a flamegraph for a fuzz target with a testcase" echo " list lists existing fuzz targets" @@ -125,6 +132,49 @@ function build-oss-fuzz() { done } +function coverage() { + if [ -z "$1" ]; then + usage coverage + fi + fuzz_target=$1 + local corpus_dir="fuzz/corpus/$fuzz_target" + local coverage_dir="./fuzz/coverage/$fuzz_target/report" + mkdir -p $coverage_dir + + if [ ! -d "fuzz/coverage/$fuzz_target" ]; then + cargo_fuzz coverage $fuzz_target $corpus_dir + fi + + info "Generating coverage for $fuzz_target" + + fuzz_target_bin=$(find ./target -name $fuzz_target -type f -perm /111) #$(find target/*/coverage -name $fuzz_target -type f) + echo "Found fuzz target binary: $fuzz_target_bin" + # Generate the coverage report + cargo +nightly cov -- show $fuzz_target_bin \ + --format=html \ + --instr-profile=fuzz/coverage/$fuzz_target/coverage.profdata \ + --show-directory-coverage \ + --output-dir=$coverage_dir \ + -Xdemangler=rustfilt \ + --show-branches=count \ + --ignore-filename-regex='rustc/.*/library|\.cargo' +} + +function clean-coverage() { + if [ "$#" -ne 1 ]; then + usage clean + fi + + local fuzz_target="$1" + local target_dir="coverage/$fuzz_target" + + if [ "$fuzz_target" == "all" ]; then + rm -rf coverage + else + rm -rf $target_dir + fi +} + # use rust-gdb to debug a fuzz target with a testcase function debug() { if [ -z "$2" ]; then @@ -182,7 +232,7 @@ function run() { fi fi info "Running $fuzz_target" - cargo_fuzz run --sanitizer none $fuzz_target $testcase + cargo_fuzz run --sanitizer none -O $fuzz_target $testcase -- -fork=10 } function test() { @@ -247,6 +297,14 @@ case "$1" in shift build-oss-fuzz "$@" ;; + "coverage") + shift + coverage "$@" + ;; + "clean-coverage") + shift + clean-coverage "$@" + ;; "debug") shift debug "$@" diff --git a/testsuite/fuzzer/fuzz/Cargo.toml b/testsuite/fuzzer/fuzz/Cargo.toml index e5054199095f9..bc787a7f08988 100644 --- a/testsuite/fuzzer/fuzz/Cargo.toml +++ b/testsuite/fuzzer/fuzz/Cargo.toml @@ -9,11 +9,12 @@ cargo-fuzz = true [dependencies] aptos-cached-packages = { workspace = true } +aptos-crypto = { workspace = true } aptos-framework = { workspace = true } aptos-language-e2e-tests = { workspace = true, features = ["fuzzing"] } -aptos-types = { workspace = true } +aptos-types = { workspace = true, features = ["fuzzing"] } aptos-vm = { workspace = true } -arbitrary = "1.3.2" 
+arbitrary = { workspace = true, features = ["derive"] }
 bcs = { workspace = true }
 libfuzzer-sys = "0.4"
 move-binary-format = { workspace = true, features = ["fuzzing"] }
@@ -22,6 +23,8 @@ move-core-types = { workspace = true, features = ["fuzzing"] }
 move-vm-types = { workspace = true, features = ["fuzzing"] }
 once_cell = { workspace = true }
 rayon = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
 
 [features]
 disabled = []
@@ -77,3 +80,9 @@ name = "move_aptosvm_publish"
 path = "fuzz_targets/move/aptosvm_publish.rs"
 test = false
 doc = false
+
+[[bin]]
+name = "move_aptosvm_authenticators"
+path = "fuzz_targets/move/aptosvm_authenticators.rs"
+test = false
+doc = false
diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_authenticators.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_authenticators.rs
new file mode 100644
index 0000000000000..a406d991a800c
--- /dev/null
+++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_authenticators.rs
@@ -0,0 +1,372 @@
+#![no_main]
+
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use aptos_cached_packages::aptos_stdlib;
+use aptos_crypto::{
+    ed25519::{Ed25519PrivateKey, Ed25519PublicKey},
+    PrivateKey, SigningKey, Uniform,
+};
+use aptos_language_e2e_tests::{
+    account::Account, data_store::GENESIS_CHANGE_SET_HEAD, executor::FakeExecutor,
+};
+use aptos_types::{
+    chain_id::ChainId,
+    keyless::{AnyKeylessPublicKey, KeylessSignature, TransactionAndProof},
+    transaction::{
+        authenticator::{
+            AccountAuthenticator, AnyPublicKey, AnySignature, EphemeralPublicKey,
+            EphemeralSignature, SingleKeyAuthenticator, TransactionAuthenticator,
+        },
+        ExecutionStatus, SignedTransaction, TransactionStatus,
+    },
+    write_set::WriteSet,
+};
+use aptos_vm::AptosVM;
+use libfuzzer_sys::{fuzz_target, Corpus};
+use move_core_types::vm_status::{StatusCode, StatusType};
+use once_cell::sync::Lazy;
+use std::sync::Arc;
+mod utils;
+use utils::{
+    check_for_invariant_violation, FuzzerTransactionAuthenticator, Style, TransactionState,
+};
+
+// genesis write set generated once for each fuzzing session
+static VM: Lazy<WriteSet> = Lazy::new(|| GENESIS_CHANGE_SET_HEAD.write_set().clone());
+
+const FUZZER_CONCURRENCY_LEVEL: usize = 1;
+static TP: Lazy<Arc<rayon::ThreadPool>> = Lazy::new(|| {
+    Arc::new(
+        rayon::ThreadPoolBuilder::new()
+            .num_threads(FUZZER_CONCURRENCY_LEVEL)
+            .build()
+            .unwrap(),
+    )
+});
+
+fn run_case(input: TransactionState) -> Result<(), Corpus> {
+    tdbg!(&input);
+
+    AptosVM::set_concurrency_level_once(FUZZER_CONCURRENCY_LEVEL);
+    let mut vm = FakeExecutor::from_genesis_with_existing_thread_pool(
+        &VM,
+        ChainId::mainnet(),
+        Arc::clone(&TP),
+    )
+    .set_not_parallel();
+
+    let sender_acc = if true {
+        // create sender pub/priv key. initialize and fund account
+        vm.create_accounts(1, input.tx_auth_type.sender().fund_amount(), 0)
+            .remove(0)
+    } else {
+        // only create sender pub/priv key. do not initialize
+        Account::new()
+    };
+
+    let receiver = Account::new();
+
+    // build tx
+    let tx = sender_acc
+        .transaction()
+        .payload(aptos_stdlib::aptos_coin_transfer(*receiver.address(), 1))
+        .sequence_number(0)
+        .gas_unit_price(100)
+        .max_gas_amount(1000);
+
+    let tx_auth_type = input.tx_auth_type.clone();
+
+    let raw_tx = tx.raw();
+    let tx = match tx_auth_type {
+        FuzzerTransactionAuthenticator::Ed25519 { sender: _ } => raw_tx
+            .sign(&sender_acc.privkey, sender_acc.pubkey.as_ed25519().unwrap())
+            .map_err(|_| Corpus::Keep)?
+ .into_inner(), + FuzzerTransactionAuthenticator::Keyless { + sender: _, + style, + any_keyless_public_key, + keyless_signature, + } => { + match style { + Style::Break => { + // Generate a keypair for ephemeral keys + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key: Ed25519PublicKey = private_key.public_key(); + + // Create a TransactionAndProof to be signed + // This needs to be valid because the signature is checked in mempool (real flow) + let txn_and_proof = TransactionAndProof { + message: raw_tx.clone(), + proof: None, + }; + + // Sign the transaction + let signature = private_key.sign(&txn_and_proof).map_err(|_| Corpus::Keep)?; + + // Build AnyPublicKey::Keyless + let any_public_key = match any_keyless_public_key { + AnyKeylessPublicKey::Normal(normal_key) => { + // TODO: think about idc, it's generated by new_from_preimage + AnyPublicKey::Keyless { + public_key: normal_key, + } + }, + AnyKeylessPublicKey::Federated(federated_key) => { + // TODO: think about idc, it's generated by new_from_preimage (nested in KeylessPublicKey) + AnyPublicKey::FederatedKeyless { + public_key: federated_key, + } + }, + }; + + // Build AnySignature::Keyless + let any_signature = AnySignature::Keyless { + signature: KeylessSignature { + cert: keyless_signature.cert().clone(), + jwt_header_json: input.tx_auth_type.get_jwt_header_json().unwrap(), + exp_date_secs: keyless_signature.exp_date_secs(), + ephemeral_pubkey: EphemeralPublicKey::ed25519(public_key), + ephemeral_signature: EphemeralSignature::ed25519(signature), + }, + }; + + // Build an authenticator + let authenticator = TransactionAuthenticator::SingleSender { + sender: AccountAuthenticator::SingleKey { + authenticator: SingleKeyAuthenticator::new( + any_public_key, + any_signature, + ), + }, + }; + + // Construct the SignedTransaction + SignedTransaction::new_signed_transaction(raw_tx, authenticator) + }, + /* + Style::MatchJWT => { + // Generate a keypair for ephemeral keys + let private_key = Ed25519PrivateKey::generate_for_testing(); + let public_key: Ed25519PublicKey = private_key.public_key(); + + // Create a TransactionAndProof to be signed + let txn_and_proof = TransactionAndProof { + message: raw_tx.clone(), + proof: None, + }; + + // Sign the transaction + let signature = private_key.sign(&txn_and_proof).map_err(|_| Corpus::Keep)?; + + // Build AnyPublicKey::Keyless + let any_public_key = AnyPublicKey::Keyless { + public_key: KeylessPublicKey { + iss_val: "test.oidc.provider".to_string(), + idc: IdCommitment::new_from_preimage( + &Pepper::from_number(0x5678), + "aud", + "uid_key", + "uid_val", + ) + .map_err(|_| Corpus::Keep)?, + }, + }; + + /* + EphemeralCertificate::OpenIdSig(OpenIdSig { + jwt_sig: vec![], + jwt_payload_json: "jwt_payload_json".to_string(), + uid_key: "uid_key".to_string(), + epk_blinder: b"epk_blinder".to_vec(), + pepper: Pepper::from_number(0x1234), + idc_aud_val: None, + }) + */ + + // Build AnySignature::Keyless + let any_signature = AnySignature::Keyless { + signature: KeylessSignature { + cert: keyless_signature.cert().clone(), + jwt_header_json: input.tx_auth_type.get_jwt_header_json().unwrap(), + exp_date_secs: keyless_signature.exp_date_secs(), + ephemeral_pubkey: EphemeralPublicKey::ed25519(public_key), + ephemeral_signature: EphemeralSignature::ed25519(signature), + }, + }; + + // Build an authenticator + let authenticator = TransactionAuthenticator::SingleSender { + sender: AccountAuthenticator::SingleKey { + authenticator: SingleKeyAuthenticator::new(any_public_key, 
any_signature),
+                },
+            };
+
+            // Construct the SignedTransaction
+            SignedTransaction::new_signed_transaction(raw_tx, authenticator)
+        },
+        Style::MatchKeys => {
+            // Generate a keypair for ephemeral keys
+            let private_key = Ed25519PrivateKey::generate_for_testing();
+            let public_key: Ed25519PublicKey = private_key.public_key();
+
+            // Create a TransactionAndProof to be signed
+            let txn_and_proof = TransactionAndProof {
+                message: raw_tx.clone(),
+                proof: None,
+            };
+
+            // Sign the transaction
+            let signature = private_key.sign(&txn_and_proof).map_err(|_| Corpus::Keep)?;
+
+            // Build AnyPublicKey::Keyless
+            let any_public_key = AnyPublicKey::Keyless {
+                public_key: KeylessPublicKey {
+                    iss_val: "test.oidc.provider".to_string(),
+                    idc: IdCommitment::new_from_preimage(
+                        &Pepper::from_number(0x5678),
+                        "aud",
+                        "uid_key",
+                        "uid_val",
+                    )
+                    .map_err(|_| Corpus::Keep)?,
+                },
+            };
+
+            /*
+            EphemeralCertificate::OpenIdSig(OpenIdSig {
+                jwt_sig: vec![],
+                jwt_payload_json: "jwt_payload_json".to_string(),
+                uid_key: "uid_key".to_string(),
+                epk_blinder: b"epk_blinder".to_vec(),
+                pepper: Pepper::from_number(0x1234),
+                idc_aud_val: None,
+            })
+            */
+
+            // Build AnySignature::Keyless
+            let any_signature = AnySignature::Keyless {
+                signature: KeylessSignature {
+                    cert: keyless_signature.cert().clone(),
+                    jwt_header_json: input.tx_auth_type.get_jwt_header_json().unwrap(),
+                    exp_date_secs: keyless_signature.exp_date_secs(),
+                    ephemeral_pubkey: EphemeralPublicKey::ed25519(public_key),
+                    ephemeral_signature: EphemeralSignature::ed25519(signature),
+                },
+            };
+
+            // Build an authenticator
+            let authenticator = TransactionAuthenticator::SingleSender {
+                sender: AccountAuthenticator::SingleKey {
+                    authenticator: SingleKeyAuthenticator::new(any_public_key, any_signature),
+                },
+            };
+
+            // Construct the SignedTransaction
+            SignedTransaction::new_signed_transaction(raw_tx, authenticator)
+        }
+        */
+            }
+        },
+        FuzzerTransactionAuthenticator::MultiAgent {
+            sender: _,
+            secondary_signers,
+        } => {
+            // higher number here slows down fuzzer significantly due to slow signing process.
+            if secondary_signers.len() > 10 {
+                return Err(Corpus::Keep);
+            }
+            let secondary_accs: Vec<_> = secondary_signers
+                .iter()
+                .map(|acc| acc.convert_account(&mut vm))
+                .collect();
+            let secondary_signers = secondary_accs.iter().map(|acc| *acc.address()).collect();
+            let secondary_private_keys = secondary_accs.iter().map(|acc| &acc.privkey).collect();
+            raw_tx
+                .sign_multi_agent(
+                    &sender_acc.privkey,
+                    secondary_signers,
+                    secondary_private_keys,
+                )
+                .map_err(|_| Corpus::Keep)?
+                .into_inner()
+        },
+        FuzzerTransactionAuthenticator::FeePayer {
+            sender: _,
+            secondary_signers,
+            fee_payer,
+        } => {
+            // higher number here slows down fuzzer significantly due to slow signing process.
+            if secondary_signers.len() > 10 {
+                return Err(Corpus::Keep);
+            }
+            let secondary_accs: Vec<_> = secondary_signers
+                .iter()
+                .map(|acc| acc.convert_account(&mut vm))
+                .collect();
+
+            let secondary_signers = secondary_accs.iter().map(|acc| *acc.address()).collect();
+            let secondary_private_keys = secondary_accs.iter().map(|acc| &acc.privkey).collect();
+            let fee_payer_acc = fee_payer.convert_account(&mut vm);
+            raw_tx
+                .sign_fee_payer(
+                    &sender_acc.privkey,
+                    secondary_signers,
+                    secondary_private_keys,
+                    *fee_payer_acc.address(),
+                    &fee_payer_acc.privkey,
+                )
+                .map_err(|_| Corpus::Keep)?
+ .into_inner() + }, + }; + + // exec tx + tdbg!("exec start"); + + let res = vm.execute_block(vec![tx.clone()]); + + let res = res + .map_err(|e| { + check_for_invariant_violation(e); + Corpus::Keep + })? + .pop() + .expect("expect 1 output"); + tdbg!("exec end"); + + // if error exit gracefully + let status = match tdbg!(res.status()) { + TransactionStatus::Keep(status) => status, + TransactionStatus::Discard(e) => { + if e.status_type() == StatusType::InvariantViolation { + panic!("invariant violation {:?}", e); + } + return Err(Corpus::Keep); + }, + _ => return Err(Corpus::Keep), + }; + match tdbg!(status) { + ExecutionStatus::Success => (), + ExecutionStatus::MiscellaneousError(e) => { + if let Some(e) = e { + if e.status_type() == StatusType::InvariantViolation + && *e != StatusCode::TYPE_RESOLUTION_FAILURE + && *e != StatusCode::STORAGE_ERROR + { + panic!("invariant violation {:?}", e); + } + } + return Err(Corpus::Keep); + }, + _ => return Err(Corpus::Keep), + }; + + Ok(()) +} + +fuzz_target!(|fuzz_data: TransactionState| -> Corpus { + run_case(fuzz_data).err().unwrap_or(Corpus::Keep) +}); diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_publish_and_run.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_publish_and_run.rs index 20301e8db6d98..5f6a50e42f547 100644 --- a/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_publish_and_run.rs +++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/aptosvm_publish_and_run.rs @@ -32,8 +32,8 @@ use std::{ }; mod utils; use utils::{ - check_for_invariant_violation, publish_group, sort_by_deps, Authenticator, ExecVariant, - RunnableState, + check_for_invariant_violation, publish_group, sort_by_deps, ExecVariant, + FuzzerRunnableAuthenticator, RunnableState, }; // genesis write set generated once for each fuzzing session @@ -258,11 +258,11 @@ fn run_case(mut input: RunnableState) -> Result<(), Corpus> { }; let raw_tx = tx.raw(); let tx = match input.tx_auth_type { - Authenticator::Ed25519 { sender: _ } => raw_tx + FuzzerRunnableAuthenticator::Ed25519 { sender: _ } => raw_tx .sign(&sender_acc.privkey, sender_acc.pubkey.as_ed25519().unwrap()) .map_err(|_| Corpus::Keep)? .into_inner(), - Authenticator::MultiAgent { + FuzzerRunnableAuthenticator::MultiAgent { sender: _, secondary_signers, } => { @@ -285,7 +285,7 @@ fn run_case(mut input: RunnableState) -> Result<(), Corpus> { .map_err(|_| Corpus::Keep)? 
                .into_inner()
         },
-        Authenticator::FeePayer {
+        FuzzerRunnableAuthenticator::FeePayer {
             sender: _,
             secondary_signers,
             fee_payer,
diff --git a/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs b/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs
index cfc1d1971dea5..9a95cdd257361 100644
--- a/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs
+++ b/testsuite/fuzzer/fuzz/fuzz_targets/move/utils.rs
@@ -8,7 +8,10 @@ use aptos_framework::natives::code::{
     ModuleMetadata, MoveOption, PackageDep, PackageMetadata, UpgradePolicy,
 };
 use aptos_language_e2e_tests::{account::Account, executor::FakeExecutor};
-use aptos_types::transaction::{ExecutionStatus, TransactionPayload, TransactionStatus};
+use aptos_types::{
+    keyless::{AnyKeylessPublicKey, EphemeralCertificate},
+    transaction::{ExecutionStatus, TransactionPayload, TransactionStatus},
+};
 use arbitrary::Arbitrary;
 use libfuzzer_sys::Corpus;
 use move_binary_format::{
@@ -20,6 +23,7 @@ use move_core_types::{
     value::{MoveStructLayout, MoveTypeLayout, MoveValue},
     vm_status::{StatusType, VMStatus},
 };
+use serde::{Deserialize, Serialize};
 use std::collections::{BTreeMap, BTreeSet, HashSet};
 
 #[macro_export]
@@ -60,22 +64,6 @@ pub struct UserAccount {
     fund: FundAmount,
 }
 
-#[derive(Debug, Arbitrary, Eq, PartialEq, Clone)]
-pub enum Authenticator {
-    Ed25519 {
-        sender: UserAccount,
-    },
-    MultiAgent {
-        sender: UserAccount,
-        secondary_signers: Vec<UserAccount>,
-    },
-    FeePayer {
-        sender: UserAccount,
-        secondary_signers: Vec<UserAccount>,
-        fee_payer: UserAccount,
-    },
-}
-
 impl UserAccount {
     pub fn fund_amount(&self) -> u64 {
         match self.fund {
@@ -94,15 +82,32 @@
     }
 }
 
-impl Authenticator {
+// Used to fuzz the MoveVM
+#[derive(Debug, Arbitrary, Eq, PartialEq, Clone)]
+pub enum FuzzerRunnableAuthenticator {
+    Ed25519 {
+        sender: UserAccount,
+    },
+    MultiAgent {
+        sender: UserAccount,
+        secondary_signers: Vec<UserAccount>,
+    },
+    FeePayer {
+        sender: UserAccount,
+        secondary_signers: Vec<UserAccount>,
+        fee_payer: UserAccount,
+    },
+}
+
+impl FuzzerRunnableAuthenticator {
     pub fn sender(&self) -> UserAccount {
         match self {
-            Authenticator::Ed25519 { sender } => *sender,
-            Authenticator::MultiAgent {
+            FuzzerRunnableAuthenticator::Ed25519 { sender } => *sender,
+            FuzzerRunnableAuthenticator::MultiAgent {
                 sender,
                 secondary_signers: _,
             } => *sender,
-            Authenticator::FeePayer {
+            FuzzerRunnableAuthenticator::FeePayer {
                 sender,
                 secondary_signers: _,
                 fee_payer: _,
             } => *sender,
@@ -130,7 +135,118 @@ pub enum ExecVariant {
 pub struct RunnableState {
     pub dep_modules: Vec<CompiledModule>,
     pub exec_variant: ExecVariant,
-    pub tx_auth_type: Authenticator,
+    pub tx_auth_type: FuzzerRunnableAuthenticator,
+}
+
+#[derive(Debug, Arbitrary, Eq, PartialEq, Clone, Serialize, Deserialize)]
+pub struct JwtHeader {
+    pub alg: String,
+    pub typ: Option<String>,
+    pub kid: Option<String>,
+    // Add other JWT header fields as needed
+}
+
+#[derive(Debug, Arbitrary, Eq, PartialEq, Clone, Serialize, Deserialize)]
+pub struct FuzzingKeylessSignature {
+    exp_date_secs: u64,
+    jwt_header: JwtHeader,
+    cert: EphemeralCertificate,
+    //ephemeral_pubkey: EphemeralPublicKey,
+    //ephemeral_signature: EphemeralSignature,
+}
+
+impl FuzzingKeylessSignature {
+    pub fn exp_date_secs(&self) -> u64 {
+        self.exp_date_secs
+    }
+
+    pub fn jwt_header(&self) -> &JwtHeader {
+        &self.jwt_header
+    }
+
+    pub fn cert(&self) -> &EphemeralCertificate {
+        &self.cert
+    }
+
+    /*
+    pub fn ephemeral_pubkey(&self) -> &EphemeralPublicKey {
+        &self.ephemeral_pubkey
+    }
+
+    pub fn ephemeral_signature(&self) -> &EphemeralSignature {
+        &self.ephemeral_signature
+    }
+    */
+}
+
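+// libfuzzer's `fuzz_target!` with a typed input derives that input through
+// `Arbitrary`; roughly (a sketch, not the exact macro expansion):
+//
+//     let mut u = arbitrary::Unstructured::new(raw_bytes);
+//     let state = TransactionState::arbitrary(&mut u); // TransactionState is defined below
+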
+#[derive(Debug, Arbitrary, Eq, PartialEq, Clone)]
+pub enum Style {
+    Break,
+    //MatchJWT,
+    //MatchKeys,
+}
+
+// TODO: reorganize this type to exclude fields that are not useful. Do this after implementing JWK and Federated Keyless.
+// Used to fuzz the transaction authenticator
+#[derive(Debug, Arbitrary, Eq, PartialEq, Clone)]
+pub enum FuzzerTransactionAuthenticator {
+    Ed25519 {
+        sender: UserAccount,
+    },
+    Keyless {
+        sender: UserAccount,
+        style: Style,
+        any_keyless_public_key: AnyKeylessPublicKey,
+        keyless_signature: FuzzingKeylessSignature,
+    },
+    MultiAgent {
+        sender: UserAccount,
+        secondary_signers: Vec<UserAccount>,
+    },
+    FeePayer {
+        sender: UserAccount,
+        secondary_signers: Vec<UserAccount>,
+        fee_payer: UserAccount,
+    },
+}
+
+impl FuzzerTransactionAuthenticator {
+    pub fn sender(&self) -> UserAccount {
+        match self {
+            FuzzerTransactionAuthenticator::Ed25519 { sender } => *sender,
+            FuzzerTransactionAuthenticator::Keyless {
+                sender,
+                style: _,
+                any_keyless_public_key: _,
+                keyless_signature: _,
+            } => *sender,
+            FuzzerTransactionAuthenticator::MultiAgent {
+                sender,
+                secondary_signers: _,
+            } => *sender,
+            FuzzerTransactionAuthenticator::FeePayer {
+                sender,
+                secondary_signers: _,
+                fee_payer: _,
+            } => *sender,
+        }
+    }
+
+    pub fn get_jwt_header_json(&self) -> Option<String> {
+        if let FuzzerTransactionAuthenticator::Keyless {
+            keyless_signature, ..
+        } = self
+        {
+            serde_json::to_string(&keyless_signature.jwt_header).ok()
+        } else {
+            None
+        }
+    }
+}
+
+#[derive(Debug, Arbitrary, Eq, PartialEq, Clone)]
+pub struct TransactionState {
+    pub tx_auth_type: FuzzerTransactionAuthenticator,
+}
 
 // used for ordering modules topologically
diff --git a/types/Cargo.toml b/types/Cargo.toml
index df07844a31988..a4b140253c636 100644
--- a/types/Cargo.toml
+++ b/types/Cargo.toml
@@ -20,6 +20,7 @@ aptos-crypto-derive = { workspace = true }
 aptos-dkg = { workspace = true }
 aptos-experimental-runtimes = { workspace = true }
 aptos-infallible = { workspace = true }
+arbitrary = { workspace = true, features = ["derive"], optional = true }
 ark-bn254 = { workspace = true }
 ark-ec = { workspace = true }
 ark-ff = { workspace = true }
@@ -95,7 +96,7 @@ url = { workspace = true }
 
 [features]
 default = []
-fuzzing = ["proptest", "proptest-derive", "aptos-crypto/fuzzing", "move-core-types/fuzzing"]
+fuzzing = ["proptest", "proptest-derive", "aptos-crypto/fuzzing", "move-core-types/fuzzing", "arbitrary"]
 
 [[bench]]
 name = "keyless"
diff --git a/types/src/keyless/bn254_circom.rs b/types/src/keyless/bn254_circom.rs
index 83b28c03905d8..1c37c1d8b176b 100644
--- a/types/src/keyless/bn254_circom.rs
+++ b/types/src/keyless/bn254_circom.rs
@@ -61,6 +61,7 @@ pub fn parse_fr_element(s: &str) -> Result<Fr> {
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
 pub struct G1Bytes(pub(crate) [u8; G1_PROJECTIVE_COMPRESSED_NUM_BYTES]);
 
 impl G1Bytes {
@@ -150,6 +151,7 @@ impl TryInto<G1Projective> for &G1Bytes {
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
 pub struct G2Bytes(pub(crate) [u8; G2_PROJECTIVE_COMPRESSED_NUM_BYTES]);
 
 impl G2Bytes {
diff --git a/types/src/keyless/groth16_sig.rs b/types/src/keyless/groth16_sig.rs
index b0b4db07ca823..fa589d5cbb44a 100644
--- a/types/src/keyless/groth16_sig.rs
+++ b/types/src/keyless/groth16_sig.rs
@@ -24,6 +24,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 #[derive(
     Copy, Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize, CryptoHasher, BCSCryptoHash,
 )]
+#[cfg_attr(feature = 
"fuzzing", derive(arbitrary::Arbitrary))] pub struct Groth16Proof { a: G1Bytes, b: G2Bytes, @@ -31,6 +32,7 @@ pub struct Groth16Proof { } #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] pub struct ZeroKnowledgeSig { pub proof: ZKP, /// The expiration horizon that the circuit should enforce on the expiration date committed in diff --git a/types/src/keyless/mod.rs b/types/src/keyless/mod.rs index 78e5b5365b7d4..1e92aa1abf629 100644 --- a/types/src/keyless/mod.rs +++ b/types/src/keyless/mod.rs @@ -76,6 +76,7 @@ macro_rules! serialize { /// the expiration time /// `exp_timestamp_secs`. #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] pub enum EphemeralCertificate { ZeroKnowledgeSig(ZeroKnowledgeSig), OpenIdSig(OpenIdSig), @@ -161,6 +162,7 @@ impl KeylessSignature { /// This value should **NOT* be changed since on-chain addresses are based on it (e.g., /// hashing with a larger pepper would lead to a different address). #[derive(Clone, Debug, Eq, PartialEq, Hash)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] pub struct Pepper(pub(crate) [u8; poseidon_bn254::keyless::BYTES_PACKED_PER_SCALAR]); impl Pepper { @@ -226,6 +228,7 @@ impl Serialize for Pepper { } #[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] pub struct IdCommitment(#[serde(with = "serde_bytes")] pub(crate) Vec); impl IdCommitment { @@ -293,6 +296,7 @@ impl TryFrom<&[u8]> for IdCommitment { /// `PublicKey` struct. But the `key_name` procedural macro only works with the `[De]SerializeKey` /// procedural macros, which we cannot use since they force us to reimplement serialization. #[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)] +#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))] pub struct KeylessPublicKey { /// The value of the `iss` field from the JWT, indicating the OIDC provider. /// e.g., @@ -311,6 +315,7 @@ pub struct KeylessPublicKey { /// Unlike a normal keyless account, a "federated" keyless account will accept JWKs published at a /// specific contract address. 
#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub struct FederatedKeylessPublicKey {
    pub jwk_addr: AccountAddress,
    pub pk: KeylessPublicKey,
@@ -336,6 +341,7 @@ impl TryFrom<&[u8]> for FederatedKeylessPublicKey {
}

#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub enum AnyKeylessPublicKey {
    Normal(KeylessPublicKey),
    Federated(FederatedKeylessPublicKey),
diff --git a/types/src/keyless/openid_sig.rs b/types/src/keyless/openid_sig.rs
index 01d75a5b99212..c09646eef121f 100644
--- a/types/src/keyless/openid_sig.rs
+++ b/types/src/keyless/openid_sig.rs
@@ -18,6 +18,7 @@ use serde_with::skip_serializing_none;
use std::collections::BTreeMap;

#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub struct OpenIdSig {
    /// The decoded bytes of the JWS signature in the JWT ()
    #[serde(with = "serde_bytes")]
diff --git a/types/src/keyless/zkp_sig.rs b/types/src/keyless/zkp_sig.rs
index bb6c41988a442..2072529660d31 100644
--- a/types/src/keyless/zkp_sig.rs
+++ b/types/src/keyless/zkp_sig.rs
@@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize};
#[derive(
    Copy, Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize, CryptoHasher, BCSCryptoHash,
)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub enum ZKP {
    Groth16(Groth16Proof),
}
diff --git a/types/src/transaction/authenticator.rs b/types/src/transaction/authenticator.rs
index f5237eef056df..ed7643af29951 100644
--- a/types/src/transaction/authenticator.rs
+++ b/types/src/transaction/authenticator.rs
@@ -1150,6 +1150,7 @@ impl TryFrom<&[u8]> for AnyPublicKey {
}

#[derive(Clone, Debug, Eq, PartialEq, Hash, Serialize, Deserialize)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub enum EphemeralSignature {
    Ed25519 {
        signature: Ed25519Signature,
    },
@@ -1218,6 +1219,7 @@ impl TryFrom<&[u8]> for EphemeralSignature {
}

#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub enum EphemeralPublicKey {
    Ed25519 {
        public_key: Ed25519PublicKey,
diff --git a/types/src/transaction/webauthn.rs b/types/src/transaction/webauthn.rs
index 5ccf74af18817..84d76de586d19 100644
--- a/types/src/transaction/webauthn.rs
+++ b/types/src/transaction/webauthn.rs
@@ -54,11 +54,29 @@ pub enum AssertionSignature {
    },
}

+/// Custom `Arbitrary` implementation for fuzzing, needed because
+/// `secp256r1_ecdsa::Signature` wraps the external `p256::ecdsa::Signature`
+/// dependency, which does not offer an `Arbitrary` derive.
+#[cfg(any(test, feature = "fuzzing"))]
+impl<'a> arbitrary::Arbitrary<'a> for AssertionSignature {
+    fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
+        // Generate a fixed-length byte array for the signature
+        let bytes: [u8; aptos_crypto::secp256r1_ecdsa::Signature::LENGTH] = u.arbitrary()?;
+
+        // Create a signature without validating it
+        let signature = aptos_crypto::secp256r1_ecdsa::Signature::from_bytes_unchecked(&bytes)
+            .map_err(|_| arbitrary::Error::IncorrectFormat)?;
+
+        Ok(AssertionSignature::Secp256r1Ecdsa { signature })
+    }
+}
+
/// `PartialAuthenticatorAssertionResponse` includes a subset of the fields returned from
/// an [`AuthenticatorAssertionResponse`](passkey_types::webauthn::AuthenticatorAssertionResponse)
///
/// See
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
+#[cfg_attr(feature = "fuzzing", derive(arbitrary::Arbitrary))]
pub struct 
PartialAuthenticatorAssertionResponse { /// This attribute contains the raw signature returned from the authenticator. /// NOTE: Many signatures returned from WebAuthn assertions are not raw signatures. From 5a90c6ac659b3bee4d4cab6e4fdf1888d28ec947 Mon Sep 17 00:00:00 2001 From: Alden Hu Date: Fri, 11 Oct 2024 09:12:26 -0700 Subject: [PATCH 10/22] simplify TransactionReplayer::replay implementation (#14920) --- .../src/ledger_update_output.rs | 31 +---- execution/executor-types/src/lib.rs | 4 +- execution/executor/src/chunk_executor.rs | 112 ++++++++---------- .../src/components/chunk_commit_queue.rs | 28 +---- .../src/components/chunk_result_verifier.rs | 26 ++++ .../executor/src/components/executed_chunk.rs | 43 +------ execution/executor/src/tests/mod.rs | 75 ++++++++---- .../src/backup_types/transaction/restore.rs | 50 +++++--- 8 files changed, 172 insertions(+), 197 deletions(-) diff --git a/execution/executor-types/src/ledger_update_output.rs b/execution/executor-types/src/ledger_update_output.rs index 9bb7f58685447..beb86a5dd2959 100644 --- a/execution/executor-types/src/ledger_update_output.rs +++ b/execution/executor-types/src/ledger_update_output.rs @@ -11,7 +11,7 @@ use aptos_types::{ contract_event::ContractEvent, epoch_state::EpochState, proof::accumulator::InMemoryTransactionAccumulator, - state_store::{combine_or_add_sharded_state_updates, ShardedStateUpdates}, + state_store::ShardedStateUpdates, transaction::{ block_epilogue::BlockEndInfo, TransactionInfo, TransactionStatus, TransactionToCommit, Version, @@ -119,35 +119,6 @@ impl LedgerUpdateOutput { ) } - pub fn combine(&mut self, rhs: Self) { - assert!(self.block_end_info.is_none()); - assert!(rhs.block_end_info.is_none()); - let Self { - statuses_for_input_txns, - to_commit, - subscribable_events, - transaction_info_hashes, - state_updates_until_last_checkpoint: state_updates_before_last_checkpoint, - sharded_state_cache, - transaction_accumulator, - block_end_info: _block_end_info, - } = rhs; - - if let Some(updates) = state_updates_before_last_checkpoint { - combine_or_add_sharded_state_updates( - &mut self.state_updates_until_last_checkpoint, - updates, - ); - } - - self.statuses_for_input_txns.extend(statuses_for_input_txns); - self.to_commit.extend(to_commit); - self.subscribable_events.extend(subscribable_events); - self.transaction_info_hashes.extend(transaction_info_hashes); - self.sharded_state_cache.combine(sharded_state_cache); - self.transaction_accumulator = transaction_accumulator; - } - pub fn next_version(&self) -> Version { self.transaction_accumulator.num_leaves() as Version } diff --git a/execution/executor-types/src/lib.rs b/execution/executor-types/src/lib.rs index 8a3b9ee9a9fb3..8ae1701243ca2 100644 --- a/execution/executor-types/src/lib.rs +++ b/execution/executor-types/src/lib.rs @@ -259,14 +259,14 @@ impl VerifyExecutionMode { } pub trait TransactionReplayer: Send { - fn replay( + fn enqueue_chunks( &self, transactions: Vec, transaction_infos: Vec, write_sets: Vec, event_vecs: Vec>, verify_execution_mode: &VerifyExecutionMode, - ) -> Result<()>; + ) -> Result; fn commit(&self) -> Result; } diff --git a/execution/executor/src/chunk_executor.rs b/execution/executor/src/chunk_executor.rs index fbe4d1f8d1b32..d54497c000ca9 100644 --- a/execution/executor/src/chunk_executor.rs +++ b/execution/executor/src/chunk_executor.rs @@ -6,10 +6,10 @@ use crate::{ components::{ - apply_chunk_output::{ensure_no_discard, ensure_no_retry, ApplyChunkOutput}, + apply_chunk_output::ApplyChunkOutput, 
chunk_commit_queue::{ChunkCommitQueue, ChunkToUpdateLedger}, chunk_output::ChunkOutput, - chunk_result_verifier::{ChunkResultVerifier, StateSyncChunkVerifier}, + chunk_result_verifier::{ChunkResultVerifier, ReplayChunkVerifier, StateSyncChunkVerifier}, executed_chunk::ExecutedChunk, transaction_chunk::{ChunkToApply, ChunkToExecute, TransactionChunk}, }, @@ -28,7 +28,7 @@ use aptos_logger::prelude::*; use aptos_metrics_core::{IntGaugeHelper, TimerHelper}; use aptos_storage_interface::{ async_proof_fetcher::AsyncProofFetcher, cached_state_view::CachedStateView, - state_delta::StateDelta, DbReaderWriter, ExecutedTrees, + state_delta::StateDelta, DbReaderWriter, }; use aptos_types::{ block_executor::config::BlockExecutorConfigFromOnchain, @@ -93,6 +93,10 @@ impl ChunkExecutor { error }) } + + pub fn is_empty(&self) -> bool { + self.with_inner(|inner| Ok(inner.is_empty())).unwrap() + } } impl ChunkExecutorTrait for ChunkExecutor { @@ -279,6 +283,10 @@ impl ChunkExecutorInner { Ok(chunk) } + fn is_empty(&self) -> bool { + self.commit_queue.lock().is_empty() + } + // ************************* Chunk Executor Implementation ************************* fn enqueue_chunk( &self, @@ -401,24 +409,28 @@ impl ChunkExecutorInner { } impl TransactionReplayer for ChunkExecutor { - fn replay( + fn enqueue_chunks( &self, transactions: Vec, transaction_infos: Vec, write_sets: Vec, event_vecs: Vec>, verify_execution_mode: &VerifyExecutionMode, - ) -> Result<()> { + ) -> Result { let _guard = CONCURRENCY_GAUGE.concurrency_with(&["replayer", "replay"]); self.maybe_initialize()?; - self.inner.read().as_ref().expect("not reset").replay( - transactions, - transaction_infos, - write_sets, - event_vecs, - verify_execution_mode, - ) + self.inner + .read() + .as_ref() + .expect("not reset") + .enqueue_chunks( + transactions, + transaction_infos, + write_sets, + event_vecs, + verify_execution_mode, + ) } fn commit(&self) -> Result { @@ -428,19 +440,18 @@ impl TransactionReplayer for ChunkExecutor { } } -impl TransactionReplayer for ChunkExecutorInner { - fn replay( +impl ChunkExecutorInner { + fn enqueue_chunks( &self, mut transactions: Vec, mut transaction_infos: Vec, mut write_sets: Vec, mut event_vecs: Vec>, verify_execution_mode: &VerifyExecutionMode, - ) -> Result<()> { + ) -> Result { let started = Instant::now(); let num_txns = transactions.len(); - let mut latest_view = self.commit_queue.lock().expect_latest_view()?; - let chunk_begin = latest_view.num_transactions() as Version; + let chunk_begin = self.commit_queue.lock().expecting_version(); let chunk_end = chunk_begin + num_txns as Version; // right-exclusive // Find epoch boundaries. @@ -459,12 +470,10 @@ impl TransactionReplayer for ChunkExecutorInner { epochs.push((epoch_begin, chunk_end)); } - let mut executed_chunk = None; + let mut chunks_enqueued = 0; // Replay epoch by epoch. 
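// A summary of the enqueueing flow implemented below: the chunk is first
// split at the epoch boundaries computed above (a committed chunk must not
// cross an epoch ending), and `remove_and_replay_epoch` then splits each
// epoch range further at the versions in `VerifyExecutionMode::txns_to_skip()`,
// so known broken versions are applied from their saved outputs while the
// rest are re-executed (and optionally verified) against the VM. Every
// sub-batch becomes one enqueued chunk, and the returned count tells the
// caller how many `update_ledger()`/`commit()` rounds to drive afterwards.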
for (begin, end) in epochs { - self.remove_and_replay_epoch( - &mut executed_chunk, - &mut latest_view, + chunks_enqueued += self.remove_and_replay_epoch( &mut transactions, &mut transaction_infos, &mut write_sets, @@ -475,16 +484,13 @@ impl TransactionReplayer for ChunkExecutorInner { )?; } - self.commit_queue - .lock() - .enqueue_chunk_to_commit_directly(executed_chunk.expect("Nothing to commit."))?; info!( num_txns = num_txns, tps = (num_txns as f64 / started.elapsed().as_secs_f64()), "TransactionReplayer::replay() OK" ); - Ok(()) + Ok(chunks_enqueued) } fn commit(&self) -> Result { @@ -504,16 +510,12 @@ impl TransactionReplayer for ChunkExecutorInner { .current_version .expect("Version must exist after commit.")) } -} -impl ChunkExecutorInner { /// Remove `end_version - begin_version` transactions from the mutable input arguments and replay. /// The input range indicated by `[begin_version, end_version]` is guaranteed not to cross epoch boundaries. /// Notice there can be known broken versions inside the range. fn remove_and_replay_epoch( &self, - executed_chunk: &mut Option, - latest_view: &mut ExecutedTrees, transactions: &mut Vec, transaction_infos: &mut Vec, write_sets: &mut Vec, @@ -521,21 +523,21 @@ impl ChunkExecutorInner { begin_version: Version, end_version: Version, verify_execution_mode: &VerifyExecutionMode, - ) -> Result<()> { + ) -> Result { // we try to apply the txns in sub-batches split by known txns to skip and the end of the batch let txns_to_skip = verify_execution_mode.txns_to_skip(); let mut batch_ends = txns_to_skip .range(begin_version..end_version) .chain(once(&end_version)); + let mut chunks_enqueued = 0; + let mut batch_begin = begin_version; let mut batch_end = *batch_ends.next().unwrap(); while batch_begin < end_version { if batch_begin == batch_end { // batch_end is a known broken version that won't pass execution verification self.remove_and_apply( - executed_chunk, - latest_view, transactions, transaction_infos, write_sets, @@ -543,6 +545,7 @@ impl ChunkExecutorInner { batch_begin, batch_begin + 1, )?; + chunks_enqueued += 1; info!( version_skipped = batch_begin, "Skipped known broken transaction, applied transaction output directly." @@ -555,7 +558,6 @@ impl ChunkExecutorInner { // Try to run the transactions with the VM let next_begin = if verify_execution_mode.should_verify() { self.verify_execution( - latest_view, transactions, transaction_infos, write_sets, @@ -568,8 +570,6 @@ impl ChunkExecutorInner { batch_end }; self.remove_and_apply( - executed_chunk, - latest_view, transactions, transaction_infos, write_sets, @@ -577,15 +577,15 @@ impl ChunkExecutorInner { batch_begin, next_begin, )?; + chunks_enqueued += 1; batch_begin = next_begin; } - Ok(()) + Ok(chunks_enqueued) } fn verify_execution( &self, - latest_view: &mut ExecutedTrees, transactions: &[Transaction], transaction_infos: &[TransactionInfo], write_sets: &[WriteSet], @@ -595,7 +595,7 @@ impl ChunkExecutorInner { verify_execution_mode: &VerifyExecutionMode, ) -> Result { // Execute transactions. - let state_view = self.latest_state_view(latest_view.state())?; + let state_view = self.latest_state_view(&self.commit_queue.lock().latest_state())?; let txns = transactions .iter() .take((end_version - begin_version) as usize) @@ -639,8 +639,6 @@ impl ChunkExecutorInner { /// It's guaranteed that there's no known broken versions or epoch endings in the range. 
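    /// Rather than applying outputs in place, the drained transactions and
    /// outputs are wrapped into a `ChunkToApply` plus a `ReplayChunkVerifier`
    /// carrying the expected `TransactionInfo`s, and enqueued through the same
    /// `enqueue_chunk` path that state-sync chunks take; verification happens
    /// later, when the ledger update for the chunk is computed.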
fn remove_and_apply( &self, - executed_chunk: &mut Option, - latest_view: &mut ExecutedTrees, transactions: &mut Vec, transaction_infos: &mut Vec, write_sets: &mut Vec, @@ -650,7 +648,7 @@ impl ChunkExecutorInner { ) -> Result<()> { let num_txns = (end_version - begin_version) as usize; let txn_infos: Vec<_> = transaction_infos.drain(..num_txns).collect(); - let (txns, txn_outs) = multizip(( + let (transactions, transaction_outputs) = multizip(( transactions.drain(..num_txns), txn_infos.iter(), write_sets.drain(..num_txns), @@ -670,28 +668,16 @@ impl ChunkExecutorInner { }) .unzip(); - let state_view = self.latest_state_view(latest_view.state())?; - let chunk_output = ChunkOutput::by_transaction_output(txns, txn_outs, state_view)?; - let (executed_batch, to_discard, to_retry) = chunk_output.apply_to_ledger( - latest_view, - Some( - txn_infos - .iter() - .map(|txn_info| txn_info.state_checkpoint_hash()) - .collect(), - ), - )?; - ensure_no_discard(to_discard)?; - ensure_no_retry(to_retry)?; - executed_batch - .ledger_update_output - .ensure_transaction_infos_match(&txn_infos)?; - - match executed_chunk { - Some(chunk) => chunk.combine(executed_batch), - None => *executed_chunk = Some(executed_batch), - } - *latest_view = executed_chunk.as_ref().unwrap().result_view(); + let chunk = ChunkToApply { + transactions, + transaction_outputs, + first_version: begin_version, + }; + let chunk_verifier = Arc::new(ReplayChunkVerifier { + transaction_infos: txn_infos, + }); + self.enqueue_chunk(chunk, chunk_verifier, "replay")?; + Ok(()) } } diff --git a/execution/executor/src/components/chunk_commit_queue.rs b/execution/executor/src/components/chunk_commit_queue.rs index 54c69a2f61afe..d796079bf812e 100644 --- a/execution/executor/src/components/chunk_commit_queue.rs +++ b/execution/executor/src/components/chunk_commit_queue.rs @@ -66,18 +66,7 @@ impl ChunkCommitQueue { } pub(crate) fn expecting_version(&self) -> Version { - self.latest_txn_accumulator.num_leaves() - } - - pub(crate) fn expect_latest_view(&self) -> Result { - ensure!( - self.to_update_ledger.is_empty(), - "Pending chunk to update_ledger, can't construct latest ExecutedTrees." - ); - Ok(ExecutedTrees::new( - self.latest_state.clone(), - self.latest_txn_accumulator.clone(), - )) + self.latest_state.next_version() } pub(crate) fn enqueue_for_ledger_update( @@ -130,17 +119,6 @@ impl ChunkCommitQueue { Ok((self.persisted_state.clone(), chunk)) } - pub(crate) fn enqueue_chunk_to_commit_directly(&mut self, chunk: ExecutedChunk) -> Result<()> { - ensure!( - self.to_update_ledger.is_empty(), - "Mixed usage of different modes." 
- ); - self.latest_state = chunk.result_state.clone(); - self.latest_txn_accumulator = chunk.ledger_update_output.transaction_accumulator.clone(); - self.to_commit.push_back(Some(chunk)); - Ok(()) - } - pub(crate) fn dequeue_committed(&mut self, latest_state: StateDelta) -> Result<()> { ensure!(!self.to_commit.is_empty(), "to_commit is empty."); ensure!( @@ -154,4 +132,8 @@ impl ChunkCommitQueue { .log_generation("commit_queue_base"); Ok(()) } + + pub(crate) fn is_empty(&self) -> bool { + self.to_commit.is_empty() && self.to_update_ledger.is_empty() + } } diff --git a/execution/executor/src/components/chunk_result_verifier.rs b/execution/executor/src/components/chunk_result_verifier.rs index d9723dc0c57b5..5cc222163c99f 100644 --- a/execution/executor/src/components/chunk_result_verifier.rs +++ b/execution/executor/src/components/chunk_result_verifier.rs @@ -133,3 +133,29 @@ impl ChunkResultVerifier for StateSyncChunkVerifier { } } } + +pub struct ReplayChunkVerifier { + pub transaction_infos: Vec, +} + +impl ChunkResultVerifier for ReplayChunkVerifier { + fn verify_chunk_result( + &self, + _parent_accumulator: &InMemoryTransactionAccumulator, + ledger_update_output: &LedgerUpdateOutput, + ) -> Result<()> { + ledger_update_output.ensure_transaction_infos_match(&self.transaction_infos) + } + + fn transaction_infos(&self) -> &[TransactionInfo] { + &self.transaction_infos + } + + fn maybe_select_chunk_ending_ledger_info( + &self, + _ledger_update_output: &LedgerUpdateOutput, + _next_epoch_state: Option<&EpochState>, + ) -> Result> { + Ok(None) + } +} diff --git a/execution/executor/src/components/executed_chunk.rs b/execution/executor/src/components/executed_chunk.rs index aa9872dc5c3b5..28e8e68fcf87e 100644 --- a/execution/executor/src/components/executed_chunk.rs +++ b/execution/executor/src/components/executed_chunk.rs @@ -15,7 +15,7 @@ use aptos_types::account_config::NewEpochEvent; use aptos_types::contract_event::ContractEvent; use aptos_types::{ epoch_state::EpochState, ledger_info::LedgerInfoWithSignatures, - state_store::combine_or_add_sharded_state_updates, transaction::TransactionToCommit, + transaction::TransactionToCommit, }; #[derive(Debug)] @@ -28,16 +28,6 @@ pub struct ExecutedChunk { } impl ExecutedChunk { - pub fn reconfig_suffix(&self) -> Self { - assert!(self.next_epoch_state.is_some()); - Self { - result_state: self.result_state.clone(), - ledger_info: None, - next_epoch_state: self.next_epoch_state.clone(), - ledger_update_output: self.ledger_update_output.reconfig_suffix(), - } - } - pub fn transactions_to_commit(&self) -> &Vec { &self.ledger_update_output.to_commit } @@ -46,37 +36,6 @@ impl ExecutedChunk { self.next_epoch_state.is_some() } - pub fn combine(&mut self, rhs: Self) { - assert_eq!( - self.ledger_update_output.next_version(), - rhs.ledger_update_output.first_version(), - "Chunks to be combined are not consecutive.", - ); - let Self { - result_state, - ledger_info, - next_epoch_state, - ledger_update_output, - } = rhs; - - let old_result_state = self.result_state.replace_with(result_state); - // TODO(aldenhu): This is very unfortunate. Will revisit soon by remodeling the state diff. 
- if self.result_state.base_version > old_result_state.base_version - && old_result_state.base_version != old_result_state.current_version - { - combine_or_add_sharded_state_updates( - &mut self - .ledger_update_output - .state_updates_until_last_checkpoint, - old_result_state.updates_since_base, - ) - } - - self.ledger_info = ledger_info; - self.next_epoch_state = next_epoch_state; - self.ledger_update_output.combine(ledger_update_output) - } - pub fn result_view(&self) -> ExecutedTrees { ExecutedTrees::new( self.result_state.clone(), diff --git a/execution/executor/src/tests/mod.rs b/execution/executor/src/tests/mod.rs index d7c65868196a3..c50d2480a1b84 100644 --- a/execution/executor/src/tests/mod.rs +++ b/execution/executor/src/tests/mod.rs @@ -14,7 +14,8 @@ use crate::{ use aptos_crypto::{ed25519::Ed25519PrivateKey, HashValue, PrivateKey, SigningKey, Uniform}; use aptos_db::AptosDB; use aptos_executor_types::{ - BlockExecutorTrait, LedgerUpdateOutput, TransactionReplayer, VerifyExecutionMode, + BlockExecutorTrait, ChunkExecutorTrait, LedgerUpdateOutput, TransactionReplayer, + VerifyExecutionMode, }; use aptos_storage_interface::{ async_proof_fetcher::AsyncProofFetcher, DbReaderWriter, ExecutedTrees, Result, @@ -27,7 +28,6 @@ use aptos_types::{ bytes::NumToBytes, chain_id::ChainId, ledger_info::{LedgerInfo, LedgerInfoWithSignatures}, - proof::definition::LeafCount, state_store::{state_key::StateKey, state_value::StateValue, StateViewId}, test_helpers::transaction_test_helpers::{block, TEST_BLOCK_EXECUTOR_ONCHAIN_CONFIG}, transaction::{ @@ -38,6 +38,7 @@ use aptos_types::{ }, write_set::{WriteOp, WriteSet, WriteSetMut}, }; +use itertools::Itertools; use proptest::prelude::*; use std::{iter::once, sync::Arc}; @@ -741,23 +742,23 @@ fn run_transactions_naive( } proptest! 
{ -#![proptest_config(ProptestConfig::with_cases(5))] + #![proptest_config(ProptestConfig::with_cases(5))] -#[test] -#[cfg_attr(feature = "consensus-only-perf-test", ignore)] -fn test_reconfiguration_with_retry_transaction_status( - (num_user_txns, reconfig_txn_index) in (2..5u64).prop_flat_map(|num_user_txns| { - ( - Just(num_user_txns), - 0..num_user_txns - 1 // avoid state checkpoint right after reconfig - ) + #[test] + #[cfg_attr(feature = "consensus-only-perf-test", ignore)] + fn test_reconfiguration_with_retry_transaction_status( + (num_user_txns, reconfig_txn_index) in (2..5usize).prop_flat_map(|num_user_txns| { + ( + Just(num_user_txns), + 0..num_user_txns - 1 // avoid state checkpoint right after reconfig + ) }).no_shrink()) { let executor = TestExecutor::new(); let block_id = gen_block_id(1); - let mut block = TestBlock::new(num_user_txns, 10, block_id); - let num_input_txns = block.txns.len() as LeafCount; - block.txns[reconfig_txn_index as usize] = encode_reconfiguration_transaction().into(); + let mut block = TestBlock::new(num_user_txns as u64, 10, block_id); + let num_input_txns = block.txns.len(); + block.txns[reconfig_txn_index] = encode_reconfiguration_transaction().into(); let parent_block_id = executor.committed_block_id(); let output = executor.execute_block( @@ -768,34 +769,52 @@ fn test_reconfiguration_with_retry_transaction_status( let retry_iter = output.compute_status_for_input_txns().iter() .skip_while(|status| matches!(*status, TransactionStatus::Keep(_))); prop_assert_eq!( - retry_iter.take_while(|status| matches!(*status,TransactionStatus::Retry)).count() as u64, + retry_iter.take_while(|status| matches!(*status,TransactionStatus::Retry)).count(), num_input_txns - reconfig_txn_index - 1 ); // commit - let ledger_info = gen_ledger_info(reconfig_txn_index + 1 /* version */, output.root_hash(), block_id, 1 /* timestamp */); + let ledger_info = gen_ledger_info( + reconfig_txn_index as Version + 1 /* version */, + output.root_hash(), + block_id, + 1 /* timestamp */ + ); executor.commit_blocks(vec![block_id], ledger_info).unwrap(); let parent_block_id = executor.committed_block_id(); // retry txns after reconfiguration + let retry_txns = block.txns.iter().skip(reconfig_txn_index + 1).cloned().collect_vec(); let retry_block_id = gen_block_id(2); let retry_output = executor.execute_block( - (retry_block_id, block.txns.iter().skip(reconfig_txn_index as usize + 1).cloned().collect::>()).into(), parent_block_id, TEST_BLOCK_EXECUTOR_ONCHAIN_CONFIG + ( retry_block_id, retry_txns ).into(), + parent_block_id, + TEST_BLOCK_EXECUTOR_ONCHAIN_CONFIG ).unwrap(); prop_assert!(retry_output.compute_status_for_input_txns().iter().all(|s| matches!(*s, TransactionStatus::Keep(_)))); // Second block has StateCheckpoint/BlockPrologue transaction added. 
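    // To spell out the version arithmetic checked below: the first commit
    // covers reconfig_txn_index + 1 transactions (up to and including the
    // reconfiguration), the retry block replays the remaining
    // num_input_txns - reconfig_txn_index - 1 user transactions, and the
    // epilogue transaction adds one more, so the final ledger version is
    // num_input_txns + 1.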
- let ledger_version = num_input_txns + 1; + let ledger_version = num_input_txns as Version + 1; // commit - let ledger_info = gen_ledger_info(ledger_version, retry_output.root_hash(), retry_block_id, 12345 /* timestamp */); + let ledger_info = gen_ledger_info( + ledger_version, + retry_output.root_hash(), + retry_block_id, + 12345 /* timestamp */ + ); executor.commit_blocks(vec![retry_block_id], ledger_info).unwrap(); // get txn_infos from db let db = executor.db.reader.clone(); prop_assert_eq!(db.expect_synced_version(), ledger_version); - let txn_list = db.get_transactions(1 /* start version */, ledger_version, ledger_version /* ledger version */, false /* fetch events */).unwrap(); - prop_assert_eq!(&block.inner_txns(), &txn_list.transactions[..num_input_txns as usize]); + let txn_list = db.get_transactions( + 1, /* start version */ + ledger_version, /* version */ + ledger_version, /* ledger version */ + false /* fetch events */ + ).unwrap(); + prop_assert_eq!(&block.inner_txns(), &txn_list.transactions[..num_input_txns]); let txn_infos = txn_list.proof.transaction_infos; let write_sets = db.get_write_set_iterator(1, ledger_version).unwrap().collect::>().unwrap(); let event_vecs = db.get_events_iterator(1, ledger_version).unwrap().collect::>().unwrap(); @@ -803,8 +822,20 @@ fn test_reconfiguration_with_retry_transaction_status( // replay txns in one batch across epoch boundary, // and the replayer should deal with `Retry`s automatically let replayer = chunk_executor_tests::TestExecutor::new(); - replayer.executor.replay(txn_list.transactions, txn_infos, write_sets, event_vecs, &VerifyExecutionMode::verify_all()).unwrap(); + let chunks_enqueued = replayer.executor.enqueue_chunks( + txn_list.transactions, + txn_infos, + write_sets, + event_vecs, + &VerifyExecutionMode::verify_all() + ).unwrap(); + assert_eq!(chunks_enqueued, 2); + replayer.executor.update_ledger().unwrap(); + replayer.executor.update_ledger().unwrap(); + + replayer.executor.commit().unwrap(); replayer.executor.commit().unwrap(); + prop_assert!(replayer.executor.is_empty()); let replayed_db = replayer.db.reader.clone(); prop_assert_eq!( replayed_db.get_accumulator_root_hash(ledger_version).unwrap(), diff --git a/storage/backup/backup-cli/src/backup_types/transaction/restore.rs b/storage/backup/backup-cli/src/backup_types/transaction/restore.rs index c1eadb5366ff4..22a7ab7ff8b8b 100644 --- a/storage/backup/backup-cli/src/backup_types/transaction/restore.rs +++ b/storage/backup/backup-cli/src/backup_types/transaction/restore.rs @@ -27,8 +27,9 @@ use crate::{ use anyhow::{anyhow, ensure, Result}; use aptos_db::backup::restore_handler::RestoreHandler; use aptos_executor::chunk_executor::ChunkExecutor; -use aptos_executor_types::{TransactionReplayer, VerifyExecutionMode}; +use aptos_executor_types::{ChunkExecutorTrait, TransactionReplayer, VerifyExecutionMode}; use aptos_logger::prelude::*; +use aptos_metrics_core::TimerHelper; use aptos_storage_interface::DbReaderWriter; use aptos_types::{ contract_event::ContractEvent, @@ -592,7 +593,7 @@ impl TransactionRestoreBatchController { let replay_start = Instant::now(); let db = DbReaderWriter::from_arc(Arc::clone(&restore_handler.aptosdb)); let chunk_replayer = Arc::new(ChunkExecutor::::new(db)); - let db_commit_stream = txns_to_execute_stream + let ledger_update_stream = txns_to_execute_stream .try_chunks(BATCH_SIZE) .err_into::() .map_ok(|chunk| { @@ -602,11 +603,10 @@ impl TransactionRestoreBatchController { let verify_execution_mode = self.verify_execution_mode.clone(); async 
move { - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["replay_txn_chunk"]) - .start_timer(); + let _timer = OTHER_TIMERS_SECONDS.timer_with(&["enqueue_chunks"]); + tokio::task::spawn_blocking(move || { - chunk_replayer.replay( + chunk_replayer.enqueue_chunks( txns, txn_infos, write_sets, @@ -614,22 +614,38 @@ impl TransactionRestoreBatchController { &verify_execution_mode, ) }) - .err_into::() .await + .expect("spawn_blocking failed") } }) - .try_buffered_x(self.global_opt.concurrent_downloads, 1) - .and_then(future::ready); + .try_buffered_x(3, 1) + .map_ok(|chunks_enqueued| { + futures::stream::repeat_with(|| Result::Ok(())).take(chunks_enqueued) + }) + .try_flatten(); + + let db_commit_stream = ledger_update_stream + .map_ok(|()| { + let chunk_replayer = chunk_replayer.clone(); + async move { + let _timer = OTHER_TIMERS_SECONDS.timer_with(&["ledger_update"]); + + tokio::task::spawn_blocking(move || chunk_replayer.update_ledger()) + .await + .expect("spawn_blocking failed") + } + }) + .try_buffered_x(3, 1); let total_replayed = db_commit_stream .and_then(|()| { let chunk_replayer = chunk_replayer.clone(); async move { - let _timer = OTHER_TIMERS_SECONDS - .with_label_values(&["commit_txn_chunk"]) - .start_timer(); + let _timer = OTHER_TIMERS_SECONDS.timer_with(&["commit"]); + tokio::task::spawn_blocking(move || { let v = chunk_replayer.commit()?; + let total_replayed = v - first_version + 1; TRANSACTION_REPLAY_VERSION.set(v as i64); info!( @@ -639,13 +655,17 @@ impl TransactionRestoreBatchController { as u64, "Transactions replayed." ); - Ok(v) + Ok(total_replayed) }) - .await? + .await + .expect("spawn_blocking failed") } }) - .try_fold(0, |_total, total| future::ok(total)) + .try_fold(0, |_prev_total, total| future::ok(total)) .await?; + // assert all chunks are fully processed and in DB. + assert!(chunk_replayer.is_empty()); + info!( total_replayed = total_replayed, accumulative_tps = From 813e7d0645274de671627f3b92c5620f9981a3c8 Mon Sep 17 00:00:00 2001 From: Bo Wu Date: Wed, 9 Oct 2024 21:06:07 -0700 Subject: [PATCH 11/22] Fix flaky test --- storage/aptosdb/src/state_store/state_store_test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/aptosdb/src/state_store/state_store_test.rs b/storage/aptosdb/src/state_store/state_store_test.rs index 736e788d443f4..97f5727a8b431 100644 --- a/storage/aptosdb/src/state_store/state_store_test.rs +++ b/storage/aptosdb/src/state_store/state_store_test.rs @@ -470,7 +470,7 @@ proptest! 
{ (input, batch1_size) in hash_map(any::(), any::(), 2..1000) .prop_flat_map(|input| { let len = input.len(); - (Just(input), 1..len) + (Just(input), 2..len) }) ) { let tmp_dir1 = TempPath::new(); From a822e6518b85881d337967342659d663071dea25 Mon Sep 17 00:00:00 2001 From: Rati Gelashvili Date: Fri, 11 Oct 2024 13:33:49 -0400 Subject: [PATCH 12/22] ok_or and map_or audit (#14764) --- api/src/context.rs | 4 +-- .../verifier/transaction_arg_validation.rs | 14 +++++++---- aptos-move/block-executor/src/executor.rs | 8 +++--- .../src/txn_last_input_output.rs | 22 ++++++++-------- .../natives/aggregator_natives/helpers_v1.rs | 20 +++++++-------- config/src/config/node_config_loader.rs | 6 ++--- config/src/config/secure_backend_config.rs | 2 +- consensus/consensus-types/src/sync_info.rs | 2 +- consensus/src/dag/adapter.rs | 12 +++++---- consensus/src/liveness/leader_reputation.rs | 12 +++++---- consensus/src/liveness/round_state.rs | 2 +- consensus/src/persistent_liveness_storage.rs | 4 +-- .../aptos-dkg/src/weighted_vuf/pinkas/mod.rs | 4 +-- crates/aptos/src/account/multisig_account.rs | 6 ++--- crates/aptos/src/common/init.rs | 6 ++--- crates/aptos/src/common/types.rs | 4 +-- .../aptos/src/governance/delegation_pool.rs | 21 ++++++++-------- crates/aptos/src/move_tool/mod.rs | 2 +- .../src/input_processing/witness_gen.rs | 4 ++- mempool/src/shared_mempool/network.rs | 4 +-- secure/storage/src/on_disk.rs | 2 +- .../data-streaming-service/src/data_stream.rs | 9 ++++--- storage/aptosdb/src/db/fake_aptosdb.rs | 2 +- .../src/db/include/aptosdb_internal.rs | 15 ++++++----- storage/aptosdb/src/ledger_db/event_db.rs | 6 ++--- .../src/ledger_db/ledger_metadata_db.rs | 25 ++++++++----------- storage/aptosdb/src/ledger_db/write_set_db.rs | 5 +--- .../backup/backup-cli/src/metadata/view.rs | 8 +++--- .../move/move-prover/lab/src/benchmark.rs | 2 +- third_party/move/move-prover/lab/src/plot.rs | 2 +- .../src/compilation/compiled_package.rs | 2 +- types/src/block_info.rs | 4 +-- 32 files changed, 123 insertions(+), 118 deletions(-) diff --git a/api/src/context.rs b/api/src/context.rs index 2de3374a5c00a..1a77f5d5ee1f4 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -858,7 +858,7 @@ impl Context { } else { self.indexer_reader .as_ref() - .ok_or(anyhow!("Indexer reader is None")) + .ok_or_else(|| anyhow!("Indexer reader is None")) .map_err(|err| { E::internal_with_code(err, AptosErrorCode::InternalError, ledger_info) })? @@ -957,7 +957,7 @@ impl Context { } else { self.indexer_reader .as_ref() - .ok_or(anyhow!("Internal indexer reader doesn't exist"))? + .ok_or_else(|| anyhow!("Internal indexer reader doesn't exist"))? .get_events(event_key, start, order, limit as u64, ledger_version)? 
}; if order == Order::Descending { diff --git a/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs b/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs index b4be0a60880de..2ee495c5b10a6 100644 --- a/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs +++ b/aptos-move/aptos-vm/src/verifier/transaction_arg_validation.rs @@ -455,11 +455,15 @@ fn validate_and_construct( )?; let mut ret_vals = serialized_result.return_values; // We know ret_vals.len() == 1 - let deserialize_error = VMStatus::error( - StatusCode::INTERNAL_TYPE_ERROR, - Some(String::from("Constructor did not return value")), - ); - Ok(ret_vals.pop().ok_or(deserialize_error)?.0) + Ok(ret_vals + .pop() + .ok_or_else(|| { + VMStatus::error( + StatusCode::INTERNAL_TYPE_ERROR, + Some(String::from("Constructor did not return value")), + ) + })? + .0) } // String is a vector of bytes, so both string and vector carry a length in the serialized format. diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index 90be88920d6a1..43616a636ab4c 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -120,12 +120,12 @@ where let execute_result = executor.execute_transaction(&sync_view, txn, idx_to_execute); let mut prev_modified_keys = last_input_output - .modified_keys::(idx_to_execute) - .map_or(HashMap::new(), |keys| keys.collect()); + .modified_keys(idx_to_execute, true) + .map_or_else(HashMap::new, |keys| keys.collect()); let mut prev_modified_delayed_fields = last_input_output .delayed_field_keys(idx_to_execute) - .map_or(HashSet::new(), |keys| keys.collect()); + .map_or_else(HashSet::new, |keys| keys.collect()); let mut read_set = sync_view.take_parallel_reads(); if read_set.is_incorrect_use() { @@ -398,7 +398,7 @@ where clear_speculative_txn_logs(txn_idx as usize); // Not valid and successfully aborted, mark the latest write/delta sets as estimates. - if let Some(keys) = last_input_output.modified_keys::(txn_idx) { + if let Some(keys) = last_input_output.modified_keys(txn_idx, false) { for (k, kind) in keys { use KeyKind::*; match kind { diff --git a/aptos-move/block-executor/src/txn_last_input_output.rs b/aptos-move/block-executor/src/txn_last_input_output.rs index 95ffa57680608..bc6b203f28b25 100644 --- a/aptos-move/block-executor/src/txn_last_input_output.rs +++ b/aptos-move/block-executor/src/txn_last_input_output.rs @@ -35,7 +35,7 @@ macro_rules! forward_on_success_or_skip_rest { $self.outputs[$txn_idx as usize] .load() .as_ref() - .map_or(vec![], |txn_output| match txn_output.as_ref() { + .map_or_else(Vec::new, |txn_output| match txn_output.as_ref() { ExecutionStatus::Success(t) | ExecutionStatus::SkipRest(t) => t.$f(), ExecutionStatus::Abort(_) | ExecutionStatus::SpeculativeExecutionAbortError(_) @@ -273,15 +273,15 @@ impl, E: Debug + Send + Clone> } // Extracts a set of paths (keys) written or updated during execution from transaction - // output, .1 for each item is false for non-module paths and true for module paths. - // If TAKE_GROUP_TAGS is set, the final HashSet of tags is moved for the group key - - // should be called once for each incarnation / record due to 'take'. if TAKE_GROUP_TAGS - // is false, stored modified group resource tags in the group are cloned out. - pub(crate) fn modified_keys( + // output, with corresponding KeyKind. If take_group_tags is true, the final HashSet + // of tags is moved for the group key - should be called once for each incarnation / record + // due to 'take'. 
if false, stored modified group resource tags in the group are cloned out. + pub(crate) fn modified_keys( &self, txn_idx: TxnIndex, + take_group_tags: bool, ) -> Option)>> { - let group_keys_and_tags: Vec<(T::Key, HashSet)> = if TAKE_GROUP_TAGS { + let group_keys_and_tags: Vec<(T::Key, HashSet)> = if take_group_tags { std::mem::take(&mut self.resource_group_keys_and_tags[txn_idx as usize].acquire()) } else { self.resource_group_keys_and_tags[txn_idx as usize] @@ -367,9 +367,9 @@ impl, E: Debug + Send + Clone> &self, txn_idx: TxnIndex, ) -> Box)>> { - self.outputs[txn_idx as usize].load().as_ref().map_or( - Box::new(empty::<(T::Event, Option)>()), - |txn_output| match txn_output.as_ref() { + match self.outputs[txn_idx as usize].load().as_ref() { + None => Box::new(empty::<(T::Event, Option)>()), + Some(txn_output) => match txn_output.as_ref() { ExecutionStatus::Success(t) | ExecutionStatus::SkipRest(t) => { let events = t.get_events(); Box::new(events.into_iter()) @@ -380,7 +380,7 @@ impl, E: Debug + Send + Clone> Box::new(empty::<(T::Event, Option)>()) }, }, - ) + } } pub(crate) fn take_resource_write_set( diff --git a/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs b/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs index 99d1211a5ea10..a633a3df3d9c8 100644 --- a/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs +++ b/aptos-move/framework/src/natives/aggregator_natives/helpers_v1.rs @@ -56,19 +56,17 @@ pub(crate) fn unpack_aggregator_struct( let pop_with_err = |vec: &mut Vec, msg: &str| { vec.pop() - .map_or(Err(extension_error(msg)), |v| v.value_as::()) + .map_or_else(|| Err(extension_error(msg)), |v| v.value_as::()) }; let limit = pop_with_err(&mut fields, "unable to pop 'limit' field")?; - let key = fields - .pop() - .map_or(Err(extension_error("unable to pop `handle` field")), |v| { - v.value_as::() - })?; - let handle = fields - .pop() - .map_or(Err(extension_error("unable to pop `handle` field")), |v| { - v.value_as::() - })?; + let key = fields.pop().map_or_else( + || Err(extension_error("unable to pop `handle` field")), + |v| v.value_as::(), + )?; + let handle = fields.pop().map_or_else( + || Err(extension_error("unable to pop `handle` field")), + |v| v.value_as::(), + )?; Ok((TableHandle(handle), key, limit)) } diff --git a/config/src/config/node_config_loader.rs b/config/src/config/node_config_loader.rs index b5235620ddf58..62a694f9bfacd 100644 --- a/config/src/config/node_config_loader.rs +++ b/config/src/config/node_config_loader.rs @@ -159,9 +159,9 @@ fn get_chain_id(node_config: &NodeConfig) -> Result { // TODO: can we make this less hacky? 
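    // The ok_or_else/map_or_else rewrites throughout this patch share one
    // rationale: `ok_or(make_err())` and `map_or(make_default(), f)` evaluate
    // their fallback argument eagerly, even on the success path, while the
    // `_else` variants take a closure that only runs when the fallback is
    // actually needed. A minimal illustration (hypothetical values, standard
    // library semantics):
    //
    //     let v: Option<u32> = Some(1);
    //     let _ = v.ok_or(format!("missing"));         // allocates even for Some
    //     let _ = v.ok_or_else(|| format!("missing")); // allocates only for None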
// Load the genesis transaction from disk - let genesis_txn = get_genesis_txn(node_config).ok_or(Error::InvariantViolation( - "The genesis transaction was not found!".to_string(), - ))?; + let genesis_txn = get_genesis_txn(node_config).ok_or_else(|| { + Error::InvariantViolation("The genesis transaction was not found!".to_string()) + })?; // Extract the chain ID from the genesis transaction match genesis_txn { diff --git a/config/src/config/secure_backend_config.rs b/config/src/config/secure_backend_config.rs index 62bf3b0c041e6..b0db79659caa5 100644 --- a/config/src/config/secure_backend_config.rs +++ b/config/src/config/secure_backend_config.rs @@ -79,7 +79,7 @@ impl VaultConfig { let path = self .ca_certificate .as_ref() - .ok_or(Error::Missing("ca_certificate"))?; + .ok_or_else(|| Error::Missing("ca_certificate"))?; read_file(path) } } diff --git a/consensus/consensus-types/src/sync_info.rs b/consensus/consensus-types/src/sync_info.rs index 7507dc2be81c3..0c8171016e610 100644 --- a/consensus/consensus-types/src/sync_info.rs +++ b/consensus/consensus-types/src/sync_info.rs @@ -42,7 +42,7 @@ impl Display for SyncInfo { self.highest_timeout_round(), self.highest_commit_round(), self.highest_quorum_cert, - self.highest_ordered_cert.as_ref().map_or("None".to_string(), |cert| cert.to_string()), + self.highest_ordered_cert.as_ref().map_or_else(|| "None".to_string(), |cert| cert.to_string()), self.highest_commit_cert, ) } diff --git a/consensus/src/dag/adapter.rs b/consensus/src/dag/adapter.rs index 36bd615345b04..36941b875cbdb 100644 --- a/consensus/src/dag/adapter.rs +++ b/consensus/src/dag/adapter.rs @@ -297,11 +297,13 @@ impl StorageAdapter { usize::try_from(*index) .map_err(|_err| anyhow!("index {} out of bounds", index)) .and_then(|index| { - validators.get(index).cloned().ok_or(anyhow!( - "index {} is larger than number of validators {}", - index, - validators.len() - )) + validators.get(index).cloned().ok_or_else(|| { + anyhow!( + "index {} is larger than number of validators {}", + index, + validators.len() + ) + }) }) }) .collect() diff --git a/consensus/src/liveness/leader_reputation.rs b/consensus/src/liveness/leader_reputation.rs index dfeabed8181bd..ccd82776d98da 100644 --- a/consensus/src/liveness/leader_reputation.rs +++ b/consensus/src/liveness/leader_reputation.rs @@ -283,11 +283,13 @@ impl NewBlockEventAggregation { usize::try_from(*index) .map_err(|_err| format!("index {} out of bounds", index)) .and_then(|index| { - validators.get(index).ok_or(format!( - "index {} is larger than number of validators {}", - index, - validators.len() - )) + validators.get(index).ok_or_else(|| { + format!( + "index {} is larger than number of validators {}", + index, + validators.len() + ) + }) }) }) .collect() diff --git a/consensus/src/liveness/round_state.rs b/consensus/src/liveness/round_state.rs index 2c7ea4c198da6..bef7b31b44fe3 100644 --- a/consensus/src/liveness/round_state.rs +++ b/consensus/src/liveness/round_state.rs @@ -376,7 +376,7 @@ impl RoundState { round = self.current_round, "{:?} passed since the previous deadline.", now.checked_sub(self.current_round_deadline) - .map_or("0 ms".to_string(), |v| format!("{:?}", v)) + .map_or_else(|| "0 ms".to_string(), |v| format!("{:?}", v)) ); debug!( round = self.current_round, diff --git a/consensus/src/persistent_liveness_storage.rs b/consensus/src/persistent_liveness_storage.rs index 7e69b3304f653..0b09759e2fa06 100644 --- a/consensus/src/persistent_liveness_storage.rs +++ b/consensus/src/persistent_liveness_storage.rs @@ -436,8 +436,8 
@@ impl PersistentLivenessStorage for StorageWriteProxy { } info!( "Starting up the consensus state machine with recovery data - [last_vote {}], [highest timeout certificate: {}]", - initial_data.last_vote.as_ref().map_or("None".to_string(), |v| v.to_string()), - initial_data.highest_2chain_timeout_certificate().as_ref().map_or("None".to_string(), |v| v.to_string()), + initial_data.last_vote.as_ref().map_or_else(|| "None".to_string(), |v| v.to_string()), + initial_data.highest_2chain_timeout_certificate().as_ref().map_or_else(|| "None".to_string(), |v| v.to_string()), ); LivenessStorageData::FullRecoveryData(initial_data) diff --git a/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs b/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs index dd3795830f9ea..254ff831a3d26 100644 --- a/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs +++ b/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs @@ -246,7 +246,7 @@ impl WeightedVUF for PinkasWUF { pis.push( apks[player.id] .as_ref() - .ok_or(anyhow!("Missing APK for player {}", player.get_id()))? + .ok_or_else(|| anyhow!("Missing APK for player {}", player.get_id()))? .0 .pi, ); @@ -299,7 +299,7 @@ impl PinkasWUF { let apk = apks[player.id] .as_ref() - .ok_or(anyhow!("Missing APK for player {}", player.get_id()))?; + .ok_or_else(|| anyhow!("Missing APK for player {}", player.get_id()))?; rks.push(&apk.0.rks); shares.push(share); diff --git a/crates/aptos/src/account/multisig_account.rs b/crates/aptos/src/account/multisig_account.rs index a65a351cec56f..637a613aa0a33 100644 --- a/crates/aptos/src/account/multisig_account.rs +++ b/crates/aptos/src/account/multisig_account.rs @@ -200,11 +200,11 @@ impl CliCommand for VerifyProposal { .to_hex_literal() // If full payload not provided, get payload hash directly from transaction proposal: } else { - view_json_option_str(&multisig_transaction["payload_hash"])?.ok_or( + view_json_option_str(&multisig_transaction["payload_hash"])?.ok_or_else(|| { CliError::UnexpectedError( "Neither payload nor payload hash provided on-chain".to_string(), - ), - )? + ) + })? }; // Get verification result based on if expected and actual payload hashes match. if expected_payload_hash.eq(&actual_payload_hash) { diff --git a/crates/aptos/src/common/init.rs b/crates/aptos/src/common/init.rs index 46a5a7da8f6ad..db590ff97faea 100644 --- a/crates/aptos/src/common/init.rs +++ b/crates/aptos/src/common/init.rs @@ -231,9 +231,9 @@ impl CliCommand<()> for InitTool { let public_key = if self.is_hardware_wallet() { let pub_key = match aptos_ledger::get_public_key( derivation_path - .ok_or(CliError::UnexpectedError( - "Invalid derivation path".to_string(), - ))? + .ok_or_else(|| { + CliError::UnexpectedError("Invalid derivation path".to_string()) + })? 
.as_str(), false, ) { diff --git a/crates/aptos/src/common/types.rs b/crates/aptos/src/common/types.rs index 779e1c26b163d..0b5fc13047670 100644 --- a/crates/aptos/src/common/types.rs +++ b/crates/aptos/src/common/types.rs @@ -2218,9 +2218,7 @@ impl TryInto for &EntryFunctionArguments { fn try_into(self) -> Result { self.function_id .clone() - .ok_or(CliError::CommandArgumentError( - "No function ID provided".to_string(), - )) + .ok_or_else(|| CliError::CommandArgumentError("No function ID provided".to_string())) } } diff --git a/crates/aptos/src/governance/delegation_pool.rs b/crates/aptos/src/governance/delegation_pool.rs index 5d836241af694..b6824e179e84f 100644 --- a/crates/aptos/src/governance/delegation_pool.rs +++ b/crates/aptos/src/governance/delegation_pool.rs @@ -222,13 +222,13 @@ async fn is_partial_governance_voting_enabled_for_delegation_pool( None, ) .await?; - response.inner()[0] - .as_bool() - .ok_or(CliError::UnexpectedError( + response.inner()[0].as_bool().ok_or_else(|| { + CliError::UnexpectedError( "Unexpected response from node when checking if partial governance_voting is \ enabled for delegation pool" .to_string(), - )) + ) + }) } async fn get_remaining_voting_power( @@ -255,14 +255,13 @@ async fn get_remaining_voting_power( None, ) .await?; - let remaining_voting_power_str = - response.inner()[0] - .as_str() - .ok_or(CliError::UnexpectedError(format!( - "Unexpected response from node when getting remaining voting power of {}\ + let remaining_voting_power_str = response.inner()[0].as_str().ok_or_else(|| { + CliError::UnexpectedError(format!( + "Unexpected response from node when getting remaining voting power of {}\ in delegation pool {}", - pool_address, voter_address - )))?; + pool_address, voter_address + )) + })?; remaining_voting_power_str.parse().map_err(|err| { CliError::UnexpectedError(format!( "Unexpected response from node when getting remaining voting power of {}\ diff --git a/crates/aptos/src/move_tool/mod.rs b/crates/aptos/src/move_tool/mod.rs index 72b8c243b891d..4b15f51e6b63e 100644 --- a/crates/aptos/src/move_tool/mod.rs +++ b/crates/aptos/src/move_tool/mod.rs @@ -1569,7 +1569,7 @@ async fn submit_chunked_publish_transactions( match result { Ok(tx_summary) => { let tx_hash = tx_summary.transaction_hash.to_string(); - let status = tx_summary.success.map_or("".to_string(), |success| { + let status = tx_summary.success.map_or_else(String::new, |success| { if success { "Success".to_string() } else { diff --git a/keyless/common/src/input_processing/witness_gen.rs b/keyless/common/src/input_processing/witness_gen.rs index baaf2e783e6ff..6df60c3822113 100644 --- a/keyless/common/src/input_processing/witness_gen.rs +++ b/keyless/common/src/input_processing/witness_gen.rs @@ -12,7 +12,9 @@ pub trait PathStr { impl PathStr for NamedTempFile { fn path_str(&self) -> Result<&str> { - self.path().to_str().ok_or(anyhow!("tempfile path error")) + self.path() + .to_str() + .ok_or_else(|| anyhow!("tempfile path error")) } } diff --git a/mempool/src/shared_mempool/network.rs b/mempool/src/shared_mempool/network.rs index 36a672721093d..3f5bee5f9b520 100644 --- a/mempool/src/shared_mempool/network.rs +++ b/mempool/src/shared_mempool/network.rs @@ -370,7 +370,7 @@ impl> MempoolNetworkInterf // If we don't have any info about the node, we shouldn't broadcast to it let state = sync_states .get_mut(&peer) - .ok_or(BroadcastError::PeerNotFound(peer))?; + .ok_or_else(|| BroadcastError::PeerNotFound(peer))?; // If backoff mode is on for this peer, only execute broadcasts that 
were scheduled as a backoff broadcast. // This is to ensure the backoff mode is actually honored (there is a chance a broadcast was scheduled @@ -607,7 +607,7 @@ impl> MempoolNetworkInterf let mut sync_states = self.sync_states.write(); let state = sync_states .get_mut(&peer) - .ok_or(BroadcastError::PeerNotFound(peer))?; + .ok_or_else(|| BroadcastError::PeerNotFound(peer))?; // Update peer sync state with info from above broadcast. state.update(&message_id); diff --git a/secure/storage/src/on_disk.rs b/secure/storage/src/on_disk.rs index a896d8b4ef39c..dfbb921a6f07f 100644 --- a/secure/storage/src/on_disk.rs +++ b/secure/storage/src/on_disk.rs @@ -42,7 +42,7 @@ impl OnDiskStorage { // working directory provided by PathBuf::new(). let file_dir = file_path .parent() - .map_or(PathBuf::new(), |p| p.to_path_buf()); + .map_or_else(PathBuf::new, |p| p.to_path_buf()); Self { file_path, diff --git a/state-sync/data-streaming-service/src/data_stream.rs b/state-sync/data-streaming-service/src/data_stream.rs index 95bf16a173d80..68e8fbe4abaa7 100644 --- a/state-sync/data-streaming-service/src/data_stream.rs +++ b/state-sync/data-streaming-service/src/data_stream.rs @@ -588,9 +588,12 @@ impl DataStream { .advertised_data .highest_synced_ledger_info() .map(|ledger_info| ledger_info.ledger_info().version()) - .ok_or(aptos_data_client::error::Error::UnexpectedErrorEncountered( - "The highest synced ledger info is missing from the global data summary!".into(), - ))?; + .ok_or_else(|| { + aptos_data_client::error::Error::UnexpectedErrorEncountered( + "The highest synced ledger info is missing from the global data summary!" + .into(), + ) + })?; // If the stream is not lagging behind, reset the lag and return if highest_response_version >= highest_advertised_version { diff --git a/storage/aptosdb/src/db/fake_aptosdb.rs b/storage/aptosdb/src/db/fake_aptosdb.rs index c15a1ea742dea..325c9fe4228c9 100644 --- a/storage/aptosdb/src/db/fake_aptosdb.rs +++ b/storage/aptosdb/src/db/fake_aptosdb.rs @@ -1117,7 +1117,7 @@ mod tests { let signed_transaction = transaction_with_proof .transaction .try_as_signed_user_txn() - .ok_or(anyhow!("not user transaction"))?; + .ok_or_else(|| anyhow!("not user transaction"))?; ensure!( transaction_with_proof.version == version, diff --git a/storage/aptosdb/src/db/include/aptosdb_internal.rs b/storage/aptosdb/src/db/include/aptosdb_internal.rs index 73cc62d2dafd0..d31b38778e1df 100644 --- a/storage/aptosdb/src/db/include/aptosdb_internal.rs +++ b/storage/aptosdb/src/db/include/aptosdb_internal.rs @@ -1,9 +1,9 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 +use crate::metrics::CONCURRENCY_GAUGE; use aptos_metrics_core::IntGaugeHelper; use aptos_storage_interface::block_info::BlockInfo; -use crate::metrics::CONCURRENCY_GAUGE; impl AptosDB { fn new_with_dbs( @@ -44,8 +44,11 @@ impl AptosDB { internal_indexer_db.clone(), )); - let ledger_pruner = - LedgerPrunerManager::new(Arc::clone(&ledger_db), pruner_config.ledger_pruner_config, internal_indexer_db); + let ledger_pruner = LedgerPrunerManager::new( + Arc::clone(&ledger_db), + pruner_config.ledger_pruner_config, + internal_indexer_db, + ); AptosDB { ledger_db: Arc::clone(&ledger_db), @@ -247,9 +250,9 @@ impl AptosDB { .ledger_db .metadata_db() .get_block_info(block_height)? - .ok_or(AptosDbError::NotFound(format!( - "BlockInfo not found at height {block_height}" - )))?) + .ok_or_else(|| { + AptosDbError::NotFound(format!("BlockInfo not found at height {block_height}")) + })?) 
} } diff --git a/storage/aptosdb/src/ledger_db/event_db.rs b/storage/aptosdb/src/ledger_db/event_db.rs index 235df9b29a1ca..f34586cfbb42d 100644 --- a/storage/aptosdb/src/ledger_db/event_db.rs +++ b/storage/aptosdb/src/ledger_db/event_db.rs @@ -105,9 +105,9 @@ impl EventDb { Ok(EventsByVersionIter::new( iter, start_version, - start_version.checked_add(num_versions as u64).ok_or({ - AptosDbError::TooManyRequested(num_versions as u64, Version::max_value()) - })?, + start_version.checked_add(num_versions as u64).ok_or( + AptosDbError::TooManyRequested(num_versions as u64, Version::max_value()), + )?, )) } diff --git a/storage/aptosdb/src/ledger_db/ledger_metadata_db.rs b/storage/aptosdb/src/ledger_db/ledger_metadata_db.rs index d8661d7a4c46a..40ace0969239d 100644 --- a/storage/aptosdb/src/ledger_db/ledger_metadata_db.rs +++ b/storage/aptosdb/src/ledger_db/ledger_metadata_db.rs @@ -107,15 +107,13 @@ impl LedgerMetadataDb { } pub(crate) fn get_ledger_commit_progress(&self) -> Result { - get_progress(&self.db, &DbMetadataKey::LedgerCommitProgress)?.ok_or(AptosDbError::NotFound( - "No LedgerCommitProgress in db.".to_string(), - )) + get_progress(&self.db, &DbMetadataKey::LedgerCommitProgress)? + .ok_or_else(|| AptosDbError::NotFound("No LedgerCommitProgress in db.".to_string())) } pub(crate) fn get_pruner_progress(&self) -> Result { - get_progress(&self.db, &DbMetadataKey::LedgerPrunerProgress)?.ok_or(AptosDbError::NotFound( - "No LedgerPrunerProgress in db.".to_string(), - )) + get_progress(&self.db, &DbMetadataKey::LedgerPrunerProgress)? + .ok_or_else(|| AptosDbError::NotFound("No LedgerPrunerProgress in db.".to_string())) } } @@ -137,7 +135,7 @@ impl LedgerMetadataDb { /// Returns the latest ledger info, or NOT_FOUND if it doesn't exist. pub(crate) fn get_latest_ledger_info(&self) -> Result { self.get_latest_ledger_info_option() - .ok_or(AptosDbError::NotFound(String::from("Genesis LedgerInfo"))) + .ok_or_else(|| AptosDbError::NotFound(String::from("Genesis LedgerInfo"))) } /// Returns the latest ledger info for a given epoch. @@ -147,9 +145,7 @@ impl LedgerMetadataDb { ) -> Result { self.db .get::(&epoch)? - .ok_or(AptosDbError::NotFound(format!( - "Last LedgerInfo of epoch {epoch}" - ))) + .ok_or_else(|| AptosDbError::NotFound(format!("Last LedgerInfo of epoch {epoch}"))) } /// Returns an iterator that yields epoch ending ledger infos, starting from `start_epoch`, and @@ -304,9 +300,10 @@ impl LedgerMetadataDb { let mut iter = self.db.iter::()?; iter.seek_for_prev(&version)?; - let (_, block_height) = iter.next().transpose()?.ok_or(anyhow!( - "Block is not found at version {version}, maybe pruned?" - ))?; + let (_, block_height) = iter + .next() + .transpose()? + .ok_or_else(|| anyhow!("Block is not found at version {version}, maybe pruned?"))?; Ok(block_height) } @@ -320,7 +317,7 @@ impl LedgerMetadataDb { let (block_version, block_height) = iter .next() .transpose()? - .ok_or(anyhow!("Block is not found at or after version {version}"))?; + .ok_or_else(|| anyhow!("Block is not found at or after version {version}"))?; Ok((block_version, block_height)) } diff --git a/storage/aptosdb/src/ledger_db/write_set_db.rs b/storage/aptosdb/src/ledger_db/write_set_db.rs index 92dbc1e654178..9320e6fb5b19a 100644 --- a/storage/aptosdb/src/ledger_db/write_set_db.rs +++ b/storage/aptosdb/src/ledger_db/write_set_db.rs @@ -54,10 +54,7 @@ impl WriteSetDb { pub(crate) fn get_write_set(&self, version: Version) -> Result { self.db .get::(&version)? 
- .ok_or(AptosDbError::NotFound(format!( - "WriteSet at version {}", - version - ))) + .ok_or_else(|| AptosDbError::NotFound(format!("WriteSet at version {}", version))) } /// Returns an iterator that yields `num_transactions` write sets starting from `start_version`. diff --git a/storage/backup/backup-cli/src/metadata/view.rs b/storage/backup/backup-cli/src/metadata/view.rs index 3b857651208de..70439943c726e 100644 --- a/storage/backup/backup-cli/src/metadata/view.rs +++ b/storage/backup/backup-cli/src/metadata/view.rs @@ -259,10 +259,10 @@ impl fmt::Display for BackupStorageState { write!( f, "latest_epoch_ending_epoch: {}, latest_state_snapshot_epoch: {}, latest_state_snapshot_version: {}, latest_transaction_version: {}", - self.latest_epoch_ending_epoch.as_ref().map_or("none".to_string(), u64::to_string), - self.latest_state_snapshot_epoch.as_ref().map_or("none".to_string(), u64::to_string), - self.latest_state_snapshot_version.as_ref().map_or("none".to_string(), Version::to_string), - self.latest_transaction_version.as_ref().map_or("none".to_string(), Version::to_string), + self.latest_epoch_ending_epoch.as_ref().map_or_else(|| "none".to_string(), u64::to_string), + self.latest_state_snapshot_epoch.as_ref().map_or_else(|| "none".to_string(), u64::to_string), + self.latest_state_snapshot_version.as_ref().map_or_else(|| "none".to_string(), Version::to_string), + self.latest_transaction_version.as_ref().map_or_else(|| "none".to_string(), Version::to_string), ) } } diff --git a/third_party/move/move-prover/lab/src/benchmark.rs b/third_party/move/move-prover/lab/src/benchmark.rs index 957352c6a537b..6e4ba638f8bca 100644 --- a/third_party/move/move-prover/lab/src/benchmark.rs +++ b/third_party/move/move-prover/lab/src/benchmark.rs @@ -98,7 +98,7 @@ pub fn benchmark(args: &[String]) { let matches = cmd_line_parser.get_matches_from(args); let get_vec = |s: &str| -> Vec { let vs = matches.get_many::(s); - vs.map_or(vec![], |v| v.cloned().collect()) + vs.map_or_else(Vec::new, |v| v.cloned().collect()) }; let sources = get_vec("sources"); let deps = get_vec("dependencies"); diff --git a/third_party/move/move-prover/lab/src/plot.rs b/third_party/move/move-prover/lab/src/plot.rs index 7464c7d3d9b74..32d71bb3374c4 100644 --- a/third_party/move/move-prover/lab/src/plot.rs +++ b/third_party/move/move-prover/lab/src/plot.rs @@ -70,7 +70,7 @@ pub fn plot_svg(args: &[String]) -> anyhow::Result<()> { }; let out_file = matches .get_one::("out") - .map_or("plot.svg".to_owned(), |s| s.clone()); + .map_or_else(|| "plot.svg".to_owned(), |s| s.clone()); let sort = matches.contains_id("sort"); let top = matches.get_one::("top"); let data_files = get_vec("data-files"); diff --git a/third_party/move/tools/move-package/src/compilation/compiled_package.rs b/third_party/move/tools/move-package/src/compilation/compiled_package.rs index deb21b8fb93ac..95b160e50a365 100644 --- a/third_party/move/tools/move-package/src/compilation/compiled_package.rs +++ b/third_party/move/tools/move-package/src/compilation/compiled_package.rs @@ -712,7 +712,7 @@ impl CompiledPackage { for annot_unit in all_compiled_units { let source_path_str = file_map .get(&annot_unit.loc().file_hash()) - .ok_or(anyhow::anyhow!("invalid transaction script bytecode"))? + .ok_or_else(|| anyhow::anyhow!("invalid transaction script bytecode"))? 
.0 .as_str(); let source_path = PathBuf::from(source_path_str); diff --git a/types/src/block_info.rs b/types/src/block_info.rs index 62e6e8d51254d..1bf7b2bd33b2f 100644 --- a/types/src/block_info.rs +++ b/types/src/block_info.rs @@ -143,7 +143,7 @@ impl BlockInfo { /// The epoch after this block committed pub fn next_block_epoch(&self) -> u64 { - self.next_epoch_state().map_or(self.epoch(), |e| e.epoch) + self.next_epoch_state().map_or(self.epoch, |e| e.epoch) } pub fn change_timestamp(&mut self, timestamp: u64) { @@ -231,7 +231,7 @@ impl Display for BlockInfo { self.executed_state_id(), self.version(), self.timestamp_usecs(), - self.next_epoch_state.as_ref().map_or("None".to_string(), |epoch_state| format!("{}", epoch_state)), + self.next_epoch_state.as_ref().map_or_else(|| "None".to_string(), |epoch_state| format!("{}", epoch_state)), ) } } From 55e368cbf9314abd824a4c22b6cbbd508a7472bd Mon Sep 17 00:00:00 2001 From: Wolfgang Grieskamp Date: Fri, 11 Oct 2024 11:40:26 -0700 Subject: [PATCH 13/22] [compiler-v2] Add loop labels to the language (#14868) * [compiler-v2] Add loop labels to the language Besides the user being able to describe more complex algorithms more efficiently, loop labels are required to express any reducible control flow in the AST language, and create parity of the AST with the bytecode level for this kind of code (which is also what can be generated from Move). * Apply suggestions from code review * Addressing reviewer comments. --- .../src/bytecode_generator.rs | 7 +- .../tests/bytecode-generator/loop_labels.exp | 78 +++++++++++++++++++ .../tests/bytecode-generator/loop_labels.move | 14 ++++ .../tests/checking-lang-v1/loop_labels.exp | 31 ++++++++ .../tests/checking-lang-v1/loop_labels.move | 14 ++++ .../control_flow/loop_labels_check_err.exp | 21 +++++ .../control_flow/loop_labels_check_err.move | 14 ++++ .../control_flow/loop_labels_check_ok.exp | 37 +++++++++ .../control_flow/loop_labels_check_ok.move | 14 ++++ .../control_flow/loop_labels_parse_err1.exp | 10 +++ .../control_flow/loop_labels_parse_err1.move | 5 ++ .../control_flow/loop_labels_parse_err2.exp | 10 +++ .../control_flow/loop_labels_parse_err2.move | 5 ++ .../control_flow/loop_labels_parse_err3.exp | 7 ++ .../control_flow/loop_labels_parse_err3.move | 9 +++ .../control_flow/loop_labels_parse_err4.exp | 7 ++ .../control_flow/loop_labels_parse_err4.move | 5 ++ .../move/move-compiler-v2/tests/testsuite.rs | 4 +- .../tests/no-v1-comparison/loop_labels.exp | 1 + .../tests/no-v1-comparison/loop_labels.move | 18 +++++ .../move/move-compiler/src/expansion/ast.rs | 35 ++++++--- .../src/expansion/dependency_ordering.rs | 8 +- .../move-compiler/src/expansion/translate.rs | 16 ++-- .../move-compiler/src/naming/translate.rs | 8 +- .../move/move-compiler/src/parser/ast.rs | 37 ++++++--- .../move/move-compiler/src/parser/lexer.rs | 15 ++++ .../move/move-compiler/src/parser/syntax.rs | 54 ++++++++++--- .../move-model/src/builder/exp_builder.rs | 70 +++++++++++++++-- third_party/move/move-model/src/lib.rs | 7 +- 29 files changed, 500 insertions(+), 61 deletions(-) create mode 100644 third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.exp create mode 100644 third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.move create mode 100644 third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.move create mode 100644 
third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.move create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.move create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.move create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.move create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.move create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.move create mode 100644 third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.exp create mode 100644 third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.move diff --git a/third_party/move/move-compiler-v2/src/bytecode_generator.rs b/third_party/move/move-compiler-v2/src/bytecode_generator.rs index 2b024cb47991d..9cb45341d9743 100644 --- a/third_party/move/move-compiler-v2/src/bytecode_generator.rs +++ b/third_party/move/move-compiler-v2/src/bytecode_generator.rs @@ -450,11 +450,11 @@ impl<'env> Generator<'env> { self.emit_with(*id, |attr| Bytecode::Jump(attr, continue_label)); self.emit_with(*id, |attr| Bytecode::Label(attr, break_label)); }, - ExpData::LoopCont(id, 0, do_continue) => { + ExpData::LoopCont(id, nest, do_continue) => { if let Some(LoopContext { continue_label, break_label, - }) = self.loops.last() + }) = self.loops.iter().rev().nth(*nest) { let target = if *do_continue { *continue_label @@ -466,9 +466,6 @@ impl<'env> Generator<'env> { self.error(*id, "missing enclosing loop statement") } }, - ExpData::LoopCont(_, _, _) => { - unimplemented!("continue/break with nesting") - }, ExpData::SpecBlock(id, spec) => { // Map locals in spec to assigned temporaries. 
let mut replacer = |id, target| { diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.exp b/third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.exp new file mode 100644 index 0000000000000..247f1e144dfdb --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.exp @@ -0,0 +1,78 @@ +// -- Model dump before bytecode pipeline +module 0x815::test { + private fun f1() { + loop { + loop { + loop { + if true { + loop { + if false { + continue[3] + } else { + break[1] + }; + break + } + } else { + continue[2] + } + } + }; + break + } + } +} // end 0x815::test + +// -- Sourcified model before bytecode pipeline +module 0x815::test { + fun f1() { + 'l0: loop { + loop 'l1: loop if (true) loop { + if (false) continue 'l0 else break 'l1; + break + } else continue 'l0; + break + } + } +} + +============ initial bytecode ================ + +[variant baseline] +fun test::f1() { + var $t0: bool + var $t1: bool + 0: label L0 + 1: label L2 + 2: label L4 + 3: $t0 := true + 4: if ($t0) goto 5 else goto 19 + 5: label L6 + 6: label L9 + 7: $t1 := false + 8: if ($t1) goto 9 else goto 12 + 9: label L11 + 10: goto 0 + 11: goto 14 + 12: label L12 + 13: goto 23 + 14: label L13 + 15: goto 17 + 16: goto 6 + 17: label L10 + 18: goto 21 + 19: label L7 + 20: goto 0 + 21: label L8 + 22: goto 2 + 23: label L5 + 24: goto 1 + 25: label L3 + 26: goto 28 + 27: goto 0 + 28: label L1 + 29: return () +} + + +============ bytecode verification succeeded ======== diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.move b/third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.move new file mode 100644 index 0000000000000..e91763e82ae00 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/loop_labels.move @@ -0,0 +1,14 @@ +module 0x815::test { + fun f1() { + 'outer: loop { + // unlabeled loop, but counts in nesting in AST + loop { + 'inner: loop if (true) loop { + if (false) continue 'outer else break 'inner; + break + } else continue 'outer + }; + break + } + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.exp b/third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.exp new file mode 100644 index 0000000000000..ca660b6e4e138 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.exp @@ -0,0 +1,31 @@ + +Diagnostics: +error: unsupported language construct + ┌─ tests/checking-lang-v1/loop_labels.move:3:9 + │ +3 │ 'outer: loop { + │ ^^^^^^ loop labels are not enabled before version 2.1 + +error: unsupported language construct + ┌─ tests/checking-lang-v1/loop_labels.move:6:17 + │ +6 │ 'inner: loop if (true) loop { + │ ^^^^^^ loop labels are not enabled before version 2.1 + +error: unsupported language construct + ┌─ tests/checking-lang-v1/loop_labels.move:7:41 + │ +7 │ if (false) continue 'outer else break 'inner; + │ ^^^^^^ loop labels are not enabled before version 2.1 + +error: unsupported language construct + ┌─ tests/checking-lang-v1/loop_labels.move:7:59 + │ +7 │ if (false) continue 'outer else break 'inner; + │ ^^^^^^ loop labels are not enabled before version 2.1 + +error: unsupported language construct + ┌─ tests/checking-lang-v1/loop_labels.move:9:33 + │ +9 │ } else continue 'outer + │ ^^^^^^ loop labels are not enabled before version 2.1 diff --git a/third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.move 
b/third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.move new file mode 100644 index 0000000000000..e91763e82ae00 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking-lang-v1/loop_labels.move @@ -0,0 +1,14 @@ +module 0x815::test { + fun f1() { + 'outer: loop { + // unlabeled loop, but counts in nesting in AST + loop { + 'inner: loop if (true) loop { + if (false) continue 'outer else break 'inner; + break + } else continue 'outer + }; + break + } + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.exp b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.exp new file mode 100644 index 0000000000000..fd0b9ddf3ef67 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.exp @@ -0,0 +1,21 @@ + +Diagnostics: +error: label `'outer` undefined + ┌─ tests/checking/control_flow/loop_labels_check_err.move:3:15 + │ +3 │ break 'outer; + │ ^^^^^^ + +error: label `'inner` undefined + ┌─ tests/checking/control_flow/loop_labels_check_err.move:5:19 + │ +5 │ break 'inner + │ ^^^^^^ + +error: label `'l1` already used by outer loop + ┌─ tests/checking/control_flow/loop_labels_check_err.move:11:19 + │ +11 │ 'l1: loop 'l1: loop {}; + │ --- ^^^ + │ │ + │ outer definition of label diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.move b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.move new file mode 100644 index 0000000000000..6a1a1f616934e --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_err.move @@ -0,0 +1,14 @@ +module 0x815::test { + fun undefined_label() { + break 'outer; + 'outer: loop { + break 'inner + } + } + + fun duplicate_label() { + 'l1: loop {}; + 'l1: loop 'l1: loop {}; + 'l1: loop {} + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.exp b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.exp new file mode 100644 index 0000000000000..3bf1a2816123f --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.exp @@ -0,0 +1,37 @@ +// -- Model dump before bytecode pipeline +module 0x815::test { + private fun f1() { + loop { + loop { + loop { + if true { + loop { + if false { + continue[3] + } else { + break[1] + }; + break + } + } else { + continue[2] + } + } + }; + break + } + } +} // end 0x815::test + +// -- Sourcified model before bytecode pipeline +module 0x815::test { + fun f1() { + 'l0: loop { + loop 'l1: loop if (true) loop { + if (false) continue 'l0 else break 'l1; + break + } else continue 'l0; + break + } + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.move b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.move new file mode 100644 index 0000000000000..e91763e82ae00 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_check_ok.move @@ -0,0 +1,14 @@ +module 0x815::test { + fun f1() { + 'outer: loop { + // unlabeled loop, but counts in nesting in AST + loop { + 'inner: loop if (true) loop { + if (false) continue 'outer else break 'inner; + break + } else continue 'outer + }; + break + } + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.exp 
b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.exp new file mode 100644 index 0000000000000..5da0a89107cbc --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.exp @@ -0,0 +1,10 @@ + +Diagnostics: +error: unexpected token + ┌─ tests/checking/control_flow/loop_labels_parse_err1.move:3:13 + │ +3 │ 'a: if (true) false else true + │ ^^ + │ │ + │ Unexpected 'if' + │ Expected one of: `while` or `loop` diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.move b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.move new file mode 100644 index 0000000000000..319268dd01537 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err1.move @@ -0,0 +1,5 @@ +module 0x815::test { + fun f1(): bool { + 'a: if (true) false else true + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.exp b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.exp new file mode 100644 index 0000000000000..f2275ffd0e61e --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.exp @@ -0,0 +1,10 @@ + +Diagnostics: +error: unexpected token + ┌─ tests/checking/control_flow/loop_labels_parse_err2.move:3:13 + │ +3 │ 'a: if (true) false else true + │ ^^ + │ │ + │ Unexpected 'if' + │ Expected one of: `while` or `loop` diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.move b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.move new file mode 100644 index 0000000000000..319268dd01537 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err2.move @@ -0,0 +1,5 @@ +module 0x815::test { + fun f1(): bool { + 'a: if (true) false else true + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.exp b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.exp new file mode 100644 index 0000000000000..59cf17aac2daa --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.exp @@ -0,0 +1,7 @@ + +Diagnostics: +error: invalid character + ┌─ tests/checking/control_flow/loop_labels_parse_err3.move:3:10 + │ +3 │ ': if (true) false else true + │ ^ Label quote must be followed by 'A-Z', `a-z', or '_' diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.move b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.move new file mode 100644 index 0000000000000..150322d37c6b9 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err3.move @@ -0,0 +1,9 @@ +module 0x815::test { + fun f1(): bool { + ': if (true) false else true + } + + fun f1(): bool { + '0x: if (true) false else true + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.exp b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.exp new file mode 100644 index 0000000000000..b8bfb04974e35 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.exp @@ -0,0 +1,7 @@ + +Diagnostics: +error: invalid character + ┌─ 
tests/checking/control_flow/loop_labels_parse_err4.move:3:10 + │ +3 │ '0x: if (true) false else true + │ ^ Label quote must be followed by 'A-Z', `a-z', or '_' diff --git a/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.move b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.move new file mode 100644 index 0000000000000..4ec39b2125b2d --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/control_flow/loop_labels_parse_err4.move @@ -0,0 +1,5 @@ +module 0x815::test { + fun f1(): bool { + '0x: if (true) false else true + } +} diff --git a/third_party/move/move-compiler-v2/tests/testsuite.rs b/third_party/move/move-compiler-v2/tests/testsuite.rs index 3bd4ea13694d8..132c5d11d30c1 100644 --- a/third_party/move/move-compiler-v2/tests/testsuite.rs +++ b/third_party/move/move-compiler-v2/tests/testsuite.rs @@ -106,7 +106,7 @@ const TEST_CONFIGS: Lazy> = Lazy::new(|| { // Turn optimization on by default. Some configs below may turn it off. .set_experiment(Experiment::OPTIMIZE, true) .set_experiment(Experiment::OPTIMIZE_WAITING_FOR_COMPARE_TESTS, true) - .set_language_version(LanguageVersion::V2_0); + .set_language_version(LanguageVersion::V2_1); opts.testing = true; let configs = vec![ // --- Tests for checking and ast processing @@ -718,7 +718,7 @@ const TEST_CONFIGS: Lazy> = Lazy::new(|| { include: vec!["/op-equal/"], exclude: vec![], exp_suffix: None, - options: opts.clone().set_language_version(LanguageVersion::V2_1), + options: opts.clone(), // Run the entire compiler pipeline to double-check the result stop_after: StopAfter::FileFormat, dump_ast: DumpLevel::EndStage, diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.exp new file mode 100644 index 0000000000000..6cd67db3f6472 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.exp @@ -0,0 +1 @@ +processed 1 task diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.move b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.move new file mode 100644 index 0000000000000..b1c0cf1f69c32 --- /dev/null +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/loop_labels.move @@ -0,0 +1,18 @@ +//# run +script { + fun main() { + let result = 0; + 'outer: while (result < 100) { + while (result < 50) { + 'inner: while (result < 30) { + result += 1; + continue 'outer + }; + result += 10; + continue 'outer + }; + result += 20 + }; + assert!(result == 110); + } +} diff --git a/third_party/move/move-compiler/src/expansion/ast.rs b/third_party/move/move-compiler/src/expansion/ast.rs index 880234d6fc715..47db86f2654ba 100644 --- a/third_party/move/move-compiler/src/expansion/ast.rs +++ b/third_party/move/move-compiler/src/expansion/ast.rs @@ -5,7 +5,7 @@ use crate::{ expansion::translate::is_valid_struct_constant_or_schema_name, parser::ast::{ - self as P, Ability, Ability_, BinOp, CallKind, ConstantName, Field, FunctionName, + self as P, Ability, Ability_, BinOp, CallKind, ConstantName, Field, FunctionName, Label, ModuleName, QuantKind, SpecApplyPattern, StructName, UnaryOp, UseDecl, Var, VariantName, ENTRY_MODIFIER, }, @@ -25,7 +25,6 @@ use std::{ fmt, hash::Hash, }; - 
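Rust shares the labeled-loop syntax this patch adds to Move, so the semantics are easy to sanity-check outside the compiler. A sketch in plain Rust (nothing here is from the Move toolchain) of (a) how a `break`/`continue` nesting depth selects an enclosing loop from a stack, mirroring the `self.loops.iter().rev().nth(*nest)` lookup in the bytecode generator, and (b) a transcription of the transactional test above, confirming it ends at 110:

```rust
// (a) nest = 0 is the innermost enclosing loop, nest = 1 its parent, etc.
fn enclosing_loop<'a>(loops: &[&'a str], nest: usize) -> Option<&'a str> {
    loops.iter().rev().nth(nest).copied()
}

fn main() {
    let loops = ["outer", "middle", "inner"]; // pushed outermost-first
    assert_eq!(enclosing_loop(&loops, 0), Some("inner"));
    assert_eq!(enclosing_loop(&loops, 2), Some("outer"));
    assert_eq!(enclosing_loop(&loops, 3), None); // "missing enclosing loop statement"

    // (b) The Move transactional test, transcribed; the unused 'inner label
    // is dropped because Rust lints on unused loop labels.
    let mut result = 0u64;
    'outer: while result < 100 {
        while result < 50 {
            while result < 30 {
                result += 1;
                continue 'outer;
            }
            result += 10;
            continue 'outer;
        }
        result += 20;
    }
    assert_eq!(result, 110);
}
```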
//************************************************************************************************** // Program //************************************************************************************************** @@ -504,8 +503,8 @@ pub enum Exp_ { IfElse(Box, Box, Box), Match(Box, Vec, Exp)>>), - While(Box, Box), - Loop(Box), + While(Option

use 0x1::error;
 use 0x1::features;
+use 0x1::option;
 use 0x1::string;
 
@@ -221,7 +252,7 @@ and any calls will raise this error. -Arguments passed to concat exceed max limit of 256 bytes (for prefix and suffix together). +Arguments passed to concat exceed max limit of 1024 bytes (for prefix and suffix together).
const ECONCAT_STRING_LENGTH_TOO_LARGE: u64 = 8;
@@ -709,7 +740,7 @@ Useful for when object is sometimes created via string_concat(), and sometimes d
 Concatenates before, snapshot and after into a single string.
 snapshot passed needs to have integer type - currently supported types are u64 and u128.
 Raises EUNSUPPORTED_AGGREGATOR_SNAPSHOT_TYPE if called with another type.
-If length of prefix and suffix together exceed 256 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised.
+If length of prefix and suffix together exceeds 1024 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised.
 
 Parallelism info: This operation enables parallelism.
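A sketch of the documented limit (hypothetical helper, mirroring the `aborts_if len(before.bytes) + len(after.bytes) > 1024` clause later in this patch): only the prefix and suffix count toward the 1024 bytes, not the rendered snapshot between them.

```rust
// Hypothetical guard mirroring ECONCAT_STRING_LENGTH_TOO_LARGE: the prefix
// and suffix together may not exceed 1024 bytes.
fn check_concat_args(before: &str, after: &str) -> Result<(), &'static str> {
    if before.len() + after.len() > 1024 {
        return Err("ECONCAT_STRING_LENGTH_TOO_LARGE");
    }
    Ok(())
}

fn main() {
    assert!(check_concat_args("before", "after").is_ok());
    let long_prefix = "x".repeat(1020);
    assert!(check_concat_args(&long_prefix, "suffix").is_err());
}
```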
 
@@ -776,11 +807,523 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
 
 
 
+
+
+
+
+## Function `verify_aggregator_try_add_sub`
+
+
+
+
#[verify_only]
+fun verify_aggregator_try_add_sub(): aggregator_v2::Aggregator<u64>
+
+
+Implementation
+
fun verify_aggregator_try_add_sub(): Aggregator<u64> {
+    let agg = create_aggregator(10);
+    spec {
+        assert spec_get_max_value(agg) == 10;
+        assert spec_get_value(agg) == 0;
+    };
+    let x = try_add(&mut agg, 5);
+    spec {
+        assert x;
+        assert is_at_least(agg, 5);
+    };
+    let y = try_sub(&mut agg, 6);
+    spec {
+        assert !y;
+        assert spec_get_value(agg) == 5;
+        assert spec_get_max_value(agg) == 10;
+    };
+    let y = try_sub(&mut agg, 4);
+    spec {
+        assert y;
+        assert spec_get_value(agg) == 1;
+        assert spec_get_max_value(agg) == 10;
+    };
+    let x = try_add(&mut agg, 11);
+    spec {
+        assert !x;
+        assert spec_get_value(agg) == 1;
+        assert spec_get_max_value(agg) == 10;
+    };
+    let x = try_add(&mut agg, 9);
+    spec {
+        assert x;
+        assert spec_get_value(agg) == 10;
+        assert spec_get_max_value(agg) == 10;
+    };
+    agg
+}
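The test above pins down the bounded-counter semantics of `Aggregator<u64>`. A Rust model of just those semantics (a sketch for intuition, not the on-chain implementation) replays the same sequence:

```rust
// Bounded counter: try_add/try_sub succeed iff 0 <= value <= max_value holds
// afterwards; on failure the value is left unchanged.
struct Aggregator {
    value: u64,
    max_value: u64,
}

impl Aggregator {
    fn new(max_value: u64) -> Self {
        Self { value: 0, max_value }
    }

    fn try_add(&mut self, delta: u64) -> bool {
        match self.value.checked_add(delta) {
            Some(v) if v <= self.max_value => { self.value = v; true },
            _ => false,
        }
    }

    fn try_sub(&mut self, delta: u64) -> bool {
        match self.value.checked_sub(delta) {
            Some(v) => { self.value = v; true },
            None => false,
        }
    }
}

fn main() {
    // Replays verify_aggregator_try_add_sub step by step.
    let mut agg = Aggregator::new(10);
    assert!(agg.try_add(5));   // 0 + 5 <= 10
    assert!(!agg.try_sub(6));  // 5 - 6 underflows, value stays 5
    assert!(agg.try_sub(4));   // 5 - 4 = 1
    assert!(!agg.try_add(11)); // 1 + 11 > 10, value stays 1
    assert!(agg.try_add(9));   // 1 + 9 = 10
    assert_eq!(agg.value, 10);
}
```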
+
+
+
+## Function `verify_aggregator_add_sub`
+
#[verify_only]
+fun verify_aggregator_add_sub(sub_value: u64, add_value: u64)
+
+
+Implementation
+
fun verify_aggregator_add_sub(sub_value: u64, add_value: u64) {
+    let agg = create_aggregator(10);
+    add(&mut agg, add_value);
+    spec {
+        assert spec_get_value(agg) == add_value;
+    };
+    sub(&mut agg, sub_value);
+    spec {
+        assert spec_get_value(agg) == add_value - sub_value;
+    };
+}
+
+
+
+## Function `verify_correct_read`
+
#[verify_only]
+fun verify_correct_read()
+
+
+Implementation
+
fun verify_correct_read() {
+    let snapshot = create_snapshot(42);
+    spec {
+        assert spec_read_snapshot(snapshot) == 42;
+    };
+    let derived = create_derived_string(std::string::utf8(b"42"));
+    spec {
+        assert spec_read_derived_string(derived).bytes == b"42";
+    };
+}
+
+
+
+## Function `verify_invalid_read`
+
#[verify_only]
+fun verify_invalid_read(aggregator: &aggregator_v2::Aggregator<u8>): u8
+
+
+Implementation
+
fun verify_invalid_read(aggregator: &Aggregator<u8>): u8 {
+    read(aggregator)
+}
+
+
+
+## Function `verify_invalid_is_least`
+
#[verify_only]
+fun verify_invalid_is_least(aggregator: &aggregator_v2::Aggregator<u8>): bool
+
+
+Implementation
+
fun verify_invalid_is_least(aggregator: &Aggregator<u8>): bool {
+    is_at_least(aggregator, 0)
+}
+
+
+
+## Function `verify_copy_not_yet_supported`
+
#[verify_only]
+fun verify_copy_not_yet_supported()
+
+
+Implementation
+
fun verify_copy_not_yet_supported() {
+    let snapshot = create_snapshot(42);
+    copy_snapshot(&snapshot);
+}
+
+
+
+## Function `verify_string_concat1`
+
#[verify_only]
+fun verify_string_concat1()
+
+
+Implementation
+
fun verify_string_concat1() {
+    let snapshot = create_snapshot(42);
+    let derived = derive_string_concat(std::string::utf8(b"before"), &snapshot, std::string::utf8(b"after"));
+    spec {
+        assert spec_read_derived_string(derived).bytes ==
+            concat(b"before", concat(spec_get_string_value(snapshot).bytes, b"after"));
+    };
+}
+
+
+
+## Function `verify_aggregator_generic`
+
#[verify_only]
+fun verify_aggregator_generic<IntElement1: copy, drop, IntElement2: copy, drop>(): (aggregator_v2::Aggregator<IntElement1>, aggregator_v2::Aggregator<IntElement2>)
+
+
+Implementation
+
fun verify_aggregator_generic<IntElement1: copy + drop, IntElement2: copy+drop>(): (Aggregator<IntElement1>,  Aggregator<IntElement2>){
+    let x = create_unbounded_aggregator<IntElement1>();
+    let y = create_unbounded_aggregator<IntElement2>();
+    (x, y)
+}
+
+
+
+## Function `verify_aggregator_generic_add`
+
#[verify_only]
+fun verify_aggregator_generic_add<IntElement: copy, drop>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+
+Implementation
+
fun verify_aggregator_generic_add<IntElement: copy + drop>(aggregator: &mut Aggregator<IntElement>, value: IntElement) {
+    try_add(aggregator, value);
+    is_at_least_impl(aggregator, value);
+    // cannot specify aborts_if condition for generic `add`
+    // because comparison is not supported by IntElement
+    add(aggregator, value);
+}
+
+
+
+## Function `verify_aggregator_generic_sub`
+
#[verify_only]
+fun verify_aggregator_generic_sub<IntElement: copy, drop>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+
+Implementation
+
fun verify_aggregator_generic_sub<IntElement: copy + drop>(aggregator: &mut Aggregator<IntElement>, value: IntElement) {
+    try_sub(aggregator, value);
+    // cannot specify aborts_if condition for generic `sub`
+    // because comparison is not supported by IntElement
+    sub(aggregator, value);
+}
+
+
+
+## Function `verify_aggregator_invalid_type1`
+
#[verify_only]
+fun verify_aggregator_invalid_type1()
+
+
+Implementation
+
fun verify_aggregator_invalid_type1() {
+    create_unbounded_aggregator<u8>();
+}
+
+
+
+## Function `verify_snapshot_invalid_type1`
+
#[verify_only]
+fun verify_snapshot_invalid_type1()
+
+
+Implementation
+
fun verify_snapshot_invalid_type1() {
+    use std::option;
+    create_snapshot(option::some(42));
+}
+
+
+
+## Function `verify_snapshot_invalid_type2`
+
#[verify_only]
+fun verify_snapshot_invalid_type2()
+
+
+Implementation
+
fun verify_snapshot_invalid_type2() {
+    create_snapshot(vector[42]);
+}
+
+
+
+## Function `verify_aggregator_valid_type`
+
#[verify_only]
+fun verify_aggregator_valid_type()
+
+
+Implementation
+
fun verify_aggregator_valid_type() {
+    let _agg_1 = create_unbounded_aggregator<u64>();
+    spec {
+        assert spec_get_max_value(_agg_1) == MAX_U64;
+    };
+    let _agg_2 = create_unbounded_aggregator<u128>();
+    spec {
+        assert spec_get_max_value(_agg_2) == MAX_U128;
+    };
+    create_aggregator<u64>(5);
+    create_aggregator<u128>(5);
+}
+
+
-## Specification
+## Specification
+
native fun spec_get_max_value<IntElement>(aggregator: Aggregator<IntElement>): IntElement;
+
+
fun spec_get_string_value<IntElement>(aggregator: AggregatorSnapshot<IntElement>): String;
+
+
fun spec_read_snapshot<IntElement>(snapshot: AggregatorSnapshot<IntElement>): IntElement {
+   snapshot.value
+}
+
+
fun spec_read_derived_string(snapshot: DerivedStringSnapshot): String {
+   snapshot.value
+}
+
+
+
+### Struct `Aggregator`
+
struct Aggregator<IntElement> has drop, store
+
+
+value: IntElement
+
+max_value: IntElement
+
pragma intrinsic;
+
+
+
+### Function `max_value`
+
public fun max_value<IntElement: copy, drop>(aggregator: &aggregator_v2::Aggregator<IntElement>): IntElement
+
+
pragma intrinsic;
+
+
@@ -794,7 +1337,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
-pragma opaque;
+pragma intrinsic;
 
@@ -810,7 +1353,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
-pragma opaque;
+pragma intrinsic;
 
@@ -826,7 +1369,23 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
-pragma opaque;
+pragma intrinsic;
+
+
+
+### Function `add`
+
public fun add<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+
pragma intrinsic;
 
@@ -842,7 +1401,23 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
-pragma opaque;
+pragma intrinsic;
+
+
+
+### Function `sub`
+
public fun sub<IntElement>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+
pragma intrinsic;
 
@@ -858,7 +1433,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
-pragma opaque;
+pragma intrinsic;
 
@@ -874,7 +1449,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
-pragma opaque;
+pragma intrinsic;
 
@@ -891,6 +1466,8 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result.value == spec_get_value(aggregator);
 
@@ -907,6 +1484,8 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result.value == value;
 
@@ -923,6 +1502,8 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result == snapshot.value;
 
@@ -939,6 +1520,8 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+aborts_if [abstract] false;
+ensures [abstract] result == snapshot.value;
 
@@ -955,6 +1538,8 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+aborts_if [abstract] len(value.bytes) > 1024;
+ensures [abstract] result.value == value;
 
@@ -971,6 +1556,20 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+include AbortsIfIntElement<IntElement>;
+ensures [abstract] result.value.bytes == concat(before.bytes, concat(spec_get_string_value(snapshot).bytes, after.bytes));
+aborts_if [abstract] len(before.bytes) + len(after.bytes) > 1024;
+
+
schema AbortsIfIntElement<IntElement> {
+    aborts_if [abstract] type_info::type_name<IntElement>().bytes != b"u64" && type_info::type_name<IntElement>().bytes != b"u128";
+}
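The schema above is the single gate shared by the generic aggregator natives: the element type's name must be exactly `u64` or `u128`. A hypothetical Rust analogue of the same check; the Boogie template later in this patch encodes it by comparing the type name against the byte vectors `[117, 54, 52]` and `[117, 49, 50, 56]`, which are simply the ASCII spellings of those two names:

```rust
use std::any::type_name;

// Hypothetical analogue of AbortsIfIntElement: reject any element type
// whose name is not exactly "u64" or "u128".
fn check_supported_element<T>() -> Result<(), String> {
    match type_name::<T>() {
        "u64" | "u128" => Ok(()),
        other => Err(format!("unsupported aggregator element type: {other}")),
    }
}

fn main() {
    assert!(check_supported_element::<u64>().is_ok());
    assert!(check_supported_element::<u128>().is_ok());
    assert!(check_supported_element::<u8>().is_err());

    // The byte vectors in the Boogie guards decode to the same two names.
    assert_eq!(String::from_utf8(vec![117, 54, 52]).unwrap(), "u64");
    assert_eq!(String::from_utf8(vec![117, 49, 50, 56]).unwrap(), "u128");
}
```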
 
@@ -988,6 +1587,7 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+aborts_if [abstract] true;
 
@@ -1005,6 +1605,225 @@ DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTI
pragma opaque;
+aborts_if [abstract] true;
+
+
native fun spec_get_value<IntElement>(aggregator: Aggregator<IntElement>): IntElement;
+
+
+
+### Function `verify_aggregator_try_add_sub`
+
#[verify_only]
+fun verify_aggregator_try_add_sub(): aggregator_v2::Aggregator<u64>
+
+
ensures spec_get_max_value(result) == 10;
+ensures spec_get_value(result) == 10;
+ensures read(result) == 10;
+
+
+
+### Function `verify_aggregator_add_sub`
+
#[verify_only]
+fun verify_aggregator_add_sub(sub_value: u64, add_value: u64)
+
+
pragma aborts_if_is_strict;
+aborts_if add_value > 10;
+aborts_if sub_value > add_value;
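Read together with the function body (which creates a max-10 aggregator, adds, then subtracts), these two clauses are exactly the failure cases of that sequence; a quick Rust check of the claim, with a hypothetical helper:

```rust
// add(agg, add_value) aborts when add_value > 10 (the aggregator limit);
// sub(agg, sub_value) then aborts when sub_value > add_value.
fn add_sub(add_value: u64, sub_value: u64) -> Result<u64, &'static str> {
    let max_value = 10u64;
    if add_value > max_value {
        return Err("add aborts: add_value > 10");
    }
    add_value
        .checked_sub(sub_value)
        .ok_or("sub aborts: sub_value > add_value")
}

fn main() {
    assert_eq!(add_sub(7, 3), Ok(4));
    assert!(add_sub(11, 0).is_err());
    assert!(add_sub(5, 6).is_err());
}
```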
+
+
+
+### Function `verify_invalid_read`
+
#[verify_only]
+fun verify_invalid_read(aggregator: &aggregator_v2::Aggregator<u8>): u8
+
+
aborts_if true;
+
+
+
+### Function `verify_invalid_is_least`
+
#[verify_only]
+fun verify_invalid_is_least(aggregator: &aggregator_v2::Aggregator<u8>): bool
+
+
aborts_if true;
+
+
+
+### Function `verify_copy_not_yet_supported`
+
#[verify_only]
+fun verify_copy_not_yet_supported()
+
+
aborts_if true;
+
+
+
+### Function `verify_aggregator_generic`
+
#[verify_only]
+fun verify_aggregator_generic<IntElement1: copy, drop, IntElement2: copy, drop>(): (aggregator_v2::Aggregator<IntElement1>, aggregator_v2::Aggregator<IntElement2>)
+
+
aborts_if type_info::type_name<IntElement1>().bytes != b"u64" && type_info::type_name<IntElement1>().bytes != b"u128";
+aborts_if type_info::type_name<IntElement2>().bytes != b"u64" && type_info::type_name<IntElement2>().bytes != b"u128";
+
+
+
+### Function `verify_aggregator_generic_add`
+
#[verify_only]
+fun verify_aggregator_generic_add<IntElement: copy, drop>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+
aborts_if type_info::type_name<IntElement>().bytes != b"u64" && type_info::type_name<IntElement>().bytes != b"u128";
+
+
+
+### Function `verify_aggregator_generic_sub`
+
#[verify_only]
+fun verify_aggregator_generic_sub<IntElement: copy, drop>(aggregator: &mut aggregator_v2::Aggregator<IntElement>, value: IntElement)
+
+
aborts_if type_info::type_name<IntElement>().bytes != b"u64" && type_info::type_name<IntElement>().bytes != b"u128";
+
+
+
+### Function `verify_aggregator_invalid_type1`
+
#[verify_only]
+fun verify_aggregator_invalid_type1()
+
+
aborts_if true;
+
+
+
+### Function `verify_snapshot_invalid_type1`
+
#[verify_only]
+fun verify_snapshot_invalid_type1()
+
+
aborts_if true;
+
+
+
+### Function `verify_snapshot_invalid_type2`
+
#[verify_only]
+fun verify_snapshot_invalid_type2()
+
+
aborts_if true;
+
+
+
+### Function `verify_aggregator_valid_type`
+
#[verify_only]
+fun verify_aggregator_valid_type()
+
+
aborts_if false;
 
diff --git a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move index 7e26548bc0abd..c72777051d45d 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.move @@ -38,7 +38,7 @@ module aptos_framework::aggregator_v2 { /// The generic type supplied to the aggregator is not supported. const EUNSUPPORTED_AGGREGATOR_TYPE: u64 = 7; - /// Arguments passed to concat exceed max limit of 256 bytes (for prefix and suffix together). + /// Arguments passed to concat exceed max limit of 1024 bytes (for prefix and suffix together). const ECONCAT_STRING_LENGTH_TOO_LARGE: u64 = 8; /// The native aggregator function, that is in the move file, is not yet supported. @@ -194,7 +194,7 @@ module aptos_framework::aggregator_v2 { /// Concatenates `before`, `snapshot` and `after` into a single string. /// snapshot passed needs to have integer type - currently supported types are u64 and u128. /// Raises EUNSUPPORTED_AGGREGATOR_SNAPSHOT_TYPE if called with another type. - /// If length of prefix and suffix together exceed 256 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. + /// If length of prefix and suffix together exceeds 1024 bytes, ECONCAT_STRING_LENGTH_TOO_LARGE is raised. /// /// Parallelism info: This operation enables parallelism. public native fun derive_string_concat(before: String, snapshot: &AggregatorSnapshot, after: String): DerivedStringSnapshot; @@ -209,6 +209,199 @@ module aptos_framework::aggregator_v2 { /// DEPRECATED, use derive_string_concat() instead. always raises EAGGREGATOR_FUNCTION_NOT_YET_SUPPORTED. public native fun string_concat(before: String, snapshot: &AggregatorSnapshot, after: String): AggregatorSnapshot; + #[verify_only] + fun verify_aggregator_try_add_sub(): Aggregator { + let agg = create_aggregator(10); + spec { + assert spec_get_max_value(agg) == 10; + assert spec_get_value(agg) == 0; + }; + let x = try_add(&mut agg, 5); + spec { + assert x; + assert is_at_least(agg, 5); + }; + let y = try_sub(&mut agg, 6); + spec { + assert !y; + assert spec_get_value(agg) == 5; + assert spec_get_max_value(agg) == 10; + }; + let y = try_sub(&mut agg, 4); + spec { + assert y; + assert spec_get_value(agg) == 1; + assert spec_get_max_value(agg) == 10; + }; + let x = try_add(&mut agg, 11); + spec { + assert !x; + assert spec_get_value(agg) == 1; + assert spec_get_max_value(agg) == 10; + }; + let x = try_add(&mut agg, 9); + spec { + assert x; + assert spec_get_value(agg) == 10; + assert spec_get_max_value(agg) == 10; + }; + agg + } + + spec verify_aggregator_try_add_sub{ + ensures spec_get_max_value(result) == 10; + ensures spec_get_value(result) == 10; + ensures read(result) == 10; + } + + #[verify_only] + fun verify_aggregator_add_sub(sub_value: u64, add_value: u64) { + let agg = create_aggregator(10); + add(&mut agg, add_value); + spec { + assert spec_get_value(agg) == add_value; + }; + sub(&mut agg, sub_value); + spec { + assert spec_get_value(agg) == add_value - sub_value; + }; + } + + spec verify_aggregator_add_sub(sub_value: u64, add_value: u64) { + pragma aborts_if_is_strict; + aborts_if add_value > 10; + aborts_if sub_value > add_value; + } + + #[verify_only] + fun verify_correct_read() { + let snapshot = create_snapshot(42); + spec { + assert spec_read_snapshot(snapshot) == 42; + }; + let derived = create_derived_string(std::string::utf8(b"42")); + spec { + assert 
spec_read_derived_string(derived).bytes == b"42"; + }; + } + + #[verify_only] + fun verify_invalid_read(aggregator: &Aggregator): u8 { + read(aggregator) + } + spec verify_invalid_read { + aborts_if true; + } + + #[verify_only] + fun verify_invalid_is_least(aggregator: &Aggregator): bool { + is_at_least(aggregator, 0) + } + spec verify_invalid_is_least { + aborts_if true; + } + + #[verify_only] + fun verify_copy_not_yet_supported() { + let snapshot = create_snapshot(42); + copy_snapshot(&snapshot); + } + + spec verify_copy_not_yet_supported { + aborts_if true; + } + + #[verify_only] + fun verify_string_concat1() { + let snapshot = create_snapshot(42); + let derived = derive_string_concat(std::string::utf8(b"before"), &snapshot, std::string::utf8(b"after")); + spec { + assert spec_read_derived_string(derived).bytes == + concat(b"before", concat(spec_get_string_value(snapshot).bytes, b"after")); + }; + } + + #[verify_only] + fun verify_aggregator_generic(): (Aggregator, Aggregator){ + let x = create_unbounded_aggregator(); + let y = create_unbounded_aggregator(); + (x, y) + } + spec verify_aggregator_generic (): (Aggregator, Aggregator) { + use aptos_std::type_info; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + } + + #[verify_only] + fun verify_aggregator_generic_add(aggregator: &mut Aggregator, value: IntElement) { + try_add(aggregator, value); + is_at_least_impl(aggregator, value); + // cannot specify aborts_if condition for generic `add` + // because comparison is not supported by IntElement + add(aggregator, value); + } + spec verify_aggregator_generic_add(aggregator: &mut Aggregator, value: IntElement) { + use aptos_std::type_info; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + } + + #[verify_only] + fun verify_aggregator_generic_sub(aggregator: &mut Aggregator, value: IntElement) { + try_sub(aggregator, value); + // cannot specify aborts_if condition for generic `sub` + // because comparison is not supported by IntElement + sub(aggregator, value); + } + spec verify_aggregator_generic_sub(aggregator: &mut Aggregator, value: IntElement) { + use aptos_std::type_info; + aborts_if type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; + } + + #[verify_only] + fun verify_aggregator_invalid_type1() { + create_unbounded_aggregator(); + } + spec verify_aggregator_invalid_type1 { + aborts_if true; + } + + #[verify_only] + fun verify_snapshot_invalid_type1() { + use std::option; + create_snapshot(option::some(42)); + } + spec verify_snapshot_invalid_type1 { + aborts_if true; + } + + #[verify_only] + fun verify_snapshot_invalid_type2() { + create_snapshot(vector[42]); + } + + spec verify_snapshot_invalid_type2 { + aborts_if true; + } + + #[verify_only] + fun verify_aggregator_valid_type() { + let _agg_1 = create_unbounded_aggregator(); + spec { + assert spec_get_max_value(_agg_1) == MAX_U64; + }; + let _agg_2 = create_unbounded_aggregator(); + spec { + assert spec_get_max_value(_agg_2) == MAX_U128; + }; + create_aggregator(5); + create_aggregator(5); + } + + spec verify_aggregator_valid_type { + aborts_if false; + } + // ======================================== #[test] diff --git a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move index 
2395ebed63388..3fabb0fc2c8d3 100644 --- a/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move +++ b/aptos-move/framework/aptos-framework/sources/aggregator_v2/aggregator_v2.spec.move @@ -1,73 +1,109 @@ spec aptos_framework::aggregator_v2 { - spec create_aggregator { - // TODO: temporary mockup. - pragma opaque; + + spec Aggregator { + pragma intrinsic; } - spec create_unbounded_aggregator { - // TODO: temporary mockup. - pragma opaque; + spec max_value(aggregator: &Aggregator): IntElement { + pragma intrinsic; } - spec try_add { - // TODO: temporary mockup. - pragma opaque; + spec create_aggregator(max_value: IntElement): Aggregator { + pragma intrinsic; } - spec try_sub { - // TODO: temporary mockup. - pragma opaque; + spec create_unbounded_aggregator(): Aggregator { + pragma intrinsic; } - spec is_at_least_impl { - // TODO: temporary mockup. - pragma opaque; + spec try_add(aggregator: &mut Aggregator, value: IntElement): bool { + pragma intrinsic; } - spec read { - // TODO: temporary mockup. - pragma opaque; + spec add(aggregator: &mut Aggregator, value: IntElement) { + pragma intrinsic; + } + + spec try_sub(aggregator: &mut Aggregator, value: IntElement): bool { + pragma intrinsic; + } + + spec sub(aggregator: &mut Aggregator, value: IntElement) { + pragma intrinsic; + } + + spec is_at_least_impl(aggregator: &Aggregator, min_amount: IntElement): bool { + pragma intrinsic; + } + + spec read(aggregator: &Aggregator): IntElement { + pragma intrinsic; } - spec snapshot { - // TODO: temporary mockup. + spec snapshot(aggregator: &Aggregator): AggregatorSnapshot { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result.value == spec_get_value(aggregator); } - spec create_snapshot { - // TODO: temporary mockup. + spec create_snapshot(value: IntElement): AggregatorSnapshot { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result.value == value; } - spec read_snapshot { - // TODO: temporary mockup. + spec read_snapshot(snapshot: &AggregatorSnapshot): IntElement { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result == snapshot.value; } - spec read_derived_string { - // TODO: temporary mockup. + spec read_derived_string(snapshot: &DerivedStringSnapshot): String { pragma opaque; + aborts_if [abstract] false; + ensures [abstract] result == snapshot.value; } - spec create_derived_string { - // TODO: temporary mockup. + spec create_derived_string(value: String): DerivedStringSnapshot { pragma opaque; + aborts_if [abstract] len(value.bytes) > 1024; + ensures [abstract] result.value == value; } - spec derive_string_concat { - // TODO: temporary mockup. + spec derive_string_concat(before: String, snapshot: &AggregatorSnapshot, after: String): DerivedStringSnapshot { pragma opaque; + include AbortsIfIntElement; + ensures [abstract] result.value.bytes == concat(before.bytes, concat(spec_get_string_value(snapshot).bytes, after.bytes)); + aborts_if [abstract] len(before.bytes) + len(after.bytes) > 1024; + } + + spec schema AbortsIfIntElement { + use aptos_std::type_info; + aborts_if [abstract] type_info::type_name().bytes != b"u64" && type_info::type_name().bytes != b"u128"; } // deprecated spec copy_snapshot { - // TODO: temporary mockup. pragma opaque; + aborts_if [abstract] true; } // deprecated spec string_concat { - // TODO: temporary mockup. 
pragma opaque; + aborts_if [abstract] true; + } + + // Get aggregator.value + spec native fun spec_get_value(aggregator: Aggregator): IntElement; + // Get aggregator.max_value + spec native fun spec_get_max_value(aggregator: Aggregator): IntElement; + // Uninterpreted spec function that translates the value inside aggregator into corresponding string representation + spec fun spec_get_string_value(aggregator: AggregatorSnapshot): String; + spec fun spec_read_snapshot(snapshot: AggregatorSnapshot): IntElement { + snapshot.value + } + spec fun spec_read_derived_string(snapshot: DerivedStringSnapshot): String { + snapshot.value } } diff --git a/aptos-move/framework/aptos-token-objects/doc/collection.md b/aptos-move/framework/aptos-token-objects/doc/collection.md index 3193210b619f8..0416df35f7d72 100644 --- a/aptos-move/framework/aptos-token-objects/doc/collection.md +++ b/aptos-move/framework/aptos-token-objects/doc/collection.md @@ -63,6 +63,8 @@ require adding the field original_name. - [Function `set_description`](#0x4_collection_set_description) - [Function `set_uri`](#0x4_collection_set_uri) - [Function `set_max_supply`](#0x4_collection_set_max_supply) +- [Specification](#@Specification_1) + - [Function `increment_supply`](#@Specification_1_increment_supply)
use 0x1::aggregator_v2;
@@ -1824,5 +1826,41 @@ After changing the collection's name, to create tokens - only call functions tha
 
 
 
+
+
+## Specification
+
+
+
+
+### Function `increment_supply`
+
+
+
public(friend) fun increment_supply(collection: &object::Object<collection::Collection>, token: address): option::Option<aggregator_v2::AggregatorSnapshot<u64>>
+
+
pragma aborts_if_is_partial;
+let collection_addr = object::object_address(collection);
+let supply = global<ConcurrentSupply>(collection_addr);
+let post supply_post = global<ConcurrentSupply>(collection_addr);
+aborts_if exists<ConcurrentSupply>(collection_addr) &&
+    aggregator_v2::spec_get_value(supply.current_supply) + 1
+        > aggregator_v2::spec_get_max_value(supply.current_supply);
+aborts_if exists<ConcurrentSupply>(collection_addr) &&
+    aggregator_v2::spec_get_value(supply.total_minted) + 1
+        > aggregator_v2::spec_get_max_value(supply.total_minted);
+ensures
+    aggregator_v2::spec_get_max_value(supply.current_supply)
+        == aggregator_v2::spec_get_max_value(supply_post.current_supply);
+ensures exists<ConcurrentSupply>(collection_addr) &&
+    aggregator_v2::spec_get_value(supply.current_supply) + 1
+        <= aggregator_v2::spec_get_max_value(supply.current_supply) ==>
+    aggregator_v2::spec_get_value(supply.current_supply) + 1
+        == aggregator_v2::spec_get_value(supply_post.current_supply);
+
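The ensures/aborts_if pair above amounts to a checked increment against `max_value`; a compact Rust restatement of the contract (a sketch only, with an illustrative error message, not the framework's implementation):

```rust
// increment_supply contract: abort when the bump would exceed max_value,
// otherwise the current supply goes up by exactly one.
fn increment_supply(current_supply: &mut u64, max_value: u64) -> Result<(), &'static str> {
    match current_supply.checked_add(1).filter(|v| *v <= max_value) {
        Some(v) => {
            *current_supply = v;
            Ok(())
        },
        None => Err("supply would exceed max_value"), // illustrative error
    }
}

fn main() {
    let mut supply = 0u64;
    assert!(increment_supply(&mut supply, 1).is_ok());
    assert_eq!(supply, 1);
    assert!(increment_supply(&mut supply, 1).is_err());
}
```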
+ [move-book]: https://aptos.dev/move/book/SUMMARY diff --git a/aptos-move/framework/aptos-token-objects/sources/collection.move b/aptos-move/framework/aptos-token-objects/sources/collection.move index ae545b241d606..ff81e64d1ea57 100644 --- a/aptos-move/framework/aptos-token-objects/sources/collection.move +++ b/aptos-move/framework/aptos-token-objects/sources/collection.move @@ -440,6 +440,27 @@ module aptos_token_objects::collection { } } + spec increment_supply { + pragma aborts_if_is_partial; + let collection_addr = object::object_address(collection); + let supply = global(collection_addr); + let post supply_post = global(collection_addr); + aborts_if exists(collection_addr) && + aggregator_v2::spec_get_value(supply.current_supply) + 1 + > aggregator_v2::spec_get_max_value(supply.current_supply); + aborts_if exists(collection_addr) && + aggregator_v2::spec_get_value(supply.total_minted) + 1 + > aggregator_v2::spec_get_max_value(supply.total_minted); + ensures + aggregator_v2::spec_get_max_value(supply.current_supply) + == aggregator_v2::spec_get_max_value(supply_post.current_supply); + ensures exists(collection_addr) && + aggregator_v2::spec_get_value(supply.current_supply) + 1 + <= aggregator_v2::spec_get_max_value(supply.current_supply) ==> + aggregator_v2::spec_get_value(supply.current_supply) + 1 + == aggregator_v2::spec_get_value(supply_post.current_supply); + } + /// Called by token on burn to decrement supply if there's an appropriate Supply struct. public(friend) fun decrement_supply( collection: &Object, diff --git a/aptos-move/framework/src/aptos-natives.bpl b/aptos-move/framework/src/aptos-natives.bpl index f31f51181e25e..717b510391fdb 100644 --- a/aptos-move/framework/src/aptos-natives.bpl +++ b/aptos-move/framework/src/aptos-natives.bpl @@ -16,6 +16,226 @@ procedure {:inline 1} $1_object_exists_at{{S}}(object: int) returns (res: bool) {%- endfor %} + + + +{%- for instance in aggregator_v2_instances %} +{%- set S = instance.suffix -%} +{%- set T = instance.name -%} + +// ================================================================================== +// Intrinsic implementation of aggregator_v2 for element type `{{instance.suffix}}` + + +datatype $1_aggregator_v2_Aggregator'{{S}}' { + $1_aggregator_v2_Aggregator'{{S}}'($value: {{T}}, $max_value: {{T}}) +} +function {:inline} $Update'$1_aggregator_v2_Aggregator'{{S}}''_value(s: $1_aggregator_v2_Aggregator'{{S}}', x: {{T}}): $1_aggregator_v2_Aggregator'{{S}}' { + $1_aggregator_v2_Aggregator'{{S}}'(x, s->$max_value) +} +function {:inline} $Update'$1_aggregator_v2_Aggregator'{{S}}''_max_value(s: $1_aggregator_v2_Aggregator'{{S}}', x: {{T}}): $1_aggregator_v2_Aggregator'{{S}}' { + $1_aggregator_v2_Aggregator'{{S}}'(s->$value, x) +} +function $IsValid'$1_aggregator_v2_Aggregator'{{S}}''(s: $1_aggregator_v2_Aggregator'{{S}}'): bool { + $IsValid'{{S}}'(s->$value) + && $IsValid'{{S}}'(s->$max_value) +} +function {:inline} $IsEqual'$1_aggregator_v2_Aggregator'{{S}}''(s1: $1_aggregator_v2_Aggregator'{{S}}', s2: $1_aggregator_v2_Aggregator'{{S}}'): bool { + $IsEqual'{{S}}'(s1->$value, s2->$value) + && $IsEqual'{{S}}'(s1->$max_value, s2->$max_value) +} + +procedure {:inline 1} $1_aggregator_v2_create_unbounded_aggregator'{{S}}'() returns (res: $1_aggregator_v2_Aggregator'{{S}}') +{ + {% if S == "u64" -%} + res := $1_aggregator_v2_Aggregator'{{S}}'(0, $MAX_U64); + {% elif S == "u128" -%} + res := $1_aggregator_v2_Aggregator'{{S}}'(0, $MAX_U128); + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 
52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} +} + + + procedure {:inline 1} $1_aggregator_v2_create_aggregator'{{S}}'($max_value: {{T}}) returns (res: $1_aggregator_v2_Aggregator'{{S}}') + { + {% if S == "u64" or S == "u128" -%} + res := $1_aggregator_v2_Aggregator'{{S}}'(0, $max_value); + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + + procedure {:inline 1} $1_aggregator_v2_try_add'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (res: bool, aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + if ($Dereference(aggregator)->$max_value < value + $Dereference(aggregator)->$value) { + res := false; + aggregator_updated:= aggregator; + } else { + res := true; + aggregator_updated:= $UpdateMutation(aggregator, $1_aggregator_v2_Aggregator'{{S}}'(value + $Dereference(aggregator)->$value, $Dereference(aggregator)->$max_value)); + } + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_try_sub'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (res: bool, aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + if ($Dereference(aggregator)->$value < value) { + res := false; + aggregator_updated:= aggregator; + return; + } else { + res := true; + aggregator_updated:= $UpdateMutation(aggregator, $1_aggregator_v2_Aggregator'{{S}}'($Dereference(aggregator)->$value - value, $Dereference(aggregator)->$max_value)); + return; + } + {% elif "#" in S -%} + if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) { + call $ExecFailureAbort(); + return; + } + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_add'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + var try_result: bool; + var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}'; + call try_result, try_aggregator := $1_aggregator_v2_try_add'{{S}}'(aggregator, value); + if (!try_result) { + call $ExecFailureAbort(); + return; + } + aggregator_updated := try_aggregator; + return; + {% elif "#" in S -%} + var try_result: bool; + var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}'; + call try_result, try_aggregator := $1_aggregator_v2_try_add'{{S}}'(aggregator, value); + return; + {% else -%} + call $ExecFailureAbort(); + return; + {% endif -%} + } + + procedure {:inline 1} $1_aggregator_v2_sub'{{S}}'(aggregator: $Mutation ($1_aggregator_v2_Aggregator'{{S}}'), value: {{T}}) returns (aggregator_updated: $Mutation ($1_aggregator_v2_Aggregator'{{S}}')) + { + {% if S == "u64" or S == "u128" -%} + var try_result: 
+    var try_result: bool;
+    var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}';
+    call try_result, try_aggregator := $1_aggregator_v2_try_sub'{{S}}'(aggregator, value);
+    if (!try_result) {
+        call $ExecFailureAbort();
+        return;
+    }
+    aggregator_updated := try_aggregator;
+    return;
+    {% elif "#" in S -%}
+    var try_result: bool;
+    var try_aggregator: $Mutation $1_aggregator_v2_Aggregator'{{S}}';
+    call try_result, try_aggregator := $1_aggregator_v2_try_add'{{S}}'(aggregator, value);
+    return;
+    {% else -%}
+    call $ExecFailureAbort();
+    return;
+    {% endif -%}
+}
+
+procedure {:inline 1} $1_aggregator_v2_read'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}') returns (res: {{T}}) {
+    {% if S == "u64" or S == "u128" -%}
+    res := aggregator->$value;
+    {% elif "#" in S -%}
+    if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) {
+        call $ExecFailureAbort();
+        return;
+    }
+    {% else -%}
+    call $ExecFailureAbort();
+    return;
+    {% endif -%}
+}
+
+procedure {:inline 1} $1_aggregator_v2_max_value'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}') returns (res: {{T}}) {
+    {% if S == "u64" or S == "u128" -%}
+    res := aggregator->$max_value;
+    {% elif "#" in S -%}
+    if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) {
+        call $ExecFailureAbort();
+        return;
+    }
+    {% else -%}
+    call $ExecFailureAbort();
+    return;
+    {% endif -%}
+}
+
+procedure {:inline 1} $1_aggregator_v2_is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: {{T}}) returns (res: bool)
+{
+    {% if S == "u64" or S == "u128" -%}
+    res := aggregator->$value >= min_amount;
+    return;
+    {% elif "#" in S -%}
+    if (!$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec3(117, 54, 52)) && !$IsEqual'vec'u8''($TypeName({{S}}_info), MakeVec4(117, 49, 50, 56))) {
+        call $ExecFailureAbort();
+        return;
+    }
+    {% else -%}
+    call $ExecFailureAbort();
+    return;
+    {% endif -%}
+}
+
+function {:inline} $1_aggregator_v2_spec_get_value'{{S}}'(s: $1_aggregator_v2_Aggregator'{{S}}'): {{T}} {
+    s->$value
+}
+
+function {:inline} $1_aggregator_v2_spec_get_max_value'{{S}}'(s: $1_aggregator_v2_Aggregator'{{S}}'): {{T}} {
+    s->$max_value
+}
+
+function {:inline} $1_aggregator_v2_$read'{{S}}'(s: $1_aggregator_v2_Aggregator'{{S}}'): {{T}} {
+    s->$value
+}
+
+{% if S == "u64" or S == "u128" -%}
+    function {:inline} $1_aggregator_v2_$is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: int): bool
+    {
+        aggregator->$value >= min_amount
+    }
+{% else -%}
+    function $1_aggregator_v2_$is_at_least_impl'{{S}}'(aggregator: $1_aggregator_v2_Aggregator'{{S}}', min_amount: {{T}}): bool;
+{% endif -%}
+
+{%- endfor %}
+
 // ==================================================================================
 // Intrinsic implementation of aggregator and aggregator factory
 
diff --git a/aptos-move/framework/src/prover.rs b/aptos-move/framework/src/prover.rs
index c88b04596f094..fbe8a7fed97a1 100644
--- a/aptos-move/framework/src/prover.rs
+++ b/aptos-move/framework/src/prover.rs
@@ -161,11 +161,7 @@ impl ProverOptions {
         options.backend.custom_natives =
             Some(move_prover_boogie_backend::options::CustomNativeOptions {
                 template_bytes: include_bytes!("aptos-natives.bpl").to_vec(),
-                module_instance_names: vec![(
-                    "0x1::object".to_string(),
-                    "object_instances".to_string(),
-                    true,
-                )],
+                module_instance_names: move_prover_boogie_backend::options::custom_native_options(),
             });
         let mut writer = StandardStream::stderr(ColorChoice::Auto);
         if compiler_version.unwrap_or_default() == CompilerVersion::V1 {
diff --git a/third_party/move/move-prover/boogie-backend/src/options.rs b/third_party/move/move-prover/boogie-backend/src/options.rs
index 5c16b9faa66fd..989095b5659f3 100644
--- a/third_party/move/move-prover/boogie-backend/src/options.rs
+++ b/third_party/move/move-prover/boogie-backend/src/options.rs
@@ -58,6 +58,21 @@ pub struct CustomNativeOptions {
     pub module_instance_names: Vec<(String, String, bool)>,
 }
 
+pub fn custom_native_options() -> Vec<(String, String, bool)> {
+    vec![
+        (
+            "0x1::object".to_string(),
+            "object_instances".to_string(),
+            true,
+        ),
+        (
+            "0x1::aggregator_v2".to_string(),
+            "aggregator_v2_instances".to_string(),
+            true,
+        ),
+    ]
+}
+
 /// Contains information about a native method implementing mutable borrow semantics for a given
 /// type in an alternative storage model (returning &mut without taking appropriate &mut as a
 /// parameter, much like vector::borrow_mut)
diff --git a/third_party/move/move-prover/lab/src/benchmark.rs b/third_party/move/move-prover/lab/src/benchmark.rs
index 6e4ba638f8bca..b05b6515083a4 100644
--- a/third_party/move/move-prover/lab/src/benchmark.rs
+++ b/third_party/move/move-prover/lab/src/benchmark.rs
@@ -178,11 +178,7 @@ fn run_benchmark(
                 "../../../../../aptos-move/framework/src/aptos-natives.bpl"
             )
             .to_vec(),
-            module_instance_names: vec![(
-                "0x1::object".to_string(),
-                "object_instances".to_string(),
-                true,
-            )],
+            module_instance_names: move_prover_boogie_backend::options::custom_native_options(),
         });
     }
     // Do not allow any benchmark to run longer than 60s. If this is exceeded it usually
diff --git a/third_party/move/move-prover/src/cli.rs b/third_party/move/move-prover/src/cli.rs
index 6cb2af7d2ba76..1a34a4de84003 100644
--- a/third_party/move/move-prover/src/cli.rs
+++ b/third_party/move/move-prover/src/cli.rs
@@ -20,7 +20,10 @@ use move_model::{
     model::VerificationScope, options::ModelBuilderOptions,
 };
-use move_prover_boogie_backend::options::{BoogieOptions, CustomNativeOptions, VectorTheory};
+use move_prover_boogie_backend::{
+    options,
+    options::{BoogieOptions, CustomNativeOptions, VectorTheory},
+};
 use move_prover_bytecode_pipeline::options::{AutoTraceLevel, ProverOptions};
 use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};
@@ -808,11 +811,7 @@ impl Options {
                     "../../../../aptos-move/framework/src/aptos-natives.bpl"
                 )
                 .to_vec(),
-                module_instance_names: vec![(
-                    "0x1::object".to_string(),
-                    "object_instances".to_string(),
-                    true,
-                )],
+                module_instance_names: options::custom_native_options(),
             });
         options
             .move_named_address_values

From 2a0e7d6140a9d21dd90e851b0674b2f3e77c031c Mon Sep 17 00:00:00 2001
From: Guoteng Rao <3603304+grao1991@users.noreply.github.com>
Date: Mon, 14 Oct 2024 12:26:44 -0700
Subject: [PATCH 19/22] Serve some basic config info at /info. (#14842)

---
 api/doc/spec.json | 22 ++++++++++++++++++
 api/doc/spec.yaml | 14 ++++++++++++
 api/src/basic.rs  | 57 ++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 92 insertions(+), 1 deletion(-)

diff --git a/api/doc/spec.json b/api/doc/spec.json
index 2693b4c7289a0..c633b142f25c8 100644
--- a/api/doc/spec.json
+++ b/api/doc/spec.json
@@ -1900,6 +1900,28 @@
         "operationId": "spec"
       }
     },
+    "/info": {
+      "get": {
+        "tags": [
+          "General"
+        ],
+        "summary": "Show some basic info of the node.",
+        "responses": {
+          "200": {
+            "description": "",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "type": "object",
+                  "additionalProperties": {}
+                }
+              }
+            }
+          }
+        },
+        "operationId": "info"
+      }
+    },
     "/-/healthy": {
       "get": {
         "tags": [
diff --git a/api/doc/spec.yaml b/api/doc/spec.yaml
index 9ff83e9c1dbdc..2212289df30b8 100644
--- a/api/doc/spec.yaml
+++ b/api/doc/spec.yaml
@@ -1416,6 +1416,20 @@ paths:
         schema:
           type: string
       operationId: spec
+  /info:
+    get:
+      tags:
+        - General
+      summary: Show some basic info of the node.
+      responses:
+        '200':
+          description: ''
+          content:
+            application/json:
+              schema:
+                type: object
+                additionalProperties: {}
+      operationId: info
   /-/healthy:
     get:
       tags:
diff --git a/api/src/basic.rs b/api/src/basic.rs
index d6fed2dd70e3f..ae6fd8fb5c3b3 100644
--- a/api/src/basic.rs
+++ b/api/src/basic.rs
@@ -10,9 +10,14 @@ use crate::{
 };
 use anyhow::Context as AnyhowContext;
 use aptos_api_types::AptosErrorCode;
-use poem_openapi::{param::Query, payload::Html, Object, OpenApi};
+use poem_openapi::{
+    param::Query,
+    payload::{Html, Json},
+    Object, OpenApi,
+};
 use serde::{Deserialize, Serialize};
 use std::{
+    collections::HashMap,
     ops::Sub,
     sync::Arc,
     time::{Duration, SystemTime, UNIX_EPOCH},
@@ -60,6 +65,56 @@ impl BasicApi {
         Html(OPEN_API_HTML.to_string())
     }
 
+    /// Show some basic info of the node.
+    #[oai(
+        path = "/info",
+        method = "get",
+        operation_id = "info",
+        tag = "ApiTags::General"
+    )]
+    async fn info(&self) -> Json<HashMap<String, serde_json::Value>> {
+        let mut info = HashMap::new();
+        info.insert(
+            "bootstrapping_mode".to_string(),
+            serde_json::to_value(
+                self.context
+                    .node_config
+                    .state_sync
+                    .state_sync_driver
+                    .bootstrapping_mode,
+            )
+            .unwrap(),
+        );
+        info.insert(
+            "continuous_syncing_mode".to_string(),
+            serde_json::to_value(
+                self.context
+                    .node_config
+                    .state_sync
+                    .state_sync_driver
+                    .continuous_syncing_mode,
+            )
+            .unwrap(),
+        );
+        info.insert(
+            "new_storage_format".to_string(),
+            serde_json::to_value(
+                self.context
+                    .node_config
+                    .storage
+                    .rocksdb_configs
+                    .enable_storage_sharding,
+            )
+            .unwrap(),
+        );
+        info.insert(
+            "internal_indexer_config".to_string(),
+            serde_json::to_value(&self.context.node_config.indexer_db_config).unwrap(),
+        );
+
+        Json(info)
+    }
+
     /// Check basic node health
     ///
     /// By default this endpoint just checks that it can get the latest ledger

From 3a77c220aaeb0ce557ebf9e0ece43f6f7e2470ea Mon Sep 17 00:00:00 2001
From: Hwangjae Lee
Date: Wed, 16 Oct 2024 04:24:03 +0900
Subject: [PATCH 20/22] Fix Incorrect Domain Reference in RUST_SECURE_CODING.md (#14956)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Updated the incorrect domain reference from create.io to crates.io in the
“Rustup” section. This ensures the correct Rust package management site is
referenced for security considerations.

Signed-off-by: Hwangjae Lee
---
 RUST_SECURE_CODING.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/RUST_SECURE_CODING.md b/RUST_SECURE_CODING.md
index 698114c9e0ebd..99c7768ee6fdc 100644
--- a/RUST_SECURE_CODING.md
+++ b/RUST_SECURE_CODING.md
@@ -6,7 +6,7 @@ These Rust Secure Coding Guidelines are essential for anyone contributing to Apt
 
 ### Rustup
 
-Utilize Rustup for managing Rust toolchains. However, keep in mind that, from a security perspective, Rustup performs all downloads over HTTPS, but it does not yet validate signatures of downloads. Security is shifted to [create.io](http://create.io) and GitHub repository hosting the code [[rustup]](https://www.rust-lang.org/tools/install).
+Utilize Rustup for managing Rust toolchains. However, keep in mind that, from a security perspective, Rustup performs all downloads over HTTPS, but it does not yet validate signatures of downloads. Security is shifted to [crates.io](http://crates.io) and the GitHub repositories hosting the code [[rustup]](https://www.rust-lang.org/tools/install).
 
 ### Stable Toolchain
 
From b97a30a1db1dc57462a12b14d8fe12de57f8a5e6 Mon Sep 17 00:00:00 2001
From: larry-aptos <112209412+larry-aptos@users.noreply.github.com>
Date: Tue, 15 Oct 2024 13:27:44 -0700
Subject: [PATCH 21/22] Create a file checker. (#14925)

---
 Cargo.lock                                   |  19 ++
 Cargo.toml                                   |   2 +
 docker/builder/build-indexer.sh              |   4 +-
 docker/builder/indexer-grpc.Dockerfile       |   2 +-
 .../indexer-grpc-file-checker/Cargo.toml     |  30 +++
 .../indexer-grpc-file-checker/README.md      |  14 ++
 .../indexer-grpc-file-checker/src/lib.rs     |  44 ++++
 .../indexer-grpc-file-checker/src/main.rs    |  20 ++
 .../src/processor.rs                         | 200 ++++++++++++++++++
 9 files changed, 332 insertions(+), 3 deletions(-)
 create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-checker/Cargo.toml
 create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-checker/README.md
 create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-checker/src/lib.rs
 create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-checker/src/main.rs
 create mode 100644 ecosystem/indexer-grpc/indexer-grpc-file-checker/src/processor.rs

diff --git a/Cargo.lock b/Cargo.lock
index 4ab17dbdbd8d6..db0353c542f49 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2095,6 +2095,25 @@ dependencies = [
  "uuid",
 ]
 
+[[package]]
+name = "aptos-indexer-grpc-file-checker"
+version = "1.0.0"
+dependencies = [
+ "anyhow",
+ "aptos-indexer-grpc-server-framework",
+ "aptos-indexer-grpc-utils",
+ "aptos-metrics-core",
+ "async-trait",
+ "clap 4.4.14",
+ "cloud-storage",
+ "jemallocator",
+ "once_cell",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tracing",
+]
+
 [[package]]
 name = "aptos-indexer-grpc-file-store"
 version = "1.0.0"
diff --git a/Cargo.toml b/Cargo.toml
index d1f271a95416b..60121f8b12684 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -116,6 +116,7 @@ members = [
     "dkg",
     "ecosystem/indexer-grpc/indexer-grpc-cache-worker",
     "ecosystem/indexer-grpc/indexer-grpc-data-service",
+    "ecosystem/indexer-grpc/indexer-grpc-file-checker",
     "ecosystem/indexer-grpc/indexer-grpc-file-store",
     "ecosystem/indexer-grpc/indexer-grpc-file-store-backfiller",
     "ecosystem/indexer-grpc/indexer-grpc-fullnode",
@@ -360,6 +361,7 @@ aptos-indexer = { path = "crates/indexer" }
 aptos-indexer-grpc-cache-worker = { path = "ecosystem/indexer-grpc/indexer-grpc-cache-worker" }
 aptos-indexer-grpc-data-service = { path = "ecosystem/indexer-grpc/indexer-grpc-data-service" }
 aptos-indexer-grpc-file-store = { path = "ecosystem/indexer-grpc/indexer-grpc-file-store" }
+aptos-indexer-grpc-file-checker = { path = "ecosystem/indexer-grpc/indexer-grpc-file-checker" }
 aptos-indexer-grpc-file-store-backfiller = { path = "ecosystem/indexer-grpc/indexer-grpc-file-store-backfiller" }
 aptos-indexer-grpc-fullnode = { path = "ecosystem/indexer-grpc/indexer-grpc-fullnode" }
 aptos-indexer-grpc-in-memory-cache-benchmark = { path = "ecosystem/indexer-grpc/indexer-grpc-in-memory-cache-benchmark" }
diff --git a/docker/builder/build-indexer.sh b/docker/builder/build-indexer.sh
index 95bebee0a5de7..aa73cf3a8eb3f 100755
--- a/docker/builder/build-indexer.sh
+++ b/docker/builder/build-indexer.sh
@@ -16,7 +16,7 @@ cargo build --locked --profile=$PROFILE \
     -p aptos-indexer-grpc-file-store \
     -p aptos-indexer-grpc-data-service \
     -p aptos-nft-metadata-crawler \
-    -p aptos-indexer-grpc-file-store-backfiller \
+    -p aptos-indexer-grpc-file-checker \
     "$@"
 
 # After building, copy the binaries we need to `dist` since the `target` directory is used as docker cache mount and only available during the RUN step
@@ -25,7 +25,7 @@ BINS=(
     aptos-indexer-grpc-file-store
     aptos-indexer-grpc-data-service
     aptos-nft-metadata-crawler
-    aptos-indexer-grpc-file-store-backfiller
+    aptos-indexer-grpc-file-checker
 )
 
 mkdir dist
diff --git a/docker/builder/indexer-grpc.Dockerfile b/docker/builder/indexer-grpc.Dockerfile
index 255293fd3e27c..2f79181084662 100644
--- a/docker/builder/indexer-grpc.Dockerfile
+++ b/docker/builder/indexer-grpc.Dockerfile
@@ -17,7 +17,7 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
 COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-cache-worker /usr/local/bin/aptos-indexer-grpc-cache-worker
 COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-file-store /usr/local/bin/aptos-indexer-grpc-file-store
 COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-data-service /usr/local/bin/aptos-indexer-grpc-data-service
-COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-file-store-backfiller /usr/local/bin/aptos-indexer-grpc-file-store-backfiller
+COPY --link --from=indexer-builder /aptos/dist/aptos-indexer-grpc-file-checker /usr/local/bin/aptos-indexer-grpc-file-checker
 
 # The health check port
 EXPOSE 8080
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-checker/Cargo.toml b/ecosystem/indexer-grpc/indexer-grpc-file-checker/Cargo.toml
new file mode 100644
index 0000000000000..078984e2739ff
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-checker/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "aptos-indexer-grpc-file-checker"
+description = "Indexer gRPC file checker."
+version = "1.0.0"
+
+# Workspace inherited keys
+authors = { workspace = true }
+edition = { workspace = true }
+homepage = { workspace = true }
+license = { workspace = true }
+publish = { workspace = true }
+repository = { workspace = true }
+rust-version = { workspace = true }
+
+[dependencies]
+anyhow = { workspace = true }
+aptos-indexer-grpc-server-framework = { workspace = true }
+aptos-indexer-grpc-utils = { workspace = true }
+aptos-metrics-core = { workspace = true }
+async-trait = { workspace = true }
+clap = { workspace = true }
+cloud-storage = { workspace = true }
+once_cell = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+tokio = { workspace = true }
+tracing = { workspace = true }
+
+[target.'cfg(unix)'.dependencies]
+jemallocator = { workspace = true }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-checker/README.md b/ecosystem/indexer-grpc/indexer-grpc-file-checker/README.md
new file mode 100644
index 0000000000000..7edda1fc7a8d5
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-checker/README.md
@@ -0,0 +1,14 @@
+# Indexer GRPC file checker
+A program that compares the files in two buckets to make sure their contents are the same.
+
+## How to run it
+
+Example of config:
+
+```
+health_check_port: 8081
+server_config:
+  existing_bucket_name: bucket_being_used
+  new_bucket_name: bucket_with_new_sharding
+  starting_version: 123123
+```
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/lib.rs b/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/lib.rs
new file mode 100644
index 0000000000000..d39ee422d748b
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/lib.rs
@@ -0,0 +1,44 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+pub mod processor;
+
+use anyhow::Result;
+use aptos_indexer_grpc_server_framework::RunnableConfig;
+use processor::Processor;
+use serde::{Deserialize, Serialize};
+
+#[derive(Clone, Debug, Deserialize, Serialize)]
+#[serde(deny_unknown_fields)]
+pub struct IndexerGrpcFileCheckerConfig {
+    pub existing_bucket_name: String,
+    pub new_bucket_name: String,
+    pub starting_version: u64,
+}
+
+impl From<IndexerGrpcFileCheckerConfig> for Processor {
+    fn from(val: IndexerGrpcFileCheckerConfig) -> Self {
+        Processor {
+            existing_bucket_name: val.existing_bucket_name,
+            new_bucket_name: val.new_bucket_name,
+            starting_version: val.starting_version,
+        }
+    }
+}
+
+#[async_trait::async_trait]
+impl RunnableConfig for IndexerGrpcFileCheckerConfig {
+    async fn run(&self) -> Result<()> {
+        let processor: Processor = self.clone().into();
+
+        processor
+            .run()
+            .await
+            .expect("File checker exited unexpectedly");
+        Ok(())
+    }
+
+    fn get_server_name(&self) -> String {
+        "idxfilechk".to_string()
+    }
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/main.rs b/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/main.rs
new file mode 100644
index 0000000000000..24507092357ef
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/main.rs
@@ -0,0 +1,20 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::Result;
+use aptos_indexer_grpc_file_checker::IndexerGrpcFileCheckerConfig;
+use aptos_indexer_grpc_server_framework::ServerArgs;
+use clap::Parser;
+
+#[cfg(unix)]
+#[global_allocator]
+static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let args = ServerArgs::parse();
+    args.run::<IndexerGrpcFileCheckerConfig>()
+        .await
+        .expect("Failed to run server");
+    Ok(())
+}
diff --git a/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/processor.rs b/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/processor.rs
new file mode 100644
index 0000000000000..63fc0130ad865
--- /dev/null
+++ b/ecosystem/indexer-grpc/indexer-grpc-file-checker/src/processor.rs
@@ -0,0 +1,200 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use anyhow::{ensure, Context, Result};
+use aptos_indexer_grpc_utils::compression_util::{FileEntry, StorageFormat};
+use aptos_metrics_core::{register_int_counter, IntCounter};
+use cloud_storage::Client;
+use once_cell::sync::Lazy;
+use serde::{de::DeserializeOwned, Deserialize, Serialize};
+
+pub static FILE_DIFF_COUNTER: Lazy<IntCounter> = Lazy::new(|| {
+    register_int_counter!(
+        "indexer_grpc_file_checker_file_diff",
+        "Count of the files that are different.",
+    )
+    .unwrap()
+});
+
+const PROGRESS_FILE_NAME: &str = "file_checker_progress.json";
+const METADATA_FILE_NAME: &str = "metadata.json";
+
+// Update the progress file every 3 minutes.
+const PROGRESS_FILE_UPDATE_INTERVAL_IN_SECS: u64 = 180;
+
+/// Checker compares the data in the existing bucket with the data in the new bucket.
+/// The progress is saved in a file under the new bucket.
+pub struct Processor {
+    /// Existing bucket name.
+    pub existing_bucket_name: String,
+    /// New bucket name; this job is to make sure the data in the new bucket is correct.
+    pub new_bucket_name: String,
+    /// The version to start from. This is for **bootstrapping** the file checker only.
+    pub starting_version: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(deny_unknown_fields)]
+pub struct ProgressFile {
+    file_checker_version: u64,
+    file_checker_chain_id: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct MetadataFile {
+    chain_id: u64,
+}
+
+impl Processor {
+    pub async fn run(&self) -> Result<()> {
+        let (client, mut progress_file) = self.init().await?;
+        let mut last_update_time = std::time::Instant::now();
+
+        loop {
+            let current_version = progress_file.file_checker_version;
+
+            let file_name =
+                FileEntry::build_key(current_version, StorageFormat::Lz4CompressedProto);
+            let existing_file =
+                download_raw_file(&client, &self.existing_bucket_name, &file_name).await?;
+            let new_file = download_raw_file(&client, &self.new_bucket_name, &file_name).await?;
+            if existing_file.is_none() || new_file.is_none() {
+                let bucket_name = if existing_file.is_none() {
+                    &self.existing_bucket_name
+                } else {
+                    &self.new_bucket_name
+                };
+                tracing::info!(
+                    bucket_name = bucket_name,
+                    file_name = file_name.as_str(),
+                    "Transaction file is not found in one of the buckets."
+                );
+                // Wait for the next file to be uploaded.
+                tokio::time::sleep(tokio::time::Duration::from_secs(30)).await;
+                continue;
+            }
+            // Compare the files.
+            let existing_file = existing_file.unwrap();
+            let new_file = new_file.unwrap();
+            if existing_file != new_file {
+                // Files are different.
+                tracing::error!("Files are different: {}", file_name);
+                FILE_DIFF_COUNTER.inc();
+
+                // Sleep for a while to allow metrics to be updated.
+                tokio::time::sleep(tokio::time::Duration::from_secs(120)).await;
+                panic!("Files are different: {}", file_name);
+            }
+            tracing::info!(
+                file_name = file_name.as_str(),
+                transaction_version = progress_file.file_checker_version,
+                "File is verified."
+            );
+
+            progress_file.file_checker_version += 1000;
+
+            // If the progress file is updated recently, skip the update.
+            if last_update_time.elapsed().as_secs() < PROGRESS_FILE_UPDATE_INTERVAL_IN_SECS {
+                continue;
+            }
+            // Upload the progress file.
+            let progress_file_bytes =
+                serde_json::to_vec(&progress_file).context("Failed to serialize progress file.")?;
+            client
+                .object()
+                .create(
+                    &self.new_bucket_name,
+                    progress_file_bytes,
+                    PROGRESS_FILE_NAME,
+                    "application/json",
+                )
+                .await
+                .context("Update progress file failure")?;
+            tracing::info!("Progress file is updated.");
+            last_update_time = std::time::Instant::now();
+        }
+    }
+
+    /// Initialize the processor.
+    pub async fn init(&self) -> Result<(Client, ProgressFile)> {
+        let client = Client::new();
+
+        // All errors are considered fatal: files must exist for the processor to work.
+        let existing_metadata =
+            download_file::<MetadataFile>(&client, &self.existing_bucket_name, METADATA_FILE_NAME)
+                .await
+                .context("Failed to get metadata.")?
+                .expect("Failed to download metadata file");
+        let new_metadata =
+            download_file::<MetadataFile>(&client, &self.new_bucket_name, METADATA_FILE_NAME)
+                .await
+                .context("Failed to get metadata.")?
+                .expect("Failed to download metadata file");
+
+        // Ensure the chain IDs match.
+        ensure!(
+            existing_metadata.chain_id == new_metadata.chain_id,
+            "Chain IDs do not match: {} != {}",
+            existing_metadata.chain_id,
+            new_metadata.chain_id
+        );
+
+        let progress_file =
+            download_file::<ProgressFile>(&client, &self.new_bucket_name, PROGRESS_FILE_NAME)
+                .await
+                .context("Failed to get progress file.")?
+                .unwrap_or(ProgressFile {
+                    file_checker_version: self.starting_version,
+                    file_checker_chain_id: existing_metadata.chain_id,
+                });
+        // Ensure the chain IDs match.
+        ensure!(
+            existing_metadata.chain_id == progress_file.file_checker_chain_id,
+            "Chain IDs do not match: {} != {}",
+            existing_metadata.chain_id,
+            progress_file.file_checker_chain_id
+        );
+        tracing::info!(
+            starting_version = self.starting_version,
+            "Processor initialized.",
+        );
+
+        Ok((client, progress_file))
+    }
+}
+
+async fn download_raw_file(
+    client: &Client,
+    bucket_name: &str,
+    file_name: &str,
+) -> Result<Option<Vec<u8>>> {
+    let file = client.object().download(bucket_name, file_name).await;
+    match file {
+        Ok(file) => Ok(Some(file)),
+        Err(cloud_storage::Error::Other(err)) => {
+            if err.contains("No such object: ") {
+                Ok(None)
+            } else {
+                anyhow::bail!(
+                    "[Indexer File] Error happens when downloading transaction file. {}",
+                    err
+                );
+            }
+        },
+        Err(e) => Err(e.into()),
+    }
+}
+
+async fn download_file<T>(client: &Client, bucket_name: &str, file_name: &str) -> Result<Option<T>>
+where
+    T: DeserializeOwned,
+{
+    let file = download_raw_file(client, bucket_name, file_name).await?;
+    match file {
+        Some(file) => {
+            let file = serde_json::from_slice(&file).context("Failed to parse file.")?;
+            Ok(Some(file))
+        },
+        None => Ok(None),
+    }
+}

From 184ece9dab9d1cb3fac6964d714655a0e8a784e2 Mon Sep 17 00:00:00 2001
From: "Andrea Cappa (zi0Black)" <13380579+zi0Black@users.noreply.github.com>
Date: Tue, 15 Oct 2024 23:08:50 +0200
Subject: [PATCH 22/22] Slightly change build process to prevent OOM (#14965)

---
 testsuite/fuzzer/fuzz.sh | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/testsuite/fuzzer/fuzz.sh b/testsuite/fuzzer/fuzz.sh
index bdf98b9275816..c84b3ed9b0c1b 100755
--- a/testsuite/fuzzer/fuzz.sh
+++ b/testsuite/fuzzer/fuzz.sh
@@ -120,10 +120,18 @@ function build-oss-fuzz() {
     ld.lld --version
     clang --version
 
-    if ! build all ./target; then
-        env
-        error "Build failed. Exiting."
-    fi
+    # Limit the number of parallel jobs to avoid OOM
+    # export CARGO_BUILD_JOBS=3
+
+    # Build the fuzz targets.
+    # Building one target at a time should prevent OOM, while still using all threads when building dependencies.
+    for fuzz_target in $(list); do
+        if ! build $fuzz_target ./target; then
+            env
+            error "Build failed. Exiting."
+        fi
+    done
+
     find ./target/*/release/ -maxdepth 1 -type f -perm /111 -exec cp {} $oss_fuzz_out \;
 
     # Download corpus zip